authorOliver Schinagl <oliver@schinagl.nl>2011-02-26 14:57:47 (GMT)
committerOliver Schinagl <oliver@schinagl.nl>2011-02-26 14:57:47 (GMT)
commitfaadb245e8e1d5c4245dbdacc201e85b47c0c1fb (patch)
tree725f1ac685590a82aa182776c0af40e6243cb8e6
parentedb45850f53478c7779484105c30f8df0a3a3782 (diff)
Applied mtd patches and updated lib/Config.lib to 2.6.24 +mtd
-rw-r--r--linux-2.4.x/Documentation/DocBook/librs.tmpl289
-rw-r--r--linux-2.4.x/Documentation/DocBook/mtdnand.tmpl1320
-rw-r--r--linux-2.4.x/drivers/mtd/Config.in14
-rw-r--r--linux-2.4.x/drivers/mtd/Makefile87
-rw-r--r--linux-2.4.x/drivers/mtd/Makefile.common28
-rw-r--r--linux-2.4.x/drivers/mtd/afs.c119
-rw-r--r--linux-2.4.x/drivers/mtd/chips/Config.in37
-rw-r--r--linux-2.4.x/drivers/mtd/chips/Makefile28
-rw-r--r--linux-2.4.x/drivers/mtd/chips/Makefile.common26
-rw-r--r--linux-2.4.x/drivers/mtd/chips/amd_flash.c483
-rw-r--r--linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0001.c2852
-rw-r--r--linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0002.c1847
-rw-r--r--linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0020.c1429
-rw-r--r--linux-2.4.x/drivers/mtd/chips/cfi_probe.c308
-rw-r--r--linux-2.4.x/drivers/mtd/chips/cfi_util.c187
-rw-r--r--linux-2.4.x/drivers/mtd/chips/chipreg.c47
-rw-r--r--linux-2.4.x/drivers/mtd/chips/fwh_lock.h107
-rw-r--r--linux-2.4.x/drivers/mtd/chips/gen_probe.c221
-rw-r--r--linux-2.4.x/drivers/mtd/chips/jedec.c379
-rw-r--r--linux-2.4.x/drivers/mtd/chips/jedec_probe.c2307
-rw-r--r--linux-2.4.x/drivers/mtd/chips/map_absent.c25
-rw-r--r--linux-2.4.x/drivers/mtd/chips/map_ram.c54
-rw-r--r--linux-2.4.x/drivers/mtd/chips/map_rom.c25
-rw-r--r--linux-2.4.x/drivers/mtd/chips/sharp.c170
-rw-r--r--linux-2.4.x/drivers/mtd/cmdlinepart.c372
-rw-r--r--linux-2.4.x/drivers/mtd/devices/Config.in30
-rw-r--r--linux-2.4.x/drivers/mtd/devices/Makefile35
-rw-r--r--linux-2.4.x/drivers/mtd/devices/Makefile.common26
-rw-r--r--linux-2.4.x/drivers/mtd/devices/blkmtd-24.c1056
-rw-r--r--linux-2.4.x/drivers/mtd/devices/blkmtd.c1864
-rw-r--r--linux-2.4.x/drivers/mtd/devices/block2mtd.c494
-rw-r--r--linux-2.4.x/drivers/mtd/devices/doc2000.c758
-rw-r--r--linux-2.4.x/drivers/mtd/devices/doc2001.c114
-rw-r--r--linux-2.4.x/drivers/mtd/devices/doc2001plus.c1154
-rw-r--r--linux-2.4.x/drivers/mtd/devices/docecc.c49
-rw-r--r--linux-2.4.x/drivers/mtd/devices/docprobe.c221
-rw-r--r--linux-2.4.x/drivers/mtd/devices/lart.c94
-rw-r--r--linux-2.4.x/drivers/mtd/devices/ms02-nv.c45
-rw-r--r--linux-2.4.x/drivers/mtd/devices/ms02-nv.h82
-rw-r--r--linux-2.4.x/drivers/mtd/devices/mtdram.c218
-rw-r--r--linux-2.4.x/drivers/mtd/devices/phram.c300
-rw-r--r--linux-2.4.x/drivers/mtd/devices/pmc551.c72
-rw-r--r--linux-2.4.x/drivers/mtd/devices/ramtd.c249
-rw-r--r--linux-2.4.x/drivers/mtd/devices/slram.c128
-rw-r--r--linux-2.4.x/drivers/mtd/ftl.c636
-rw-r--r--linux-2.4.x/drivers/mtd/inftlcore.c903
-rw-r--r--linux-2.4.x/drivers/mtd/inftlmount.c809
-rw-r--r--linux-2.4.x/drivers/mtd/maps/Config.in120
-rw-r--r--linux-2.4.x/drivers/mtd/maps/Makefile51
-rw-r--r--linux-2.4.x/drivers/mtd/maps/Makefile.common75
-rw-r--r--linux-2.4.x/drivers/mtd/maps/alchemy-flash.c190
-rw-r--r--linux-2.4.x/drivers/mtd/maps/amd76xrom.c330
-rw-r--r--linux-2.4.x/drivers/mtd/maps/arctic-mtd.c135
-rw-r--r--linux-2.4.x/drivers/mtd/maps/autcpu12-nvram.c108
-rw-r--r--linux-2.4.x/drivers/mtd/maps/bast-flash.c225
-rw-r--r--linux-2.4.x/drivers/mtd/maps/beech-mtd.c112
-rw-r--r--linux-2.4.x/drivers/mtd/maps/cdb89712.c185
-rw-r--r--linux-2.4.x/drivers/mtd/maps/ceiva.c350
-rw-r--r--linux-2.4.x/drivers/mtd/maps/cfi_flagadm.c113
-rw-r--r--linux-2.4.x/drivers/mtd/maps/cstm_mips_ixx.c190
-rw-r--r--linux-2.4.x/drivers/mtd/maps/dbox2-flash.c148
-rw-r--r--linux-2.4.x/drivers/mtd/maps/dc21285.c252
-rw-r--r--linux-2.4.x/drivers/mtd/maps/dilnetpc.c160
-rw-r--r--linux-2.4.x/drivers/mtd/maps/dmv182.c149
-rw-r--r--linux-2.4.x/drivers/mtd/maps/ebony.c162
-rw-r--r--linux-2.4.x/drivers/mtd/maps/edb7312.c147
-rw-r--r--linux-2.4.x/drivers/mtd/maps/epxa10db-flash.c156
-rw-r--r--linux-2.4.x/drivers/mtd/maps/fortunet.c274
-rw-r--r--linux-2.4.x/drivers/mtd/maps/h720x-flash.c144
-rw-r--r--linux-2.4.x/drivers/mtd/maps/ichxrom.c382
-rw-r--r--linux-2.4.x/drivers/mtd/maps/impa7.c161
-rw-r--r--linux-2.4.x/drivers/mtd/maps/integrator-flash-v24.c258
-rw-r--r--linux-2.4.x/drivers/mtd/maps/integrator-flash.c341
-rw-r--r--linux-2.4.x/drivers/mtd/maps/ipaq-flash.c463
-rw-r--r--linux-2.4.x/drivers/mtd/maps/iq80310.c128
-rw-r--r--linux-2.4.x/drivers/mtd/maps/ixp2000.c275
-rw-r--r--linux-2.4.x/drivers/mtd/maps/ixp4xx.c297
-rw-r--r--linux-2.4.x/drivers/mtd/maps/l440gx.c100
-rw-r--r--linux-2.4.x/drivers/mtd/maps/lasat.c102
-rw-r--r--linux-2.4.x/drivers/mtd/maps/map_funcs.c45
-rw-r--r--linux-2.4.x/drivers/mtd/maps/mbx860.c96
-rw-r--r--linux-2.4.x/drivers/mtd/maps/mpc1211.c81
-rw-r--r--linux-2.4.x/drivers/mtd/maps/mphysmap.c186
-rw-r--r--linux-2.4.x/drivers/mtd/maps/mtx-1_flash.c96
-rw-r--r--linux-2.4.x/drivers/mtd/maps/netsc520.c126
-rw-r--r--linux-2.4.x/drivers/mtd/maps/nettel.c187
-rw-r--r--linux-2.4.x/drivers/mtd/maps/ocelot.c113
-rw-r--r--linux-2.4.x/drivers/mtd/maps/ocotea.c151
-rw-r--r--linux-2.4.x/drivers/mtd/maps/octagon-5066.c170
-rw-r--r--linux-2.4.x/drivers/mtd/maps/omap-toto-flash.c136
-rw-r--r--linux-2.4.x/drivers/mtd/maps/omap_nor.c179
-rw-r--r--linux-2.4.x/drivers/mtd/maps/p3p440.c133
-rw-r--r--linux-2.4.x/drivers/mtd/maps/pci.c227
-rw-r--r--linux-2.4.x/drivers/mtd/maps/pcmciamtd.c883
-rw-r--r--linux-2.4.x/drivers/mtd/maps/physmap.c177
-rw-r--r--linux-2.4.x/drivers/mtd/maps/plat-ram.c281
-rw-r--r--linux-2.4.x/drivers/mtd/maps/pnc2000.c90
-rw-r--r--linux-2.4.x/drivers/mtd/maps/pxa2xx-flash.c202
-rw-r--r--linux-2.4.x/drivers/mtd/maps/redwood.c168
-rw-r--r--linux-2.4.x/drivers/mtd/maps/rpxlite.c78
-rw-r--r--linux-2.4.x/drivers/mtd/maps/sa1100-flash.c1283
-rw-r--r--linux-2.4.x/drivers/mtd/maps/sbc8240.c245
-rw-r--r--linux-2.4.x/drivers/mtd/maps/sbc_gxx.c146
-rw-r--r--linux-2.4.x/drivers/mtd/maps/sc520cdp.c131
-rw-r--r--linux-2.4.x/drivers/mtd/maps/scb2_flash.c256
-rw-r--r--linux-2.4.x/drivers/mtd/maps/scx200_docflash.c233
-rw-r--r--linux-2.4.x/drivers/mtd/maps/sharpsl-flash.c118
-rw-r--r--linux-2.4.x/drivers/mtd/maps/solutionengine.c91
-rw-r--r--linux-2.4.x/drivers/mtd/maps/sun_uflash.c91
-rw-r--r--linux-2.4.x/drivers/mtd/maps/tqm834x.c291
-rw-r--r--linux-2.4.x/drivers/mtd/maps/tqm8xxl.c232
-rw-r--r--linux-2.4.x/drivers/mtd/maps/ts5500_flash.c125
-rw-r--r--linux-2.4.x/drivers/mtd/maps/tsunami_flash.c36
-rw-r--r--linux-2.4.x/drivers/mtd/maps/uclinux.c92
-rw-r--r--linux-2.4.x/drivers/mtd/maps/vmax301.c127
-rw-r--r--linux-2.4.x/drivers/mtd/maps/walnut.c121
-rw-r--r--linux-2.4.x/drivers/mtd/maps/wr_sbc82xx_flash.c181
-rw-r--r--linux-2.4.x/drivers/mtd/mtd_blkdevs-24.c692
-rw-r--r--linux-2.4.x/drivers/mtd/mtd_blkdevs.c482
-rw-r--r--linux-2.4.x/drivers/mtd/mtdblock.c535
-rw-r--r--linux-2.4.x/drivers/mtd/mtdblock_ro.c348
-rw-r--r--linux-2.4.x/drivers/mtd/mtdchar.c485
-rw-r--r--linux-2.4.x/drivers/mtd/mtdconcat.c740
-rw-r--r--linux-2.4.x/drivers/mtd/mtdcore.c326
-rw-r--r--linux-2.4.x/drivers/mtd/mtdpart.c350
-rw-r--r--linux-2.4.x/drivers/mtd/nand/Config.in56
-rw-r--r--linux-2.4.x/drivers/mtd/nand/Makefile18
-rw-r--r--linux-2.4.x/drivers/mtd/nand/Makefile.common23
-rw-r--r--linux-2.4.x/drivers/mtd/nand/at91_nand.c246
-rw-r--r--linux-2.4.x/drivers/mtd/nand/au1550nd.c634
-rw-r--r--linux-2.4.x/drivers/mtd/nand/autcpu12.c224
-rw-r--r--linux-2.4.x/drivers/mtd/nand/diskonchip.c1794
-rw-r--r--linux-2.4.x/drivers/mtd/nand/edb7312.c218
-rw-r--r--linux-2.4.x/drivers/mtd/nand/h1910.c208
-rw-r--r--linux-2.4.x/drivers/mtd/nand/nand_base.c3039
-rw-r--r--linux-2.4.x/drivers/mtd/nand/nand_bbt.c1112
-rw-r--r--linux-2.4.x/drivers/mtd/nand/nand_ecc.c101
-rw-r--r--linux-2.4.x/drivers/mtd/nand/nand_ids.c137
-rw-r--r--linux-2.4.x/drivers/mtd/nand/nandsim.c1609
-rw-r--r--linux-2.4.x/drivers/mtd/nand/ppchameleonevb.c420
-rw-r--r--linux-2.4.x/drivers/mtd/nand/rtc_from4.c683
-rw-r--r--linux-2.4.x/drivers/mtd/nand/s3c2410.c801
-rwxr-xr-xlinux-2.4.x/drivers/mtd/nand/sharpsl.c287
-rw-r--r--linux-2.4.x/drivers/mtd/nand/spia.c99
-rw-r--r--linux-2.4.x/drivers/mtd/nand/toto.c205
-rw-r--r--linux-2.4.x/drivers/mtd/nand/tx4925ndfmc.c416
-rw-r--r--linux-2.4.x/drivers/mtd/nand/tx4938ndfmc.c406
-rw-r--r--linux-2.4.x/drivers/mtd/nftlcore.c632
-rw-r--r--linux-2.4.x/drivers/mtd/nftlmount.c263
-rw-r--r--linux-2.4.x/drivers/mtd/redboot.c157
-rw-r--r--linux-2.4.x/drivers/mtd/rfd_ftl.c856
-rwxr-xr-xlinux-2.4.x/drivers/mtd/ssfdc.c1132
-rw-r--r--linux-2.4.x/fs/Config.in12
-rw-r--r--linux-2.4.x/fs/jffs2/Makefile49
-rw-r--r--linux-2.4.x/fs/jffs2/Makefile.common18
-rw-r--r--linux-2.4.x/fs/jffs2/background.c167
-rw-r--r--linux-2.4.x/fs/jffs2/build.c477
-rw-r--r--linux-2.4.x/fs/jffs2/compr.c514
-rw-r--r--linux-2.4.x/fs/jffs2/compr.h107
-rw-r--r--linux-2.4.x/fs/jffs2/compr_rtime.c104
-rw-r--r--linux-2.4.x/fs/jffs2/compr_rubin.c156
-rw-r--r--linux-2.4.x/fs/jffs2/compr_rubin.h13
-rw-r--r--linux-2.4.x/fs/jffs2/compr_zlib.c231
-rw-r--r--linux-2.4.x/fs/jffs2/comprtest.c36
-rw-r--r--linux-2.4.x/fs/jffs2/crc32.c4
-rw-r--r--linux-2.4.x/fs/jffs2/crc32.h8
-rw-r--r--linux-2.4.x/fs/jffs2/debug.c695
-rw-r--r--linux-2.4.x/fs/jffs2/debug.h279
-rw-r--r--linux-2.4.x/fs/jffs2/dir.c719
-rw-r--r--linux-2.4.x/fs/jffs2/erase.c487
-rw-r--r--linux-2.4.x/fs/jffs2/file.c521
-rw-r--r--linux-2.4.x/fs/jffs2/fs.c695
-rw-r--r--linux-2.4.x/fs/jffs2/gc.c1272
-rw-r--r--linux-2.4.x/fs/jffs2/histo.h2
-rw-r--r--linux-2.4.x/fs/jffs2/histo_mips.h2
-rw-r--r--linux-2.4.x/fs/jffs2/ioctl.c38
-rw-r--r--linux-2.4.x/fs/jffs2/malloc.c201
-rw-r--r--linux-2.4.x/fs/jffs2/nodelist.c1108
-rw-r--r--linux-2.4.x/fs/jffs2/nodelist.h472
-rw-r--r--linux-2.4.x/fs/jffs2/nodemgmt.c771
-rw-r--r--linux-2.4.x/fs/jffs2/os-linux.h219
-rw-r--r--linux-2.4.x/fs/jffs2/pushpull.h68
-rw-r--r--linux-2.4.x/fs/jffs2/rbtree.c363
-rw-r--r--linux-2.4.x/fs/jffs2/read.c170
-rw-r--r--linux-2.4.x/fs/jffs2/readinode.c1242
-rw-r--r--linux-2.4.x/fs/jffs2/scan.c1148
-rw-r--r--linux-2.4.x/fs/jffs2/summary.c865
-rw-r--r--linux-2.4.x/fs/jffs2/summary.h217
-rw-r--r--linux-2.4.x/fs/jffs2/super-v24.c183
-rw-r--r--linux-2.4.x/fs/jffs2/super.c547
-rw-r--r--linux-2.4.x/fs/jffs2/symlink-v24.c52
-rw-r--r--linux-2.4.x/fs/jffs2/symlink.c119
-rw-r--r--linux-2.4.x/fs/jffs2/wbuf.c1372
-rw-r--r--linux-2.4.x/fs/jffs2/wear_leveling.c107
-rw-r--r--linux-2.4.x/fs/jffs2/write.c770
-rw-r--r--linux-2.4.x/fs/jffs2/writev.c82
-rw-r--r--linux-2.4.x/include/linux/crc32.h23
-rw-r--r--linux-2.4.x/include/linux/jffs2.h195
-rw-r--r--linux-2.4.x/include/linux/jffs2_fs_i.h46
-rw-r--r--linux-2.4.x/include/linux/jffs2_fs_sb.h141
-rw-r--r--linux-2.4.x/include/linux/mtd/bbm.h122
-rw-r--r--linux-2.4.x/include/linux/mtd/blktrans.h72
-rw-r--r--linux-2.4.x/include/linux/mtd/cfi.h584
-rw-r--r--linux-2.4.x/include/linux/mtd/cfi_endian.h8
-rw-r--r--linux-2.4.x/include/linux/mtd/compatmac.h349
-rw-r--r--linux-2.4.x/include/linux/mtd/doc2000.h97
-rw-r--r--linux-2.4.x/include/linux/mtd/flashchip.h36
-rw-r--r--linux-2.4.x/include/linux/mtd/ftl.h6
-rw-r--r--linux-2.4.x/include/linux/mtd/gen_probe.h8
-rw-r--r--linux-2.4.x/include/linux/mtd/inftl.h62
-rw-r--r--linux-2.4.x/include/linux/mtd/jedec.h21
-rw-r--r--linux-2.4.x/include/linux/mtd/map.h432
-rw-r--r--linux-2.4.x/include/linux/mtd/mtd.h227
-rw-r--r--linux-2.4.x/include/linux/mtd/nand.h540
-rw-r--r--linux-2.4.x/include/linux/mtd/nand_ecc.h18
-rw-r--r--linux-2.4.x/include/linux/mtd/nftl.h88
-rw-r--r--linux-2.4.x/include/linux/mtd/onenand.h161
-rw-r--r--linux-2.4.x/include/linux/mtd/onenand_regs.h190
-rw-r--r--linux-2.4.x/include/linux/mtd/partitions.h47
-rw-r--r--linux-2.4.x/include/linux/mtd/physmap.h61
-rw-r--r--linux-2.4.x/include/linux/mtd/plat-ram.h35
-rw-r--r--linux-2.4.x/include/linux/mtd/pmc551.h14
-rw-r--r--linux-2.4.x/include/linux/mtd/xip.h102
-rw-r--r--linux-2.4.x/include/linux/rbtree-24.h133
-rw-r--r--linux-2.4.x/include/linux/rbtree.h144
-rw-r--r--linux-2.4.x/include/linux/rslib.h105
-rw-r--r--linux-2.4.x/include/linux/suspend.h10
-rw-r--r--linux-2.4.x/include/linux/workqueue.h21
-rw-r--r--linux-2.4.x/include/mtd/inftl-user.h91
-rw-r--r--linux-2.4.x/include/mtd/jffs2-user.h35
-rw-r--r--linux-2.4.x/include/mtd/mtd-abi.h122
-rw-r--r--linux-2.4.x/include/mtd/mtd-user.h20
-rw-r--r--linux-2.4.x/include/mtd/nftl-user.h76
-rw-r--r--linux-2.4.x/lib/Config.in26
-rw-r--r--linux-2.4.x/lib/Config.in.orig38
-rw-r--r--linux-2.4.x/lib/Makefile1
-rw-r--r--linux-2.4.x/lib/Makefile.orig29
-rw-r--r--linux-2.4.x/lib/reed_solomon/Makefile12
-rw-r--r--linux-2.4.x/lib/reed_solomon/decode_rs.c272
-rw-r--r--linux-2.4.x/lib/reed_solomon/encode_rs.c54
-rw-r--r--linux-2.4.x/lib/reed_solomon/rslib.c335
241 files changed, 61368 insertions, 16242 deletions
diff --git a/linux-2.4.x/Documentation/DocBook/librs.tmpl b/linux-2.4.x/Documentation/DocBook/librs.tmpl
new file mode 100644
index 0000000..3ff39ba
--- /dev/null
+++ b/linux-2.4.x/Documentation/DocBook/librs.tmpl
@@ -0,0 +1,289 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="Reed-Solomon-Library-Guide">
+ <bookinfo>
+ <title>Reed-Solomon Library Programming Interface</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Thomas</firstname>
+ <surname>Gleixner</surname>
+ <affiliation>
+ <address>
+ <email>tglx@linutronix.de</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2004</year>
+ <holder>Thomas Gleixner</holder>
+ </copyright>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License version 2 as published by the Free Software Foundation.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ The generic Reed-Solomon Library provides encoding, decoding
+ and error correction functions.
+ </para>
+ <para>
+ Reed-Solomon codes are used in communication and storage
+ applications to ensure data integrity.
+ </para>
+ <para>
+ This documentation is provided for developers who want to utilize
+ the functions provided by the library.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ None.
+ </para>
+ </chapter>
+
+ <chapter id="usage">
+ <title>Usage</title>
+ <para>
+      This chapter provides examples of how to use the library.
+ </para>
+ <sect1>
+ <title>Initializing</title>
+ <para>
+      The init function init_rs returns a pointer to an
+ rs decoder structure, which holds the necessary
+ information for encoding, decoding and error correction
+ with the given polynomial. It either uses an existing
+ matching decoder or creates a new one. On creation all
+ the lookup tables for fast en/decoding are created.
+ The function may take a while, so make sure not to
+ call it in critical code paths.
+ </para>
+ <programlisting>
+/* the Reed Solomon control structure */
+static struct rs_control *rs_decoder;
+
+/* Symbolsize is 10 (bits)
+ * Primitive polynomial is x^10+x^3+1
+ * first consecutive root is 0
+ * primitive element to generate roots = 1
+ * generator polynomial degree (number of roots) = 6
+ */
+rs_decoder = init_rs (10, 0x409, 0, 1, 6);
+ </programlisting>
+ </sect1>
+ <sect1>
+ <title>Encoding</title>
+ <para>
+ The encoder calculates the Reed-Solomon code over
+ the given data length and stores the result in
+ the parity buffer. Note that the parity buffer must
+ be initialized before calling the encoder.
+ </para>
+ <para>
+ The expanded data can be inverted on the fly by
+      providing a non-zero inversion mask. The expanded data is
+      XOR'ed with the mask. This is used e.g. for FLASH
+      ECC, where an all 0xFF page is inverted to all 0x00.
+      The Reed-Solomon code for all 0x00 is all 0x00. The
+      code is inverted before storing to FLASH, so it is 0xFF
+      too. This prevents ECC errors when reading from an
+      erased FLASH.
+ </para>
+ <para>
+ The databytes are expanded to the given symbol size
+ on the fly. There is no support for encoding continuous
+ bitstreams with a symbol size != 8 at the moment. If
+      it becomes necessary, it should not be a big deal to implement
+ such functionality.
+ </para>
+ <programlisting>
+/* Parity buffer. Size = number of roots */
+uint16_t par[6];
+/* Initialize the parity buffer */
+memset(par, 0, sizeof(par));
+/* Encode 512 byte in data8. Store parity in buffer par */
+encode_rs8 (rs_decoder, data8, 512, par, 0);
+ </programlisting>
+ </sect1>
+ <sect1>
+ <title>Decoding</title>
+ <para>
+ The decoder calculates the syndrome over
+ the given data length and the received parity symbols
+ and corrects errors in the data.
+ </para>
+ <para>
+ If a syndrome is available from a hardware decoder
+ then the syndrome calculation is skipped.
+ </para>
+ <para>
+ The correction of the data buffer can be suppressed
+ by providing a correction pattern buffer and an error
+ location buffer to the decoder. The decoder stores the
+ calculated error location and the correction bitmask
+ in the given buffers. This is useful for hardware
+ decoders which use a weird bit ordering scheme.
+ </para>
+ <para>
+ The databytes are expanded to the given symbol size
+ on the fly. There is no support for decoding continuous
+ bitstreams with a symbolsize != 8 at the moment. If
+      it becomes necessary, it should not be a big deal to implement
+ such functionality.
+ </para>
+
+ <sect2>
+ <title>
+ Decoding with syndrome calculation, direct data correction
+ </title>
+ <programlisting>
+/* Parity buffer. Size = number of roots */
+uint16_t par[6];
+uint8_t data[512];
+int numerr;
+/* Receive data */
+.....
+/* Receive parity */
+.....
+/* Decode 512 byte in data8.*/
+numerr = decode_rs8 (rs_decoder, data8, par, 512, NULL, 0, NULL, 0, NULL);
+ </programlisting>
+ </sect2>
+
+ <sect2>
+ <title>
+ Decoding with syndrome given by hardware decoder, direct data correction
+ </title>
+ <programlisting>
+/* Parity buffer. Size = number of roots */
+uint16_t par[6], syn[6];
+uint8_t data[512];
+int numerr;
+/* Receive data */
+.....
+/* Receive parity */
+.....
+/* Get syndrome from hardware decoder */
+.....
+/* Decode 512 byte in data8.*/
+numerr = decode_rs8 (rs_decoder, data8, par, 512, syn, 0, NULL, 0, NULL);
+ </programlisting>
+ </sect2>
+
+ <sect2>
+ <title>
+ Decoding with syndrome given by hardware decoder, no direct data correction.
+ </title>
+ <para>
+ Note: It's not necessary to give data and received parity to the decoder.
+ </para>
+ <programlisting>
+/* Parity buffer. Size = number of roots */
+uint16_t par[6], syn[6], corr[8];
+uint8_t data[512];
+int i, numerr, errpos[8];
+/* Receive data */
+.....
+/* Receive parity */
+.....
+/* Get syndrome from hardware decoder */
+.....
+/* Decode 512 byte in data8.*/
+numerr = decode_rs8 (rs_decoder, NULL, NULL, 512, syn, 0, errpos, 0, corr);
+for (i = 0; i &lt; numerr; i++) {
+ do_error_correction_in_your_buffer(errpos[i], corr[i]);
+}
+ </programlisting>
+ </sect2>
+ </sect1>
+ <sect1>
+ <title>Cleanup</title>
+ <para>
+ The function free_rs frees the allocated resources,
+ if the caller is the last user of the decoder.
+ </para>
+ <programlisting>
+/* Release resources */
+free_rs(rs_decoder);
+ </programlisting>
+ </sect1>
+
+ </chapter>
+
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures which are
+ used in the Reed-Solomon Library and are relevant for a developer.
+ </para>
+!Iinclude/linux/rslib.h
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the Reed-Solomon functions
+ which are exported.
+ </para>
+!Elib/reed_solomon/reed_solomon.c
+ </chapter>
+
+ <chapter id="credits">
+ <title>Credits</title>
+ <para>
+ The library code for encoding and decoding was written by Phil Karn.
+ </para>
+ <programlisting>
+ Copyright 2002, Phil Karn, KA9Q
+ May be used under the terms of the GNU General Public License (GPL)
+ </programlisting>
+ <para>
+      The wrapper functions and interfaces were written by Thomas Gleixner.
+ </para>
+ <para>
+ Many users have provided bugfixes, improvements and helping hands for testing.
+ Thanks a lot.
+ </para>
+ <para>
+ The following people have contributed to this document:
+ </para>
+ <para>
+ Thomas Gleixner<email>tglx@linutronix.de</email>
+ </para>
+ </chapter>
+</book>
diff --git a/linux-2.4.x/Documentation/DocBook/mtdnand.tmpl b/linux-2.4.x/Documentation/DocBook/mtdnand.tmpl
new file mode 100644
index 0000000..6e463d0
--- /dev/null
+++ b/linux-2.4.x/Documentation/DocBook/mtdnand.tmpl
@@ -0,0 +1,1320 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="MTD-NAND-Guide">
+ <bookinfo>
+ <title>MTD NAND Driver Programming Interface</title>
+
+ <authorgroup>
+ <author>
+ <firstname>Thomas</firstname>
+ <surname>Gleixner</surname>
+ <affiliation>
+ <address>
+ <email>tglx@linutronix.de</email>
+ </address>
+ </affiliation>
+ </author>
+ </authorgroup>
+
+ <copyright>
+ <year>2004</year>
+ <holder>Thomas Gleixner</holder>
+ </copyright>
+
+ <legalnotice>
+ <para>
+ This documentation is free software; you can redistribute
+ it and/or modify it under the terms of the GNU General Public
+ License version 2 as published by the Free Software Foundation.
+ </para>
+
+ <para>
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+ </para>
+
+ <para>
+ You should have received a copy of the GNU General Public
+ License along with this program; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ MA 02111-1307 USA
+ </para>
+
+ <para>
+ For more details see the file COPYING in the source
+ distribution of Linux.
+ </para>
+ </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+ <chapter id="intro">
+ <title>Introduction</title>
+ <para>
+ The generic NAND driver supports almost all NAND and AG-AND based
+ chips and connects them to the Memory Technology Devices (MTD)
+ subsystem of the Linux Kernel.
+ </para>
+ <para>
+ This documentation is provided for developers who want to implement
+ board drivers or filesystem drivers suitable for NAND devices.
+ </para>
+ </chapter>
+
+ <chapter id="bugs">
+ <title>Known Bugs And Assumptions</title>
+ <para>
+ None.
+ </para>
+ </chapter>
+
+ <chapter id="dochints">
+ <title>Documentation hints</title>
+ <para>
+ The function and structure docs are autogenerated. Each function and
+ struct member has a short description which is marked with an [XXX] identifier.
+ The following chapters explain the meaning of those identifiers.
+ </para>
+ <sect1>
+ <title>Function identifiers [XXX]</title>
+ <para>
+ The functions are marked with [XXX] identifiers in the short
+ comment. The identifiers explain the usage and scope of the
+      functions. The following identifiers are used:
+ </para>
+ <itemizedlist>
+ <listitem><para>
+ [MTD Interface]</para><para>
+ These functions provide the interface to the MTD kernel API.
+      They are not replaceable and provide functionality
+      which is completely hardware independent.
+ </para></listitem>
+ <listitem><para>
+ [NAND Interface]</para><para>
+ These functions are exported and provide the interface to the NAND kernel API.
+ </para></listitem>
+ <listitem><para>
+ [GENERIC]</para><para>
+      Generic functions are not replaceable and provide functionality
+      which is completely hardware independent.
+ </para></listitem>
+ <listitem><para>
+ [DEFAULT]</para><para>
+      Default functions provide hardware related functionality which is suitable
+      for most of the implementations. These functions can be replaced by the
+      board driver if necessary. Those functions are called via pointers in the
+      NAND chip description structure. The board driver can set the functions which
+      should be replaced by board-dependent functions before calling nand_scan().
+ If the function pointer is NULL on entry to nand_scan() then the pointer
+ is set to the default function which is suitable for the detected chip type.
+ </para></listitem>
+ </itemizedlist>
+ </sect1>
+ <sect1>
+ <title>Struct member identifiers [XXX]</title>
+ <para>
+ The struct members are marked with [XXX] identifiers in the
+ comment. The identifiers explain the usage and scope of the
+      members. The following identifiers are used:
+ </para>
+ <itemizedlist>
+ <listitem><para>
+ [INTERN]</para><para>
+ These members are for NAND driver internal use only and must not be
+ modified. Most of these values are calculated from the chip geometry
+ information which is evaluated during nand_scan().
+ </para></listitem>
+ <listitem><para>
+ [REPLACEABLE]</para><para>
+ Replaceable members hold hardware related functions which can be
+ provided by the board driver. The board driver can set the functions which
+      should be replaced by board-dependent functions before calling nand_scan().
+ If the function pointer is NULL on entry to nand_scan() then the pointer
+ is set to the default function which is suitable for the detected chip type.
+ </para></listitem>
+ <listitem><para>
+ [BOARDSPECIFIC]</para><para>
+ Board specific members hold hardware related information which must
+ be provided by the board driver. The board driver must set the function
+ pointers and datafields before calling nand_scan().
+ </para></listitem>
+ <listitem><para>
+ [OPTIONAL]</para><para>
+ Optional members can hold information relevant for the board driver. The
+ generic NAND driver code does not use this information.
+ </para></listitem>
+ </itemizedlist>
+ </sect1>
+ </chapter>
+
+ <chapter id="basicboarddriver">
+ <title>Basic board driver</title>
+ <para>
+ For most boards it will be sufficient to provide just the
+      basic functions and fill out some board-dependent
+ members in the nand chip description structure.
+ See drivers/mtd/nand/skeleton for reference.
+ </para>
+ <sect1>
+ <title>Basic defines</title>
+ <para>
+      At least you have to provide an mtd structure and
+      storage for the ioremapped chip address.
+      You can allocate the mtd structure using kmalloc
+      or you can allocate it statically.
+      In the case of static allocation you have to allocate
+ a nand_chip structure too.
+ </para>
+ <para>
+ Kmalloc based example
+ </para>
+ <programlisting>
+static struct mtd_info *board_mtd;
+static unsigned long baseaddr;
+ </programlisting>
+ <para>
+ Static example
+ </para>
+ <programlisting>
+static struct mtd_info board_mtd;
+static struct nand_chip board_chip;
+static unsigned long baseaddr;
+ </programlisting>
+ </sect1>
+ <sect1>
+ <title>Partition defines</title>
+ <para>
+      If you want to divide your device into partitions, then
+      enable the configuration switch CONFIG_MTD_PARTITIONS and define
+      a partitioning scheme suitable to your board.
+ </para>
+ <programlisting>
+#define NUM_PARTITIONS 2
+static struct mtd_partition partition_info[] = {
+ { .name = "Flash partition 1",
+ .offset = 0,
+ .size = 8 * 1024 * 1024 },
+ { .name = "Flash partition 2",
+ .offset = MTDPART_OFS_NEXT,
+ .size = MTDPART_SIZ_FULL },
+};
+ </programlisting>
+ </sect1>
+ <sect1>
+ <title>Hardware control function</title>
+ <para>
+ The hardware control function provides access to the
+ control pins of the NAND chip(s).
+ The access can be done by GPIO pins or by address lines.
+ If you use address lines, make sure that the timing
+ requirements are met.
+ </para>
+ <para>
+ <emphasis>GPIO based example</emphasis>
+ </para>
+ <programlisting>
+static void board_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ switch(cmd){
+ case NAND_CTL_SETCLE: /* Set CLE pin high */ break;
+ case NAND_CTL_CLRCLE: /* Set CLE pin low */ break;
+ case NAND_CTL_SETALE: /* Set ALE pin high */ break;
+ case NAND_CTL_CLRALE: /* Set ALE pin low */ break;
+ case NAND_CTL_SETNCE: /* Set nCE pin low */ break;
+ case NAND_CTL_CLRNCE: /* Set nCE pin high */ break;
+ }
+}
+ </programlisting>
+ <para>
+ <emphasis>Address lines based example.</emphasis> It's assumed that the
+ nCE pin is driven by a chip select decoder.
+ </para>
+ <programlisting>
+static void board_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ struct nand_chip *this = (struct nand_chip *) mtd->priv;
+ switch(cmd){
+	case NAND_CTL_SETCLE: this->IO_ADDR_W |= CLE_ADDR_BIT; break;
+	case NAND_CTL_CLRCLE: this->IO_ADDR_W &amp;= ~CLE_ADDR_BIT; break;
+	case NAND_CTL_SETALE: this->IO_ADDR_W |= ALE_ADDR_BIT; break;
+	case NAND_CTL_CLRALE: this->IO_ADDR_W &amp;= ~ALE_ADDR_BIT; break;
+ }
+}
+ </programlisting>
+ </sect1>
+ <sect1>
+ <title>Device ready function</title>
+ <para>
+      If the hardware interface has the ready/busy pin of the NAND chip connected to a
+      GPIO or other accessible I/O pin, this function is used to read back the state of the
+      pin. The function has no arguments and should return 0 if the device is busy (R/B pin
+      is low) and 1 if the device is ready (R/B pin is high).
+      If the hardware interface does not give access to the ready/busy pin, then
+      the function must not be defined and the function pointer this->dev_ready is set to NULL.
+ </para>
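+     <para>
+     A minimal sketch of such a function, assuming a hypothetical GPIO
+     read macro GPIO_READ and a board specific pin number BOARD_NAND_RDY
+     (neither is part of the NAND API):
+     </para>
+     <programlisting>
+/* Return 0 if the R/B pin is low (busy), 1 if it is high (ready) */
+static int board_dev_ready (void)
+{
+	return GPIO_READ(BOARD_NAND_RDY) ? 1 : 0;
+}
+     </programlisting>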
+ </sect1>
+ <sect1>
+ <title>Init function</title>
+ <para>
+ The init function allocates memory and sets up all the board
+ specific parameters and function pointers. When everything
+      is set up, nand_scan() is called. This function tries to
+      detect and identify the chip. If a chip is found all the
+      internal data fields are initialized accordingly.
+      The structure(s) have to be zeroed out first and then filled with the necessary
+ information about the device.
+ </para>
+ <programlisting>
+int __init board_init (void)
+{
+ struct nand_chip *this;
+ int err = 0;
+
+ /* Allocate memory for MTD device structure and private data */
+ board_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip), GFP_KERNEL);
+ if (!board_mtd) {
+ printk ("Unable to allocate NAND MTD device structure.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Initialize structures */
+ memset ((char *) board_mtd, 0, sizeof(struct mtd_info) + sizeof(struct nand_chip));
+
+	/* map physical address */
+ baseaddr = (unsigned long)ioremap(CHIP_PHYSICAL_ADDRESS, 1024);
+ if(!baseaddr){
+ printk("Ioremap to access NAND chip failed\n");
+ err = -EIO;
+ goto out_mtd;
+ }
+
+	/* Get pointer to private data (located directly behind the mtd_info structure) */
+	this = (struct nand_chip *) (&amp;board_mtd[1]);
+ /* Link the private data with the MTD structure */
+ board_mtd->priv = this;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = baseaddr;
+ this->IO_ADDR_W = baseaddr;
+ /* Reference hardware control function */
+ this->hwcontrol = board_hwcontrol;
+ /* Set command delay time, see datasheet for correct value */
+	this->chip_delay = CHIP_DEPENDENT_COMMAND_DELAY;
+ /* Assign the device ready function, if available */
+ this->dev_ready = board_dev_ready;
+ this->eccmode = NAND_ECC_SOFT;
+
+	/* Scan to find existence of the device */
+ if (nand_scan (board_mtd, 1)) {
+ err = -ENXIO;
+ goto out_ior;
+ }
+
+ add_mtd_partitions(board_mtd, partition_info, NUM_PARTITIONS);
+ goto out;
+
+out_ior:
+ iounmap((void *)baseaddr);
+out_mtd:
+ kfree (board_mtd);
+out:
+ return err;
+}
+module_init(board_init);
+ </programlisting>
+ </sect1>
+ <sect1>
+ <title>Exit function</title>
+ <para>
+      The exit function is only necessary if the driver is
+ compiled as a module. It releases all resources which
+ are held by the chip driver and unregisters the partitions
+ in the MTD layer.
+ </para>
+ <programlisting>
+#ifdef MODULE
+static void __exit board_cleanup (void)
+{
+ /* Release resources, unregister device */
+ nand_release (board_mtd);
+
+	/* unmap physical address */
+ iounmap((void *)baseaddr);
+
+ /* Free the MTD device structure */
+ kfree (board_mtd);
+}
+module_exit(board_cleanup);
+#endif
+ </programlisting>
+ </sect1>
+ </chapter>
+
+ <chapter id="boarddriversadvanced">
+ <title>Advanced board driver functions</title>
+ <para>
+ This chapter describes the advanced functionality of the NAND
+ driver. For a list of functions which can be overridden by the board
+ driver see the documentation of the nand_chip structure.
+ </para>
+ <sect1>
+ <title>Multiple chip control</title>
+ <para>
+      The nand driver can control chip arrays. Therefore the
+      board driver must provide its own select_chip function. This
+ function must (de)select the requested chip.
+ The function pointer in the nand_chip structure must
+ be set before calling nand_scan(). The maxchip parameter
+ of nand_scan() defines the maximum number of chips to
+ scan for. Make sure that the select_chip function can
+ handle the requested number of chips.
+ </para>
+ <para>
+ The nand driver concatenates the chips to one virtual
+ chip and provides this virtual chip to the MTD layer.
+ </para>
+ <para>
+ <emphasis>Note: The driver can only handle linear chip arrays
+ of equally sized chips. There is no support for
+ parallel arrays which extend the buswidth.</emphasis>
+ </para>
+ <para>
+ <emphasis>GPIO based example</emphasis>
+ </para>
+ <programlisting>
+static void board_select_chip (struct mtd_info *mtd, int chip)
+{
+ /* Deselect all chips, set all nCE pins high */
+ GPIO(BOARD_NAND_NCE) |= 0xff;
+ if (chip >= 0)
+ GPIO(BOARD_NAND_NCE) &amp;= ~ (1 &lt;&lt; chip);
+}
+ </programlisting>
+ <para>
+ <emphasis>Address lines based example.</emphasis>
+      It's assumed that the nCE pins are connected to an
+ address decoder.
+ </para>
+ <programlisting>
+static void board_select_chip (struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = (struct nand_chip *) mtd->priv;
+
+ /* Deselect all chips */
+ this->IO_ADDR_R &amp;= ~BOARD_NAND_ADDR_MASK;
+ this->IO_ADDR_W &amp;= ~BOARD_NAND_ADDR_MASK;
+ switch (chip) {
+ case 0:
+ this->IO_ADDR_R |= BOARD_NAND_ADDR_CHIP0;
+ this->IO_ADDR_W |= BOARD_NAND_ADDR_CHIP0;
+ break;
+ ....
+ case n:
+ this->IO_ADDR_R |= BOARD_NAND_ADDR_CHIPn;
+ this->IO_ADDR_W |= BOARD_NAND_ADDR_CHIPn;
+ break;
+ }
+}
+ </programlisting>
+ </sect1>
+ <sect1>
+ <title>Hardware ECC support</title>
+ <sect2>
+ <title>Functions and constants</title>
+ <para>
+      The nand driver supports four different types of
+ hardware ECC.
+ <itemizedlist>
+ <listitem><para>NAND_ECC_HW3_256</para><para>
+ Hardware ECC generator providing 3 bytes ECC per
+ 256 byte.
+ </para> </listitem>
+ <listitem><para>NAND_ECC_HW3_512</para><para>
+ Hardware ECC generator providing 3 bytes ECC per
+ 512 byte.
+ </para> </listitem>
+ <listitem><para>NAND_ECC_HW6_512</para><para>
+ Hardware ECC generator providing 6 bytes ECC per
+ 512 byte.
+ </para> </listitem>
+ <listitem><para>NAND_ECC_HW8_512</para><para>
+      Hardware ECC generator providing 8 bytes ECC per
+ 512 byte.
+ </para> </listitem>
+ </itemizedlist>
+ If your hardware generator has a different functionality
+ add it at the appropriate place in nand_base.c
+ </para>
+ <para>
+      The board driver must provide the following functions; a sketch of
+      how they are hooked up follows the list:
+ <itemizedlist>
+ <listitem><para>enable_hwecc</para><para>
+ This function is called before reading / writing to
+ the chip. Reset or initialize the hardware generator
+ in this function. The function is called with an
+      argument which lets you distinguish between read
+ and write operations.
+ </para> </listitem>
+ <listitem><para>calculate_ecc</para><para>
+ This function is called after read / write from / to
+ the chip. Transfer the ECC from the hardware to
+ the buffer. If the option NAND_HWECC_SYNDROME is set
+ then the function is only called on write. See below.
+ </para> </listitem>
+ <listitem><para>correct_data</para><para>
+ In case of an ECC error this function is called for
+      error detection and correction. Return 1 or 2
+ in case the error can be corrected. If the error is
+ not correctable return -1. If your hardware generator
+ matches the default algorithm of the nand_ecc software
+ generator then use the correction function provided
+ by nand_ecc instead of implementing duplicated code.
+ </para> </listitem>
+ </itemizedlist>
+ </para>
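+     <para>
+     A rough sketch of how these hooks could be wired up for an imaginary
+     controller. The register macros HWECC_RESET and HWECC_READ and the
+     helper board_fix_data are assumptions, not part of the NAND API; the
+     prototypes are modeled on the function pointers in the nand_chip structure.
+     </para>
+     <programlisting>
+/* Reset/prepare the (imaginary) hardware ECC generator.
+ * mode is NAND_ECC_READ or NAND_ECC_WRITE */
+static void board_enable_hwecc (struct mtd_info *mtd, int mode)
+{
+	HWECC_RESET(mode);
+}
+
+/* Copy the generated ECC bytes from the hardware into ecc_code */
+static int board_calculate_ecc (struct mtd_info *mtd, const u_char *dat,
+				u_char *ecc_code)
+{
+	HWECC_READ(ecc_code);
+	return 0;
+}
+
+/* Detect and correct errors. Return the number of corrected errors,
+ * or -1 if the error is not correctable */
+static int board_correct_data (struct mtd_info *mtd, u_char *dat,
+			       u_char *read_ecc, u_char *calc_ecc)
+{
+	return board_fix_data (dat, read_ecc, calc_ecc);
+}
+
+/* In board_init(), before calling nand_scan(): */
+this->eccmode       = NAND_ECC_HW3_512;
+this->enable_hwecc  = board_enable_hwecc;
+this->calculate_ecc = board_calculate_ecc;
+this->correct_data  = board_correct_data;
+     </programlisting>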
+ </sect2>
+ <sect2>
+ <title>Hardware ECC with syndrome calculation</title>
+ <para>
+ Many hardware ECC implementations provide Reed-Solomon
+ codes and calculate an error syndrome on read. The syndrome
+ must be converted to a standard Reed-Solomon syndrome
+ before calling the error correction code in the generic
+ Reed-Solomon library.
+ </para>
+ <para>
+      The ECC bytes must be placed immediately after the data
+      bytes in order to make the syndrome generator work. This
+      is contrary to the usual layout used by software ECC. The
+      separation of data and out-of-band area is no longer
+      possible. The nand driver code handles this layout and
+      the remaining free bytes in the oob area are managed by
+      the autoplacement code. Provide a matching oob layout
+      in this case. See rtc_from4.c and diskonchip.c for
+      implementation reference. In those cases we must also
+      use bad block tables on FLASH, because the ECC layout is
+      interfering with the bad block marker positions.
+ See bad block table support for details.
+ </para>
+ </sect2>
+ </sect1>
+ <sect1>
+ <title>Bad block table support</title>
+ <para>
+ Most NAND chips mark the bad blocks at a defined
+ position in the spare area. Those blocks must
+ not be erased under any circumstances as the bad
+ block information would be lost.
+      It is possible to check the bad block mark each
+      time the blocks are accessed by reading the
+      spare area of the first page in the block. This
+      is time consuming, so a bad block table is used.
+ </para>
+ <para>
+ The nand driver supports various types of bad block
+ tables.
+ <itemizedlist>
+ <listitem><para>Per device</para><para>
+ The bad block table contains all bad block information
+ of the device which can consist of multiple chips.
+ </para> </listitem>
+ <listitem><para>Per chip</para><para>
+ A bad block table is used per chip and contains the
+ bad block information for this particular chip.
+ </para> </listitem>
+ <listitem><para>Fixed offset</para><para>
+ The bad block table is located at a fixed offset
+ in the chip (device). This applies to various
+ DiskOnChip devices.
+ </para> </listitem>
+	<listitem><para>Automatically placed</para><para>
+	The bad block table is automatically placed and
+	detected either at the end or at the beginning
+	of a chip (device).
+ </para> </listitem>
+ <listitem><para>Mirrored tables</para><para>
+ The bad block table is mirrored on the chip (device) to
+ allow updates of the bad block table without data loss.
+ </para> </listitem>
+ </itemizedlist>
+ </para>
+ <para>
+ nand_scan() calls the function nand_default_bbt().
+ nand_default_bbt() selects appropriate default
+      bad block table descriptors depending on the chip information
+ which was retrieved by nand_scan().
+ </para>
+ <para>
+ The standard policy is scanning the device for bad
+      blocks and building a RAM-based bad block table, which
+ allows faster access than always checking the
+ bad block information on the flash chip itself.
+ </para>
+ <sect2>
+ <title>Flash based tables</title>
+ <para>
+      It may be desired or necessary to keep a bad block table in FLASH.
+      For AG-AND chips this is mandatory, as they have no factory-marked
+      bad blocks. They have factory-marked good blocks. The marker pattern
+      is erased when the block is erased to be reused. So in case of
+      power loss before writing the pattern back to the chip, this block
+      would be lost and added to the bad blocks. Therefore we scan the
+      chip(s) for good blocks the first time we detect them and
+ store this information in a bad block table before erasing any
+ of the blocks.
+ </para>
+ <para>
+      The blocks in which the tables are stored are protected against
+      accidental access by marking them bad in the memory bad block
+      table. The bad block table management functions are allowed
+      to circumvent this protection.
+ </para>
+ <para>
+      The simplest way to activate the FLASH based bad block table support
+      is to set the option NAND_USE_FLASH_BBT in the options field of
+      the nand chip structure before calling nand_scan(); see the sketch
+      after the list below. For AG-AND chips this is done by default.
+      This activates the default FLASH based bad block table functionality
+      of the NAND driver. The default bad block table options are
+ <itemizedlist>
+ <listitem><para>Store bad block table per chip</para></listitem>
+ <listitem><para>Use 2 bits per block</para></listitem>
+ <listitem><para>Automatic placement at the end of the chip</para></listitem>
+ <listitem><para>Use mirrored tables with version numbers</para></listitem>
+ <listitem><para>Reserve 4 blocks at the end of the chip</para></listitem>
+ </itemizedlist>
+ </para>
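+     <para>
+     A minimal sketch, following the init example given earlier (the
+     options member is part of the nand_chip structure):
+     </para>
+     <programlisting>
+/* In board_init(), before calling nand_scan():
+ * activate the default FLASH based bad block table support */
+this->options |= NAND_USE_FLASH_BBT;
+     </programlisting>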
+ </sect2>
+ <sect2>
+ <title>User defined tables</title>
+ <para>
+ User defined tables are created by filling out a
+ nand_bbt_descr structure and storing the pointer in the
+ nand_chip structure member bbt_td before calling nand_scan().
+      If a mirror table is necessary, a second structure must be
+ created and a pointer to this structure must be stored
+ in bbt_md inside the nand_chip structure. If the bbt_md
+ member is set to NULL then only the main table is used
+ and no scan for the mirrored table is performed.
+ </para>
+ <para>
+ The most important field in the nand_bbt_descr structure
+ is the options field. The options define most of the
+      table properties. Use the predefined constants from
+      nand.h to define the options; a sketch of a descriptor pair
+      follows the list below.
+ <itemizedlist>
+ <listitem><para>Number of bits per block</para>
+ <para>The supported number of bits is 1, 2, 4, 8.</para></listitem>
+ <listitem><para>Table per chip</para>
+ <para>Setting the constant NAND_BBT_PERCHIP selects that
+ a bad block table is managed for each chip in a chip array.
+ If this option is not set then a per device bad block table
+ is used.</para></listitem>
+ <listitem><para>Table location is absolute</para>
+ <para>Use the option constant NAND_BBT_ABSPAGE and
+ define the absolute page number where the bad block
+ table starts in the field pages. If you have selected bad block
+ tables per chip and you have a multi chip array then the start page
+ must be given for each chip in the chip array. Note: there is no scan
+	must be given for each chip in the chip array. Note: no scan
+	for a table ident pattern is performed, so the fields
+	pattern, veroffs, offs, len can be left uninitialized.</para></listitem>
+ <para>The table can either be located in the first or the last good
+ blocks of the chip (device). Set NAND_BBT_LASTBLOCK to place
+ the bad block table at the end of the chip (device). The
+ bad block tables are marked and identified by a pattern which
+ is stored in the spare area of the first page in the block which
+ holds the bad block table. Store a pointer to the pattern
+ in the pattern field. Further the length of the pattern has to be
+ stored in len and the offset in the spare area must be given
+	in the offs member of the nand_bbt_descr structure. For mirrored
+ bad block tables different patterns are mandatory.</para></listitem>
+ <listitem><para>Table creation</para>
+ <para>Set the option NAND_BBT_CREATE to enable the table creation
+ if no table can be found during the scan. Usually this is done only
+ once if a new chip is found. </para></listitem>
+ <listitem><para>Table write support</para>
+ <para>Set the option NAND_BBT_WRITE to enable the table write support.
+ This allows the update of the bad block table(s) in case a block has
+ to be marked bad due to wear. The MTD interface function block_markbad
+	calls the update function of the bad block table. If the write
+ support is enabled then the table is updated on FLASH.</para>
+ <para>
+ Note: Write support should only be enabled for mirrored tables with
+ version control.
+ </para></listitem>
+ <listitem><para>Table version control</para>
+ <para>Set the option NAND_BBT_VERSION to enable the table version control.
+ It's highly recommended to enable this for mirrored tables with write
+	support. It makes sure that the risk of losing the bad block
+	table information is reduced to the loss of the information about the
+	one worn-out block which should be marked bad. The version is stored in
+ 4 consecutive bytes in the spare area of the device. The position of
+ the version number is defined by the member veroffs in the bad block table
+ descriptor.</para></listitem>
+ <listitem><para>Save block contents on write</para>
+ <para>
+	In case the block which holds the bad block table contains
+	other useful information, set the option NAND_BBT_SAVECONTENT. When
+	the bad block table is written, the whole block is read, the bad
+	block table is updated, the block is erased and everything is
+	written back. If this option is not set, only the bad block table
+ is written and everything else in the block is ignored and erased.
+ </para></listitem>
+ <listitem><para>Number of reserved blocks</para>
+ <para>
+ For automatic placement some blocks must be reserved for
+ bad block table storage. The number of reserved blocks is defined
+	in the maxblocks member of the bad block table description structure.
+ Reserving 4 blocks for mirrored tables should be a reasonable number.
+ This also limits the number of blocks which are scanned for the bad
+ block table ident pattern.
+ </para></listitem>
+ </itemizedlist>
+ </para>
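+     <para>
+     A sketch of a pair of mirrored descriptors using the options described
+     above. The ident patterns, offsets and lengths are illustrative
+     assumptions only and must match your layout:
+     </para>
+     <programlisting>
+static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr board_bbt_main = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs = 8,		/* ident pattern offset in the spare area */
+	.len = 4,		/* ident pattern length */
+	.veroffs = 12,		/* version byte offset in the spare area */
+	.maxblocks = 4,		/* blocks reserved at the end of the chip */
+	.pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr board_bbt_mirror = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs = 8,
+	.len = 4,
+	.veroffs = 12,
+	.maxblocks = 4,
+	.pattern = mirror_pattern,	/* a different pattern is mandatory */
+};
+
+/* In board_init(), before calling nand_scan(): */
+this->bbt_td = &amp;board_bbt_main;
+this->bbt_md = &amp;board_bbt_mirror;
+     </programlisting>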
+ </sect2>
+ </sect1>
+ <sect1>
+ <title>Spare area (auto)placement</title>
+ <para>
+ The nand driver implements different possibilities for
+      placement of filesystem data in the spare area:
+ <itemizedlist>
+ <listitem><para>Placement defined by fs driver</para></listitem>
+ <listitem><para>Automatic placement</para></listitem>
+ </itemizedlist>
+ The default placement function is automatic placement. The
+ nand driver has built in default placement schemes for the
+      various chiptypes. If, due to hardware ECC functionality, the
+      default placement does not fit, then the board driver can
+      provide its own placement scheme.
+ </para>
+ <para>
+      File system drivers can provide their own placement scheme which
+ is used instead of the default placement scheme.
+ </para>
+ <para>
+ Placement schemes are defined by a nand_oobinfo structure
+ <programlisting>
+struct nand_oobinfo {
+ int useecc;
+ int eccbytes;
+ int eccpos[24];
+ int oobfree[8][2];
+};
+ </programlisting>
+ <itemizedlist>
+ <listitem><para>useecc</para><para>
+ The useecc member controls the ecc and placement function. The header
+ file include/mtd/mtd-abi.h contains constants to select ecc and
+	placement. MTD_NANDECC_OFF switches off the ecc completely. This is
+	not recommended and available for testing and diagnosis only.
+	MTD_NANDECC_PLACE selects caller defined placement, MTD_NANDECC_AUTOPLACE
+	selects automatic placement (see the example after this list).
+ </para></listitem>
+ <listitem><para>eccbytes</para><para>
+ The eccbytes member defines the number of ecc bytes per page.
+ </para></listitem>
+ <listitem><para>eccpos</para><para>
+ The eccpos array holds the byte offsets in the spare area where
+ the ecc codes are placed.
+ </para></listitem>
+ <listitem><para>oobfree</para><para>
+ The oobfree array defines the areas in the spare area which can be
+ used for automatic placement. The information is given in the format
+ {offset, size}. offset defines the start of the usable area, size the
+ length in bytes. More than one area can be defined. The list is terminated
+	by a {0, 0} entry.
+ </para></listitem>
+ </itemizedlist>
+ </para>
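+     <para>
+     As an illustration, a scheme which matches the 256 byte pagesize
+     default layout shown later in this document could be written as
+     follows (a sketch, not a definition taken from the driver):
+     </para>
+     <programlisting>
+static struct nand_oobinfo board_oobinfo = {
+	.useecc = MTD_NANDECC_AUTOPLACE,
+	.eccbytes = 3,
+	.eccpos = { 0, 1, 2 },			/* ECC bytes at offsets 0-2 */
+	.oobfree = { { 3, 2 }, { 6, 2 } },	/* bytes 3-4 and 6-7 are free */
+};
+     </programlisting>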
+ <sect2>
+ <title>Placement defined by fs driver</title>
+ <para>
+ The calling function provides a pointer to a nand_oobinfo
+ structure which defines the ecc placement. For writes the
+ caller must provide a spare area buffer along with the
+ data buffer. The spare area buffer size is (number of pages) *
+ (size of spare area). For reads the buffer size is
+ (number of pages) * ((size of spare area) + (number of ecc
+ steps per page) * sizeof (int)). The driver stores the
+ result of the ecc check for each tuple in the spare buffer.
+ The storage sequence is
+ </para>
+ <para>
+ &lt;spare data page 0&gt;&lt;ecc result 0&gt;...&lt;ecc result n&gt;
+ </para>
+ <para>
+ ...
+ </para>
+ <para>
+ &lt;spare data page n&gt;&lt;ecc result 0&gt;...&lt;ecc result n&gt;
+ </para>
+ <para>
+ This is a legacy mode used by YAFFS1.
+ </para>
+ <para>
+ If the spare area buffer is NULL then only the ECC placement is
+ done according to the given scheme in the nand_oobinfo structure.
+ </para>
+ </sect2>
+ <sect2>
+ <title>Automatic placement</title>
+ <para>
+ Automatic placement uses the built in defaults to place the
+ ecc bytes in the spare area. If filesystem data have to be stored /
+ read into the spare area then the calling function must provide a
+ buffer. The buffer size per page is determined by the oobfree array in
+ the nand_oobinfo structure.
+ </para>
+ <para>
+ If the spare area buffer is NULL then only the ECC placement is
+ done according to the default builtin scheme.
+ </para>
+ </sect2>
+ <sect2>
+ <title>User space placement selection</title>
+ <para>
+      All non-ECC functions like mtd->read and mtd->write use an internal
+ structure, which can be set by an ioctl. This structure is preset
+ to the autoplacement default.
+ <programlisting>
+ ioctl (fd, MEMSETOOBSEL, oobsel);
+ </programlisting>
+ oobsel is a pointer to a user supplied structure of type
+      nand_oobinfo. The contents of this structure must match the
+      criteria of the filesystem which will be used. See an example in utils/nandwrite.c.
+ </para>
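+     <para>
+     A small user space sketch; the device node path is only an example and
+     most error handling is omitted:
+     </para>
+     <programlisting>
+#include &lt;fcntl.h&gt;
+#include &lt;stdio.h&gt;
+#include &lt;string.h&gt;
+#include &lt;unistd.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;mtd/mtd-user.h&gt;
+
+static int set_autoplacement (const char *dev)
+{
+	struct nand_oobinfo oobsel;
+	int fd = open (dev, O_RDWR);
+
+	if (fd &lt; 0)
+		return -1;
+	memset (&amp;oobsel, 0, sizeof (oobsel));
+	oobsel.useecc = MTD_NANDECC_AUTOPLACE;	/* built in autoplacement */
+	if (ioctl (fd, MEMSETOOBSEL, &amp;oobsel) &lt; 0)
+		perror ("MEMSETOOBSEL");
+	close (fd);
+	return 0;
+}
+
+/* Usage: set_autoplacement ("/dev/mtd0"); */
+     </programlisting>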
+ </sect2>
+ </sect1>
+ <sect1>
+ <title>Spare area autoplacement default schemes</title>
+ <sect2>
+ <title>256 byte pagesize</title>
+<informaltable><tgroup cols="3"><tbody>
+<row>
+<entry>Offset</entry>
+<entry>Content</entry>
+<entry>Comment</entry>
+</row>
+<row>
+<entry>0x00</entry>
+<entry>ECC byte 0</entry>
+<entry>Error correction code byte 0</entry>
+</row>
+<row>
+<entry>0x01</entry>
+<entry>ECC byte 1</entry>
+<entry>Error correction code byte 1</entry>
+</row>
+<row>
+<entry>0x02</entry>
+<entry>ECC byte 2</entry>
+<entry>Error correction code byte 2</entry>
+</row>
+<row>
+<entry>0x03</entry>
+<entry>Autoplace 0</entry>
+<entry></entry>
+</row>
+<row>
+<entry>0x04</entry>
+<entry>Autoplace 1</entry>
+<entry></entry>
+</row>
+<row>
+<entry>0x05</entry>
+<entry>Bad block marker</entry>
+<entry>If any bit in this byte is zero, then this block is bad.
+This applies only to the first page in a block. In the remaining
+pages this byte is reserved</entry>
+</row>
+<row>
+<entry>0x06</entry>
+<entry>Autoplace 2</entry>
+<entry></entry>
+</row>
+<row>
+<entry>0x07</entry>
+<entry>Autoplace 3</entry>
+<entry></entry>
+</row>
+</tbody></tgroup></informaltable>
+ </sect2>
+ <sect2>
+ <title>512 byte pagesize</title>
+<informaltable><tgroup cols="3"><tbody>
+<row>
+<entry>Offset</entry>
+<entry>Content</entry>
+<entry>Comment</entry>
+</row>
+<row>
+<entry>0x00</entry>
+<entry>ECC byte 0</entry>
+<entry>Error correction code byte 0 of the lower 256 Bytes of data in
+this page</entry>
+</row>
+<row>
+<entry>0x01</entry>
+<entry>ECC byte 1</entry>
+<entry>Error correction code byte 1 of the lower 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x02</entry>
+<entry>ECC byte 2</entry>
+<entry>Error correction code byte 2 of the lower 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x03</entry>
+<entry>ECC byte 3</entry>
+<entry>Error correction code byte 0 of the upper 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x04</entry>
+<entry>reserved</entry>
+<entry>reserved</entry>
+</row>
+<row>
+<entry>0x05</entry>
+<entry>Bad block marker</entry>
+<entry>If any bit in this byte is zero, then this block is bad.
+This applies only to the first page in a block. In the remaining
+pages this byte is reserved</entry>
+</row>
+<row>
+<entry>0x06</entry>
+<entry>ECC byte 4</entry>
+<entry>Error correction code byte 1 of the upper 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x07</entry>
+<entry>ECC byte 5</entry>
+<entry>Error correction code byte 2 of the upper 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x08 - 0x0F</entry>
+<entry>Autoplace 0 - 7</entry>
+<entry></entry>
+</row>
+</tbody></tgroup></informaltable>
+ </sect2>
+ <sect2>
+ <title>2048 byte pagesize</title>
+<informaltable><tgroup cols="3"><tbody>
+<row>
+<entry>Offset</entry>
+<entry>Content</entry>
+<entry>Comment</entry>
+</row>
+<row>
+<entry>0x00</entry>
+<entry>Bad block marker</entry>
+<entry>If any bit in this byte is zero, then this block is bad.
+This applies only to the first page in a block. In the remaining
+pages this byte is reserved</entry>
+</row>
+<row>
+<entry>0x01</entry>
+<entry>Reserved</entry>
+<entry>Reserved</entry>
+</row>
+<row>
+<entry>0x02-0x27</entry>
+<entry>Autoplace 0 - 37</entry>
+<entry></entry>
+</row>
+<row>
+<entry>0x28</entry>
+<entry>ECC byte 0</entry>
+<entry>Error correction code byte 0 of the first 256 Bytes of data in
+this page</entry>
+</row>
+<row>
+<entry>0x29</entry>
+<entry>ECC byte 1</entry>
+<entry>Error correction code byte 1 of the first 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x2A</entry>
+<entry>ECC byte 2</entry>
+<entry>Error correction code byte 2 of the first 256 Bytes of data in
+this page</entry>
+</row>
+<row>
+<entry>0x2B</entry>
+<entry>ECC byte 3</entry>
+<entry>Error correction code byte 0 of the second 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x2C</entry>
+<entry>ECC byte 4</entry>
+<entry>Error correction code byte 1 of the second 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x2D</entry>
+<entry>ECC byte 5</entry>
+<entry>Error correction code byte 2 of the second 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x2E</entry>
+<entry>ECC byte 6</entry>
+<entry>Error correction code byte 0 of the third 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x2F</entry>
+<entry>ECC byte 7</entry>
+<entry>Error correction code byte 1 of the third 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x30</entry>
+<entry>ECC byte 8</entry>
+<entry>Error correction code byte 2 of the third 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x31</entry>
+<entry>ECC byte 9</entry>
+<entry>Error correction code byte 0 of the fourth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x32</entry>
+<entry>ECC byte 10</entry>
+<entry>Error correction code byte 1 of the fourth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x33</entry>
+<entry>ECC byte 11</entry>
+<entry>Error correction code byte 2 of the fourth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x34</entry>
+<entry>ECC byte 12</entry>
+<entry>Error correction code byte 0 of the fifth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x35</entry>
+<entry>ECC byte 13</entry>
+<entry>Error correction code byte 1 of the fifth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x36</entry>
+<entry>ECC byte 14</entry>
+<entry>Error correction code byte 2 of the fifth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x37</entry>
+<entry>ECC byte 15</entry>
+<entry>Error correction code byte 0 of the sixth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x38</entry>
+<entry>ECC byte 16</entry>
+<entry>Error correction code byte 1 of the sixth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x39</entry>
+<entry>ECC byte 17</entry>
+<entry>Error correction code byte 2 of the sixth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x3A</entry>
+<entry>ECC byte 18</entry>
+<entry>Error correction code byte 0 of the seventh 256 Bytes of
+data in this page</entry>
+</row>
+<row>
+<entry>0x3B</entry>
+<entry>ECC byte 19</entry>
+<entry>Error correction code byte 1 of the seventh 256 Bytes of
+data in this page</entry>
+</row>
+<row>
+<entry>0x3C</entry>
+<entry>ECC byte 20</entry>
+<entry>Error correction code byte 2 of the seventh 256 Bytes of
+data in this page</entry>
+</row>
+<row>
+<entry>0x3D</entry>
+<entry>ECC byte 21</entry>
+<entry>Error correction code byte 0 of the eighth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x3E</entry>
+<entry>ECC byte 22</entry>
+<entry>Error correction code byte 1 of the eighth 256 Bytes of data
+in this page</entry>
+</row>
+<row>
+<entry>0x3F</entry>
+<entry>ECC byte 23</entry>
+<entry>Error correction code byte 2 of the eighth 256 Bytes of data
+in this page</entry>
+</row>
+</tbody></tgroup></informaltable>
+ </sect2>
+ </sect1>
+ </chapter>
+
+ <chapter id="filesystems">
+ <title>Filesystem support</title>
+ <para>
+	The NAND driver provides all necessary functions for a
+ filesystem via the MTD interface.
+ </para>
+ <para>
+	Filesystems must be aware of the NAND peculiarities and
+	restrictions. One major restriction of NAND Flash is that a page cannot
+	be written arbitrarily often. The consecutive writes to a page,
+	before erasing it again, are restricted to 1-3 writes, depending on the
+	manufacturer's specifications. The same restriction applies to the spare area.
+ </para>
+ <para>
+	Therefore NAND aware filesystems must either write in page size chunks
+	or hold a write buffer to collect smaller writes until they add up to a
+	full page; a minimal sketch of such a buffer follows. Available NAND
+	aware filesystems are JFFS2 and YAFFS.
+ </para>
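+	<para>
+	The fragment below is only an illustration of such a write buffer and
+	is not taken from any existing filesystem. The names (example_wbuf,
+	example_wbuf_write) are invented for this sketch, and the caller is
+	assumed to provide the page size and a page sized buffer.
+	<programlisting>
+/* Illustration only: collect small writes and push full pages to MTD */
+struct example_wbuf {
+	struct mtd_info	*mtd;		/* underlying NAND device */
+	loff_t		ofs;		/* flash offset of the buffered page */
+	size_t		pagesize;	/* page size of the device */
+	size_t		used;		/* bytes collected so far */
+	u_char		*buf;		/* pagesize bytes, allocated by the caller */
+};
+
+static int example_wbuf_flush(struct example_wbuf *wb)
+{
+	size_t retlen;
+	int ret;
+
+	if (!wb->used)
+		return 0;
+	/* Pad the rest of the page with 0xff so it stays in the erased state */
+	memset(wb->buf + wb->used, 0xff, wb->pagesize - wb->used);
+	ret = wb->mtd->write(wb->mtd, wb->ofs, wb->pagesize, &amp;retlen, wb->buf);
+	wb->ofs += wb->pagesize;
+	wb->used = 0;
+	return ret;
+}
+
+static int example_wbuf_write(struct example_wbuf *wb, const u_char *data,
+			      size_t len)
+{
+	while (len) {
+		size_t space = wb->pagesize - wb->used;
+		size_t chunk = (len > space) ? space : len;
+		int ret;
+
+		memcpy(wb->buf + wb->used, data, chunk);
+		wb->used += chunk;
+		data += chunk;
+		len -= chunk;
+
+		if (wb->used == wb->pagesize) {
+			ret = example_wbuf_flush(wb);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+	</programlisting>
+	</para>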
+ <para>
+ The spare area usage to store filesystem data is controlled by
+ the spare area placement functionality which is described in one
+ of the earlier chapters.
+ </para>
+ </chapter>
+ <chapter id="tools">
+ <title>Tools</title>
+ <para>
+ The MTD project provides a couple of helpful tools to handle NAND Flash.
+ <itemizedlist>
+ <listitem><para>flasherase, flasheraseall: Erase and format FLASH partitions</para></listitem>
+ <listitem><para>nandwrite: write filesystem images to NAND FLASH</para></listitem>
+	    <listitem><para>nanddump: dump the contents of a NAND FLASH partition</para></listitem>
+ </itemizedlist>
+ </para>
+ <para>
+	These tools are aware of the NAND restrictions. Please use these tools
+	instead of complaining about errors which are caused by non-NAND-aware
+	access methods.
+ </para>
+ </chapter>
+
+ <chapter id="defines">
+ <title>Constants</title>
+ <para>
+ This chapter describes the constants which might be relevant for a driver developer.
+ </para>
+ <sect1>
+ <title>Chip option constants</title>
+ <sect2>
+ <title>Constants for chip id table</title>
+ <para>
+	These constants are defined in nand.h. They are ORed together to describe
+ the chip functionality.
+ <programlisting>
+/* Chip can not auto increment pages */
+#define NAND_NO_AUTOINCR 0x00000001
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16 0x00000002
+/* Device supports partial programming without padding */
+#define NAND_NO_PADDING 0x00000004
+/* Chip has cache program function */
+#define NAND_CACHEPRG 0x00000008
+/* Chip has copy back function */
+#define NAND_COPYBACK 0x00000010
+/* AND Chip which has 4 banks and a confusing page / block
+ * assignment. See Renesas datasheet for further information */
+#define NAND_IS_AND 0x00000020
+/* Chip has an array of 4 pages which can be read without
+ * additional ready / busy waits */
+#define NAND_4PAGE_ARRAY 0x00000040
+ </programlisting>
+ </para>
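+	<para>
+	As a purely illustrative example, an entry in the chip id table
+	(nand_flash_ids in nand_ids.c) can OR several of these constants into
+	its options field. The device data below is invented and does not
+	describe a real chip.
+	<programlisting>
+/* Invented example entry for a 32MiB, 512 byte page device */
+{"EXAMPLE NAND 32MiB 3,3V 8-bit", 0x00, 512, 32, 0x4000,
+	NAND_NO_AUTOINCR | NAND_CACHEPRG | NAND_COPYBACK},
+	</programlisting>
+	</para>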
+ </sect2>
+ <sect2>
+ <title>Constants for runtime options</title>
+ <para>
+	These constants are defined in nand.h. They are ORed together to describe
+ the functionality.
+ <programlisting>
+/* Use a flash based bad block table. This option is parsed by the
+ * default bad block table function (nand_default_bbt). */
+#define NAND_USE_FLASH_BBT 0x00010000
+/* The hw ecc generator provides a syndrome instead of an ecc value on read.
+ * This can only work if we have the ecc bytes directly behind the
+ * data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */
+#define NAND_HWECC_SYNDROME 0x00020000
+ </programlisting>
+ </para>
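+	<para>
+	A board driver typically ORs such an option into the options field of
+	its struct nand_chip before calling nand_scan(). The sketch below uses
+	an invented name, board_mtd, for the board's struct mtd_info, whose
+	priv field is assumed to point to the chip structure.
+	<programlisting>
+/* Sketch: enable the flash based bad block table before scanning */
+struct nand_chip *this = board_mtd->priv;
+
+this->options |= NAND_USE_FLASH_BBT;
+
+if (nand_scan(board_mtd, 1)) {
+	/* scan failed, release resources here */
+}
+	</programlisting>
+	</para>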
+ </sect2>
+ </sect1>
+
+ <sect1>
+ <title>ECC selection constants</title>
+ <para>
+ Use these constants to select the ECC algorithm.
+ <programlisting>
+/* No ECC. Usage is not recommended ! */
+#define NAND_ECC_NONE 0
+/* Software ECC 3 byte ECC per 256 Byte data */
+#define NAND_ECC_SOFT 1
+/* Hardware ECC 3 byte ECC per 256 Byte data */
+#define NAND_ECC_HW3_256 2
+/* Hardware ECC 3 byte ECC per 512 Byte data */
+#define NAND_ECC_HW3_512 3
+/* Hardware ECC 6 byte ECC per 512 Byte data */
+#define NAND_ECC_HW6_512 4
+/* Hardware ECC 8 byte ECC per 512 Byte data */
+#define NAND_ECC_HW8_512 6
+ </programlisting>
+ </para>
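+	<para>
+	The selected constant is assigned to the eccmode member of the chip
+	structure before nand_scan() is called. A minimal sketch, again using
+	the invented board_mtd name:
+	<programlisting>
+/* Sketch: let the generic NAND driver do 3 byte / 256 byte software ECC */
+struct nand_chip *this = board_mtd->priv;
+
+this->eccmode = NAND_ECC_SOFT;
+	</programlisting>
+	</para>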
+ </sect1>
+
+ <sect1>
+ <title>Hardware control related constants</title>
+ <para>
+ These constants describe the requested hardware access function when
+	the board specific hardware control function is called.
+ <programlisting>
+/* Select the chip by setting nCE to low */
+#define NAND_CTL_SETNCE 1
+/* Deselect the chip by setting nCE to high */
+#define NAND_CTL_CLRNCE 2
+/* Select the command latch by setting CLE to high */
+#define NAND_CTL_SETCLE 3
+/* Deselect the command latch by setting CLE to low */
+#define NAND_CTL_CLRCLE 4
+/* Select the address latch by setting ALE to high */
+#define NAND_CTL_SETALE 5
+/* Deselect the address latch by setting ALE to low */
+#define NAND_CTL_CLRALE 6
+/* Set write protection by setting WP to high. Not used! */
+#define NAND_CTL_SETWP 7
+/* Clear write protection by setting WP to low. Not used! */
+#define NAND_CTL_CLRWP 8
+ </programlisting>
+ </para>
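+	<para>
+	A board specific hardware control function usually decodes these
+	requests in a simple switch statement. The helpers used below
+	(board_set_cle, board_set_ale, board_set_nce) are invented; a real
+	driver toggles whatever GPIO or latch logic the board wires to the
+	CLE, ALE and nCE pins.
+	<programlisting>
+/* Sketch of a board specific control function */
+static void board_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+	switch (cmd) {
+	case NAND_CTL_SETCLE: board_set_cle(1); break;
+	case NAND_CTL_CLRCLE: board_set_cle(0); break;
+	case NAND_CTL_SETALE: board_set_ale(1); break;
+	case NAND_CTL_CLRALE: board_set_ale(0); break;
+	case NAND_CTL_SETNCE: board_set_nce(0); break;	/* nCE is active low */
+	case NAND_CTL_CLRNCE: board_set_nce(1); break;
+	}
+}
+	</programlisting>
+	</para>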
+ </sect1>
+
+ <sect1>
+ <title>Bad block table related constants</title>
+ <para>
+ These constants describe the options used for bad block
+ table descriptors.
+ <programlisting>
+/* Options for the bad block table descriptors */
+
+/* The number of bits used per block in the bbt on the device */
+#define NAND_BBT_NRBITS_MSK 0x0000000F
+#define NAND_BBT_1BIT 0x00000001
+#define NAND_BBT_2BIT 0x00000002
+#define NAND_BBT_4BIT 0x00000004
+#define NAND_BBT_8BIT 0x00000008
+/* The bad block table is in the last good block of the device */
+#define NAND_BBT_LASTBLOCK 0x00000010
+/* The bbt is at the given page, else we must scan for the bbt */
+#define NAND_BBT_ABSPAGE 0x00000020
+/* The bbt is not at a fixed page; the device must be searched for it */
+#define NAND_BBT_SEARCH 0x00000040
+/* bbt is stored per chip on multichip devices */
+#define NAND_BBT_PERCHIP 0x00000080
+/* bbt has a version counter at offset veroffs */
+#define NAND_BBT_VERSION 0x00000100
+/* Create a bbt if none exists */
+#define NAND_BBT_CREATE 0x00000200
+/* Search good / bad pattern through all pages of a block */
+#define NAND_BBT_SCANALLPAGES 0x00000400
+/* Also check whether blocks are empty during the good / bad block scan */
+#define NAND_BBT_SCANEMPTY 0x00000800
+/* Write bbt if necessary */
+#define NAND_BBT_WRITE 0x00001000
+/* Read and write back block contents when writing bbt */
+#define NAND_BBT_SAVECONTENT 0x00002000
+ </programlisting>
+ </para>
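+	<para>
+	These options are used in the options field of a bad block table
+	descriptor (struct nand_bbt_descr). The descriptor below is only a
+	sketch, loosely modelled on the defaults in nand_bbt.c; the pattern,
+	offsets and names are illustrative.
+	<programlisting>
+/* Illustrative descriptor for a flash based bbt in the last good blocks */
+static u_char example_bbt_pattern[] = { 'B', 'b', 't', '0' };
+
+static struct nand_bbt_descr example_bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs = 8,		/* pattern offset in the spare area */
+	.len = 4,		/* pattern length */
+	.veroffs = 12,		/* version byte offset in the spare area */
+	.maxblocks = 4,		/* search at most 4 blocks from the end */
+	.pattern = example_bbt_pattern
+};
+	</programlisting>
+	</para>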
+ </sect1>
+
+ </chapter>
+
+ <chapter id="structs">
+ <title>Structures</title>
+ <para>
+ This chapter contains the autogenerated documentation of the structures which are
+ used in the NAND driver and might be relevant for a driver developer. Each
+ struct member has a short description which is marked with an [XXX] identifier.
+ See the chapter "Documentation hints" for an explanation.
+ </para>
+!Iinclude/linux/mtd/nand.h
+ </chapter>
+
+ <chapter id="pubfunctions">
+ <title>Public Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the NAND kernel API functions
+ which are exported. Each function has a short description which is marked with an [XXX] identifier.
+ See the chapter "Documentation hints" for an explanation.
+ </para>
+!Edrivers/mtd/nand/nand_base.c
+!Edrivers/mtd/nand/nand_bbt.c
+!Edrivers/mtd/nand/nand_ecc.c
+ </chapter>
+
+ <chapter id="intfunctions">
+ <title>Internal Functions Provided</title>
+ <para>
+ This chapter contains the autogenerated documentation of the NAND driver internal functions.
+ Each function has a short description which is marked with an [XXX] identifier.
+ See the chapter "Documentation hints" for an explanation.
+ The functions marked with [DEFAULT] might be relevant for a board driver developer.
+ </para>
+!Idrivers/mtd/nand/nand_base.c
+!Idrivers/mtd/nand/nand_bbt.c
+!Idrivers/mtd/nand/nand_ecc.c
+ </chapter>
+
+ <chapter id="credits">
+ <title>Credits</title>
+ <para>
+ The following people have contributed to the NAND driver:
+ <orderedlist>
+ <listitem><para>Steven J. Hill<email>sjhill@realitydiluted.com</email></para></listitem>
+ <listitem><para>David Woodhouse<email>dwmw2@infradead.org</email></para></listitem>
+ <listitem><para>Thomas Gleixner<email>tglx@linutronix.de</email></para></listitem>
+ </orderedlist>
+ A lot of users have provided bugfixes, improvements and helping hands for testing.
+ Thanks a lot.
+ </para>
+ <para>
+ The following people have contributed to this document:
+ <orderedlist>
+ <listitem><para>Thomas Gleixner<email>tglx@linutronix.de</email></para></listitem>
+ </orderedlist>
+ </para>
+ </chapter>
+</book>
diff --git a/linux-2.4.x/drivers/mtd/Config.in b/linux-2.4.x/drivers/mtd/Config.in
index 96aedab..d591013 100644
--- a/linux-2.4.x/drivers/mtd/Config.in
+++ b/linux-2.4.x/drivers/mtd/Config.in
@@ -1,5 +1,5 @@
-# $Id: Config.in,v 1.73 2002/03/08 16:34:35 rkaiser Exp $
+# $Id: Config.in,v 1.78 2004/08/09 18:46:03 dmarlin Exp $
mainmenu_option next_comment
comment 'Memory Technology Devices (MTD)'
@@ -11,12 +11,17 @@ if [ "$CONFIG_MTD" = "y" -o "$CONFIG_MTD" = "m" ]; then
if [ "$CONFIG_MTD_DEBUG" = "y" ]; then
int ' Debugging verbosity (0 = quiet, 3 = noisy)' CONFIG_MTD_DEBUG_VERBOSE 0
fi
- dep_tristate ' MTD partitioning support' CONFIG_MTD_PARTITIONS $CONFIG_MTD
+ bool ' MTD partitioning support' CONFIG_MTD_PARTITIONS $CONFIG_MTD
dep_tristate ' MTD concatenating support' CONFIG_MTD_CONCAT $CONFIG_MTD
dep_tristate ' RedBoot partition table parsing' CONFIG_MTD_REDBOOT_PARTS $CONFIG_MTD_PARTITIONS
- dep_tristate ' Command line partition table parsing' CONFIG_MTD_CMDLINE_PARTS $CONFIG_MTD_PARTITIONS
+ if [ "$CONFIG_MTD_REDBOOT_PARTS" = "y" -o "$CONFIG_MTD_REDBOOT_PARTS" = "m" ]; then
+ bool ' Include unallocated flash space' CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ bool ' Force read-only for RedBoot system images' CONFIG_MTD_REDBOOT_PARTS_READONLY
+ fi
+ if [ "$CONFIG_MTD_PARTITIONS" = "y" ]; then
+ bool ' Command line partition table parsing' CONFIG_MTD_CMDLINE_PARTS
+ fi
if [ "$CONFIG_ARM" = "y" ]; then
- dep_tristate ' Compaq bootldr partition table parsing' CONFIG_MTD_BOOTLDR_PARTS $CONFIG_MTD_PARTITIONS
dep_tristate ' ARM Firmware Suite partition parsing' CONFIG_MTD_AFS_PARTS $CONFIG_MTD_PARTITIONS
fi
@@ -31,6 +36,7 @@ comment 'User Modules And Translation Layers'
if [ "$CONFIG_NFTL" = "y" -o "$CONFIG_NFTL" = "m" ]; then
bool ' Write support for NFTL (BETA)' CONFIG_NFTL_RW
fi
+ dep_tristate ' INFTL (Inverse NAND Flash Translation Layer) support' CONFIG_INFTL $CONFIG_MTD
source drivers/mtd/chips/Config.in
diff --git a/linux-2.4.x/drivers/mtd/Makefile b/linux-2.4.x/drivers/mtd/Makefile
index b88a33a..24f4f8a 100644
--- a/linux-2.4.x/drivers/mtd/Makefile
+++ b/linux-2.4.x/drivers/mtd/Makefile
@@ -1,67 +1,54 @@
#
-# Makefile for the memory technology device drivers.
+# linux/drivers/Makefile.24
+# Makefile for obsolete kernels.
#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
-#
-# Note 2! The CFLAGS definitions are now inherited from the
-# parent makes..
-#
-# $Id: Makefile,v 1.65 2002/03/22 07:10:34 dwmw2 Exp $
+# $Id: Makefile.24,v 1.3 2004/08/11 14:45:53 dmarlin Exp $
+# Core functionality.
+mtd-y := mtdcore.o
+mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
+obj-$(CONFIG_MTD) += $(mtd-y)
-obj-y += chips/chipslink.o maps/mapslink.o \
- devices/devlink.o nand/nandlink.o
-obj-m :=
-obj-n :=
-obj- :=
+obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
+obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
+obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
+obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
-O_TARGET := mtdlink.o
+# 'Users' - code which presents functionality to userspace.
+obj-$(CONFIG_MTD_CHAR) += mtdchar.o
+obj-$(CONFIG_MTD_BLOCK) += mtdblock.o mtd_blkdevs-24.o
+obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o mtd_blkdevs-24.o
+obj-$(CONFIG_FTL) += ftl.o mtd_blkdevs-24.o
+obj-$(CONFIG_NFTL) += nftl.o mtd_blkdevs-24.o
+obj-$(CONFIG_INFTL) += inftl.o mtd_blkdevs-24.o
-export-objs := mtdcore.o mtdpart.o redboot.o bootldr.o afs.o mtdconcat.o cmdline.o
-list-multi := nftl.o
+nftl-objs := nftlcore.o nftlmount.o
+inftl-objs := inftlcore.o inftlmount.o
-mod-subdirs :=
-subdir-y := chips maps devices nand
-subdir-m := $(subdir-y)
+export-objs := mtdcore.o mtdpart.o redboot.o cmdlinepart.o afs.o \
+ mtdconcat.o mtd_blkdevs-24.o
-# *** BIG UGLY NOTE ***
-#
-# The shiny new inter_module_xxx has introduced yet another ugly link
-# order dependency, which I'd previously taken great care to avoid.
-# We now have to ensure that the chip drivers are initialised before the
-# map drivers, and that the doc200[01] drivers are initialised before
-# docprobe.
-#
-# We'll hopefully merge the doc200[01] drivers and docprobe back into
-# a single driver some time soon, but the CFI drivers are going to have
-# to stay like that.
-#
-# Urgh.
-#
-# dwmw2 21/11/0
+mtd_blkdevs-objs := mtd_blkdevs-24.o
-# Core functionality.
-obj-$(CONFIG_MTD) += mtdcore.o
-obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
-obj-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
-obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
-obj-$(CONFIG_MTD_BOOTLDR_PARTS) += bootldr.o
-obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
-obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdline.o
+obj-y += chips/chipslink.o maps/mapslink.o \
+ devices/devlink.o nand/nandlink.o
-# 'Users' - code which presents functionality to userspace.
-obj-$(CONFIG_MTD_CHAR) += mtdchar.o
-obj-$(CONFIG_MTD_BLOCK) += mtdblock.o
-obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o
-obj-$(CONFIG_FTL) += ftl.o
-obj-$(CONFIG_NFTL) += nftl.o
+O_TARGET := mtdlink.o
-nftl-objs := nftlcore.o nftlmount.o
+list-multi := nftl.o inftl.o mtd_blkdevs-24.o
+
+mod-subdirs :=
+subdir-y := chips maps devices nand
+subdir-m := $(subdir-y)
include $(TOPDIR)/Rules.make
nftl.o: $(nftl-objs)
$(LD) -r -o $@ $(nftl-objs)
+inftl.o: $(inftl-objs)
+ $(LD) -r -o $@ $(inftl-objs)
+
+mtd_blkdevs.o: $(mtd_blkdevs-objs)
+ $(LD) -r -o $@ $(mtd_blkdevs-objs)
+
diff --git a/linux-2.4.x/drivers/mtd/Makefile.common b/linux-2.4.x/drivers/mtd/Makefile.common
new file mode 100644
index 0000000..fc93744
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/Makefile.common
@@ -0,0 +1,28 @@
+#
+# Makefile for the memory technology device drivers.
+#
+# $Id: Makefile.common,v 1.7 2005/07/11 10:39:27 gleixner Exp $
+
+# Core functionality.
+mtd-y := mtdcore.o
+mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o
+obj-$(CONFIG_MTD) += $(mtd-y)
+
+obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
+obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
+obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
+obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+
+# 'Users' - code which presents functionality to userspace.
+obj-$(CONFIG_MTD_CHAR) += mtdchar.o
+obj-$(CONFIG_MTD_BLOCK) += mtdblock.o mtd_blkdevs.o
+obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o mtd_blkdevs.o
+obj-$(CONFIG_FTL) += ftl.o mtd_blkdevs.o
+obj-$(CONFIG_NFTL) += nftl.o mtd_blkdevs.o
+obj-$(CONFIG_INFTL) += inftl.o mtd_blkdevs.o
+obj-$(CONFIG_RFD_FTL) += rfd_ftl.o mtd_blkdevs.o
+
+nftl-objs := nftlcore.o nftlmount.o
+inftl-objs := inftlcore.o inftlmount.o
+
+obj-y += chips/ maps/ devices/ nand/ onenand/
diff --git a/linux-2.4.x/drivers/mtd/afs.c b/linux-2.4.x/drivers/mtd/afs.c
index 990256e..6a45be0 100644
--- a/linux-2.4.x/drivers/mtd/afs.c
+++ b/linux-2.4.x/drivers/mtd/afs.c
@@ -1,27 +1,27 @@
/*======================================================================
drivers/mtd/afs.c: ARM Flash Layout/Partitioning
-
+
Copyright (C) 2000 ARM Limited
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- This is access code for flashes using ARM's flash partitioning
+
+ This is access code for flashes using ARM's flash partitioning
standards.
- $Id: afs.c,v 1.7 2001/11/01 20:04:54 rmk Exp $
+ $Id: afs.c,v 1.15 2005/11/07 11:14:19 gleixner Exp $
======================================================================*/
@@ -57,6 +57,17 @@ struct image_info_struct {
u32 checksum; /* Image checksum (inc. this struct) */
};
+static u32 word_sum(void *words, int num)
+{
+ u32 *p = words;
+ u32 sum = 0;
+
+ while (num--)
+ sum += *p++;
+
+ return sum;
+}
+
static int
afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
u_int off, u_int mask)
@@ -76,17 +87,25 @@ afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
return ret;
}
+ ret = 1;
+
/*
* Does it contain the magic number?
*/
if (fs.signature != 0xa0ffff9f)
- ret = 1;
+ ret = 0;
+
+ /*
+ * Check the checksum.
+ */
+ if (word_sum(&fs, sizeof(fs) / sizeof(u32)) != 0xffffffff)
+ ret = 0;
/*
* Don't touch the SIB.
*/
if (fs.type == 2)
- ret = 1;
+ ret = 0;
*iis_start = fs.image_info_base & mask;
*img_start = fs.image_start & mask;
@@ -96,14 +115,14 @@ afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
* be located after the footer structure.
*/
if (*iis_start >= ptr)
- ret = 1;
+ ret = 0;
/*
* Check the start of this image. The image
* data can not be located after this block.
*/
if (*img_start > off)
- ret = 1;
+ ret = 0;
return ret;
}
@@ -112,20 +131,41 @@ static int
afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
{
size_t sz;
- int ret;
+ int ret, i;
memset(iis, 0, sizeof(*iis));
ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis);
- if (ret >= 0 && sz != sizeof(*iis))
- ret = -EINVAL;
if (ret < 0)
- printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
- ptr, ret);
+ goto failed;
+
+ if (sz != sizeof(*iis)) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ ret = 0;
+
+ /*
+ * Validate the name - it must be NUL terminated.
+ */
+ for (i = 0; i < sizeof(iis->name); i++)
+ if (iis->name[i] == '\0')
+ break;
+
+ if (i < sizeof(iis->name))
+ ret = 1;
return ret;
+
+ failed:
+ printk(KERN_ERR "AFS: mtd read failed at 0x%x: %d\n",
+ ptr, ret);
+ return ret;
}
-int parse_afs_partitions(struct mtd_info *mtd, struct mtd_partition **pparts)
+static int parse_afs_partitions(struct mtd_info *mtd,
+ struct mtd_partition **pparts,
+ unsigned long origin)
{
struct mtd_partition *parts;
u_int mask, off, idx, sz;
@@ -150,12 +190,14 @@ int parse_afs_partitions(struct mtd_info *mtd, struct mtd_partition **pparts)
ret = afs_read_footer(mtd, &img_ptr, &iis_ptr, off, mask);
if (ret < 0)
break;
- if (ret == 1)
+ if (ret == 0)
continue;
ret = afs_read_iis(mtd, &iis, iis_ptr);
if (ret < 0)
break;
+ if (ret == 0)
+ continue;
sz += sizeof(struct mtd_partition);
sz += strlen(iis.name) + 1;
@@ -169,6 +211,7 @@ int parse_afs_partitions(struct mtd_info *mtd, struct mtd_partition **pparts)
if (!parts)
return -ENOMEM;
+ memset(parts, 0, sz);
str = (char *)(parts + idx);
/*
@@ -176,36 +219,26 @@ int parse_afs_partitions(struct mtd_info *mtd, struct mtd_partition **pparts)
*/
for (idx = off = 0; off < mtd->size; off += mtd->erasesize) {
struct image_info_struct iis;
- u_int iis_ptr, img_ptr, size;
+ u_int iis_ptr, img_ptr;
/* Read the footer. */
ret = afs_read_footer(mtd, &img_ptr, &iis_ptr, off, mask);
if (ret < 0)
break;
- if (ret == 1)
+ if (ret == 0)
continue;
/* Read the image info block */
ret = afs_read_iis(mtd, &iis, iis_ptr);
if (ret < 0)
break;
+ if (ret == 0)
+ continue;
strcpy(str, iis.name);
- size = mtd->erasesize + off - img_ptr;
-
- /*
- * In order to support JFFS2 partitions on this layout,
- * we must lie to MTD about the real size of JFFS2
- * partitions; this ensures that the AFS flash footer
- * won't be erased by JFFS2. Please ensure that your
- * JFFS2 partitions are given image numbers between
- * 1000 and 2000 inclusive.
- */
- if (iis.imageNumber >= 1000 && iis.imageNumber < 2000)
- size -= mtd->erasesize;
parts[idx].name = str;
- parts[idx].size = size;
+ parts[idx].size = (iis.length + mtd->erasesize - 1) & ~(mtd->erasesize - 1);
parts[idx].offset = img_ptr;
parts[idx].mask_flags = 0;
@@ -226,7 +259,25 @@ int parse_afs_partitions(struct mtd_info *mtd, struct mtd_partition **pparts)
return idx ? idx : ret;
}
-EXPORT_SYMBOL(parse_afs_partitions);
+static struct mtd_part_parser afs_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_afs_partitions,
+ .name = "afs",
+};
+
+static int __init afs_parser_init(void)
+{
+ return register_mtd_parser(&afs_parser);
+}
+
+static void __exit afs_parser_exit(void)
+{
+ deregister_mtd_parser(&afs_parser);
+}
+
+module_init(afs_parser_init);
+module_exit(afs_parser_exit);
+
MODULE_AUTHOR("ARM Ltd");
MODULE_DESCRIPTION("ARM Firmware Suite partition parser");
diff --git a/linux-2.4.x/drivers/mtd/chips/Config.in b/linux-2.4.x/drivers/mtd/chips/Config.in
index 5767a51..bae9540 100644
--- a/linux-2.4.x/drivers/mtd/chips/Config.in
+++ b/linux-2.4.x/drivers/mtd/chips/Config.in
@@ -1,24 +1,24 @@
# drivers/mtd/chips/Config.in
-# $Id: Config.in,v 1.12 2001/09/23 15:35:21 dwmw2 Exp $
+# $Id: Config.in,v 1.20 2004/08/09 18:46:03 dmarlin Exp $
mainmenu_option next_comment
comment 'RAM/ROM/Flash chip drivers'
dep_tristate ' Detect flash chips by Common Flash Interface (CFI) probe' CONFIG_MTD_CFI $CONFIG_MTD
-#dep_tristate ' Detect non-CFI Intel-compatible flash chips' CONFIG_MTD_INTELPROBE $CONFIG_MTD
-dep_tristate ' Detect non-CFI AMD/JEDEC-compatible flash chips' CONFIG_MTD_JEDECPROBE $CONFIG_MTD
+dep_tristate ' Detect JEDEC JESD21c compatible flash chips' CONFIG_MTD_JEDECPROBE $CONFIG_MTD
-if [ "$CONFIG_MTD_CFI" = "y" -o "$CONFIG_MTD_INTELPROBE" = "y" -o "$CONFIG_MTD_JEDECPROBE" = "y" ]; then
+if [ "$CONFIG_MTD_CFI" = "y" -o "$CONFIG_MTD_JEDECPROBE" = "y" ]; then
define_bool CONFIG_MTD_GEN_PROBE y
else
- if [ "$CONFIG_MTD_CFI" = "m" -o "$CONFIG_MTD_INTELPROBE" = "m" -o "$CONFIG_MTD_JEDECPROBE" = "m" ]; then
+ if [ "$CONFIG_MTD_CFI" = "m" -o "$CONFIG_MTD_JEDECPROBE" = "m" ]; then
define_bool CONFIG_MTD_GEN_PROBE m
else
define_bool CONFIG_MTD_GEN_PROBE n
fi
fi
+
if [ "$CONFIG_MTD_GEN_PROBE" = "y" -o "$CONFIG_MTD_GEN_PROBE" = "m" ]; then
bool ' Flash chip driver advanced configuration options' CONFIG_MTD_CFI_ADV_OPTIONS
if [ "$CONFIG_MTD_CFI_ADV_OPTIONS" = "y" ]; then
@@ -28,21 +28,40 @@ if [ "$CONFIG_MTD_GEN_PROBE" = "y" -o "$CONFIG_MTD_GEN_PROBE" = "m" ]; then
LITTLE_ENDIAN_BYTE CONFIG_MTD_CFI_LE_BYTE_SWAP" NO
bool ' Specific CFI Flash geometry selection' CONFIG_MTD_CFI_GEOMETRY
if [ "$CONFIG_MTD_CFI_GEOMETRY" = "y" ]; then
- bool ' Support 8-bit buswidth' CONFIG_MTD_CFI_B1
- bool ' Support 16-bit buswidth' CONFIG_MTD_CFI_B2
- bool ' Support 32-bit buswidth' CONFIG_MTD_CFI_B4
- if [ "$CONFIG_MTD_CFI_B1" = "y" ]; then
+ bool ' Support 8-bit buswidth' CONFIG_MTD_MAP_BANK_WIDTH_1
+ bool ' Support 16-bit buswidth' CONFIG_MTD_MAP_BANK_WIDTH_2
+ bool ' Support 32-bit buswidth' CONFIG_MTD_MAP_BANK_WIDTH_4
+ bool ' Support 64-bit buswidth' CONFIG_MTD_MAP_BANK_WIDTH_8
+ bool ' Support 128-bit buswidth' CONFIG_MTD_MAP_BANK_WIDTH_16
+ bool ' Support 256-bit buswidth' CONFIG_MTD_MAP_BANK_WIDTH_32
+ if [ "$CONFIG_MTD_MAP_BANK_WIDTH_1" = "y" ]; then
define_bool CONFIG_MTD_CFI_I1 y
else
bool ' Support 1-chip flash interleave' CONFIG_MTD_CFI_I1
fi
bool ' Support 2-chip flash interleave' CONFIG_MTD_CFI_I2
bool ' Support 4-chip flash interleave' CONFIG_MTD_CFI_I4
+ bool ' Support 8-chip flash interleave' CONFIG_MTD_CFI_I8
fi
fi
fi
dep_tristate ' Support for Intel/Sharp flash chips' CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_GEN_PROBE
dep_tristate ' Support for AMD/Fujitsu flash chips' CONFIG_MTD_CFI_AMDSTD $CONFIG_MTD_GEN_PROBE
+dep_tristate ' Support for ST (Advanced Architecture) flash chips' CONFIG_MTD_CFI_STAA $CONFIG_MTD_GEN_PROBE
+
+if [ "$CONFIG_MTD_CFI_INTELEXT" = "y" \
+ -o "$CONFIG_MTD_CFI_AMDSTD" = "y" \
+ -o "$CONFIG_MTD_CFI_STAA" = "y" ]; then
+ define_bool CONFIG_MTD_CFI_UTIL y
+else
+ if [ "$CONFIG_MTD_CFI_INTELEXT" = "m" \
+ -o "$CONFIG_MTD_CFI_AMDSTD" = "m" \
+ -o "$CONFIG_MTD_CFI_STAA" = "m" ]; then
+ define_bool CONFIG_MTD_CFI_UTIL m
+ else
+ define_bool CONFIG_MTD_CFI_UTIL n
+ fi
+fi
dep_tristate ' Support for RAM chips in bus mapping' CONFIG_MTD_RAM $CONFIG_MTD
dep_tristate ' Support for ROM chips in bus mapping' CONFIG_MTD_ROM $CONFIG_MTD
diff --git a/linux-2.4.x/drivers/mtd/chips/Makefile b/linux-2.4.x/drivers/mtd/chips/Makefile
index 03fcf81..fc905d5 100644
--- a/linux-2.4.x/drivers/mtd/chips/Makefile
+++ b/linux-2.4.x/drivers/mtd/chips/Makefile
@@ -1,30 +1,12 @@
#
-# linux/drivers/chips/Makefile
+# linux/drivers/chips/Makefile.24
+# Makefile for obsolete kernels.
#
-# $Id: Makefile,v 1.8 2002/01/10 20:27:40 eric Exp $
+# $Id: Makefile.24,v 1.1 2004/07/12 16:08:16 dwmw2 Exp $
O_TARGET := chipslink.o
+export-objs := chipreg.o gen_probe.o cfi_util.o
-export-objs := chipreg.o gen_probe.o
-
-# *** BIG UGLY NOTE ***
-#
-# The removal of get_module_symbol() and replacement with
-# inter_module_register() et al has introduced a link order dependency
-# here where previously there was none. We now have to ensure that
-# the CFI command set drivers are linked before cfi_probe.o
-
-obj-$(CONFIG_MTD) += chipreg.o
-obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
-obj-$(CONFIG_MTD_CFI) += cfi_probe.o
-obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o
-obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
-obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o
-obj-$(CONFIG_MTD_JEDEC) += jedec.o
-obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o
-obj-$(CONFIG_MTD_RAM) += map_ram.o
-obj-$(CONFIG_MTD_ROM) += map_rom.o
-obj-$(CONFIG_MTD_SHARP) += sharp.o
-obj-$(CONFIG_MTD_ABSENT) += map_absent.o
+include Makefile.common
include $(TOPDIR)/Rules.make
diff --git a/linux-2.4.x/drivers/mtd/chips/Makefile.common b/linux-2.4.x/drivers/mtd/chips/Makefile.common
new file mode 100644
index 0000000..8afe309
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/chips/Makefile.common
@@ -0,0 +1,26 @@
+#
+# linux/drivers/chips/Makefile
+#
+# $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $
+
+# *** BIG UGLY NOTE ***
+#
+# The removal of get_module_symbol() and replacement with
+# inter_module_register() et al has introduced a link order dependency
+# here where previously there was none. We now have to ensure that
+# the CFI command set drivers are linked before gen_probe.o
+
+obj-$(CONFIG_MTD) += chipreg.o
+obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
+obj-$(CONFIG_MTD_CFI) += cfi_probe.o
+obj-$(CONFIG_MTD_CFI_UTIL) += cfi_util.o
+obj-$(CONFIG_MTD_CFI_STAA) += cfi_cmdset_0020.o
+obj-$(CONFIG_MTD_CFI_AMDSTD) += cfi_cmdset_0002.o
+obj-$(CONFIG_MTD_CFI_INTELEXT) += cfi_cmdset_0001.o
+obj-$(CONFIG_MTD_GEN_PROBE) += gen_probe.o
+obj-$(CONFIG_MTD_JEDEC) += jedec.o
+obj-$(CONFIG_MTD_JEDECPROBE) += jedec_probe.o
+obj-$(CONFIG_MTD_RAM) += map_ram.o
+obj-$(CONFIG_MTD_ROM) += map_rom.o
+obj-$(CONFIG_MTD_SHARP) += sharp.o
+obj-$(CONFIG_MTD_ABSENT) += map_absent.o
diff --git a/linux-2.4.x/drivers/mtd/chips/amd_flash.c b/linux-2.4.x/drivers/mtd/chips/amd_flash.c
index 860b8a0..d3d2e13 100644
--- a/linux-2.4.x/drivers/mtd/chips/amd_flash.c
+++ b/linux-2.4.x/drivers/mtd/chips/amd_flash.c
@@ -3,7 +3,7 @@
*
* Author: Jonas Holmberg <jonas.holmberg@axis.com>
*
- * $Id: amd_flash.c,v 1.17 2002/03/05 17:00:37 jonashg Exp $
+ * $Id: amd_flash.c,v 1.29 2006/03/29 08:31:10 dwmw2 Exp $
*
* Copyright (c) 2001 Axis Communications AB
*
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/init.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
@@ -66,7 +67,6 @@
#define AM29LV160DT 0x22C4
#define AM29LV160DB 0x2249
#define AM29BDS323D 0x22D1
-#define AM29BDS643D 0x227E
/* Atmel */
#define AT49xV16x 0x00C0
@@ -75,6 +75,7 @@
/* Fujitsu */
#define MBM29LV160TE 0x22C4
#define MBM29LV160BE 0x2249
+#define MBM29LV800BB 0x225B
/* ST - www.st.com */
#define M29W800T 0x00D7
@@ -92,9 +93,9 @@
#define D6_MASK 0x40
struct amd_flash_private {
- int device_type;
- int interleave;
- int numchips;
+ int device_type;
+ int interleave;
+ int numchips;
unsigned long chipshift;
// const char *im_name;
struct flchip chips[0];
@@ -124,10 +125,10 @@ static struct mtd_info *amd_flash_probe(struct map_info *map);
static struct mtd_chip_driver amd_flash_chipdrv = {
- probe: amd_flash_probe,
- destroy: amd_flash_destroy,
- name: "amd_flash",
- module: THIS_MODULE
+ .probe = amd_flash_probe,
+ .destroy = amd_flash_destroy,
+ .name = "amd_flash",
+ .module = THIS_MODULE
};
@@ -139,11 +140,11 @@ static const char im_name[] = "amd_flash";
static inline __u32 wide_read(struct map_info *map, __u32 addr)
{
if (map->buswidth == 1) {
- return map->read8(map, addr);
+ return map_read8(map, addr);
} else if (map->buswidth == 2) {
- return map->read16(map, addr);
+ return map_read16(map, addr);
} else if (map->buswidth == 4) {
- return map->read32(map, addr);
+ return map_read32(map, addr);
}
return 0;
@@ -152,11 +153,11 @@ static inline __u32 wide_read(struct map_info *map, __u32 addr)
static inline void wide_write(struct map_info *map, __u32 val, __u32 addr)
{
if (map->buswidth == 1) {
- map->write8(map, val, addr);
+ map_write8(map, val, addr);
} else if (map->buswidth == 2) {
- map->write16(map, val, addr);
+ map_write16(map, val, addr);
} else if (map->buswidth == 4) {
- map->write32(map, val, addr);
+ map_write32(map, val, addr);
}
}
@@ -252,7 +253,7 @@ static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
int i;
int retval = 0;
int lock_status;
-
+
map = mtd->priv;
/* Pass the whole chip through sector by sector and check for each
@@ -272,7 +273,7 @@ static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
unlock_sector(map, eraseoffset, is_unlock);
lock_status = is_sector_locked(map, eraseoffset);
-
+
if (is_unlock && lock_status) {
printk("Cannot unlock sector at address %x length %xx\n",
eraseoffset, merip->erasesize);
@@ -304,7 +305,7 @@ static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
/*
* Reads JEDEC manufacturer ID and device ID and returns the index of the first
* matching table entry (-1 if not found or alias for already found chip).
- */
+ */
static int probe_new_chip(struct mtd_info *mtd, __u32 base,
struct flchip *chips,
struct amd_flash_private *private,
@@ -423,221 +424,219 @@ static int probe_new_chip(struct mtd_info *mtd, __u32 base,
static struct mtd_info *amd_flash_probe(struct map_info *map)
{
- /* Keep this table on the stack so that it gets deallocated after the
- * probe is done.
- */
- const struct amd_flash_info table[] = {
+ static const struct amd_flash_info table[] = {
{
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV160DT,
- name: "AMD AM29LV160DT",
- size: 0x00200000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
- { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 }
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV160DT,
+ .name = "AMD AM29LV160DT",
+ .size = 0x00200000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
+ { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
}
}, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV160DB,
- name: "AMD AM29LV160DB",
- size: 0x00200000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
- { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV160DB,
+ .name = "AMD AM29LV160DB",
+ .size = 0x00200000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
+ { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
}
}, {
- mfr_id: MANUFACTURER_TOSHIBA,
- dev_id: TC58FVT160,
- name: "Toshiba TC58FVT160",
- size: 0x00200000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
- { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 }
+ .mfr_id = MANUFACTURER_TOSHIBA,
+ .dev_id = TC58FVT160,
+ .name = "Toshiba TC58FVT160",
+ .size = 0x00200000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
+ { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
}
}, {
- mfr_id: MANUFACTURER_FUJITSU,
- dev_id: MBM29LV160TE,
- name: "Fujitsu MBM29LV160TE",
- size: 0x00200000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
- { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 }
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV160TE,
+ .name = "Fujitsu MBM29LV160TE",
+ .size = 0x00200000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
+ { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
}
}, {
- mfr_id: MANUFACTURER_TOSHIBA,
- dev_id: TC58FVB160,
- name: "Toshiba TC58FVB160",
- size: 0x00200000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
- { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
+ .mfr_id = MANUFACTURER_TOSHIBA,
+ .dev_id = TC58FVB160,
+ .name = "Toshiba TC58FVB160",
+ .size = 0x00200000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
+ { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
}
}, {
- mfr_id: MANUFACTURER_FUJITSU,
- dev_id: MBM29LV160BE,
- name: "Fujitsu MBM29LV160BE",
- size: 0x00200000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
- { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV160BE,
+ .name = "Fujitsu MBM29LV160BE",
+ .size = 0x00200000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
+ { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
}
}, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV800BB,
- name: "AMD AM29LV800BB",
- size: 0x00100000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
- { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x010000, erasesize: 0x10000, numblocks: 15 }
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV800BB,
+ .name = "AMD AM29LV800BB",
+ .size = 0x00100000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
+ { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
}
}, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29F800BB,
- name: "AMD AM29F800BB",
- size: 0x00100000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
- { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x010000, erasesize: 0x10000, numblocks: 15 }
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29F800BB,
+ .name = "AMD AM29F800BB",
+ .size = 0x00100000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
+ { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
}
}, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV800BT,
- name: "AMD AM29LV800BT",
- size: 0x00100000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
- { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 }
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV800BT,
+ .name = "AMD AM29LV800BT",
+ .size = 0x00100000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
+ { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
}
}, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29F800BT,
- name: "AMD AM29F800BT",
- size: 0x00100000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
- { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 }
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29F800BT,
+ .name = "AMD AM29F800BT",
+ .size = 0x00100000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
+ { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
}
}, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV800BB,
- name: "AMD AM29LV800BB",
- size: 0x00100000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
- { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 }
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV800BB,
+ .name = "AMD AM29LV800BB",
+ .size = 0x00100000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
+ { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
}
}, {
- mfr_id: MANUFACTURER_ST,
- dev_id: M29W800T,
- name: "ST M29W800T",
- size: 0x00100000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 15 },
- { offset: 0x0F0000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x0F8000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x0FC000, erasesize: 0x04000, numblocks: 1 }
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV800BB,
+ .name = "Fujitsu MBM29LV800BB",
+ .size = 0x00100000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
+ { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 15 }
}
}, {
- mfr_id: MANUFACTURER_ST,
- dev_id: M29W160DT,
- name: "ST M29W160DT",
- size: 0x00200000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
- { offset: 0x1F0000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x1F8000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x1FC000, erasesize: 0x04000, numblocks: 1 }
+ .mfr_id = MANUFACTURER_ST,
+ .dev_id = M29W800T,
+ .name = "ST M29W800T",
+ .size = 0x00100000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 15 },
+ { .offset = 0x0F0000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x0F8000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x0FC000, .erasesize = 0x04000, .numblocks = 1 }
}
}, {
- mfr_id: MANUFACTURER_ST,
- dev_id: M29W160DB,
- name: "ST M29W160DB",
- size: 0x00200000,
- numeraseregions: 4,
- regions: {
- { offset: 0x000000, erasesize: 0x04000, numblocks: 1 },
- { offset: 0x004000, erasesize: 0x02000, numblocks: 2 },
- { offset: 0x008000, erasesize: 0x08000, numblocks: 1 },
- { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
+ .mfr_id = MANUFACTURER_ST,
+ .dev_id = M29W160DT,
+ .name = "ST M29W160DT",
+ .size = 0x00200000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
+ { .offset = 0x1F0000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x1F8000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x1FC000, .erasesize = 0x04000, .numblocks = 1 }
}
}, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29BDS323D,
- name: "AMD AM29BDS323D",
- size: 0x00400000,
- numeraseregions: 3,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 48 },
- { offset: 0x300000, erasesize: 0x10000, numblocks: 15 },
- { offset: 0x3f0000, erasesize: 0x02000, numblocks: 8 },
+ .mfr_id = MANUFACTURER_ST,
+ .dev_id = M29W160DB,
+ .name = "ST M29W160DB",
+ .size = 0x00200000,
+ .numeraseregions = 4,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x04000, .numblocks = 1 },
+ { .offset = 0x004000, .erasesize = 0x02000, .numblocks = 2 },
+ { .offset = 0x008000, .erasesize = 0x08000, .numblocks = 1 },
+ { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
}
}, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29BDS643D,
- name: "AMD AM29BDS643D",
- size: 0x00800000,
- numeraseregions: 3,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 96 },
- { offset: 0x600000, erasesize: 0x10000, numblocks: 31 },
- { offset: 0x7f0000, erasesize: 0x02000, numblocks: 8 },
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29BDS323D,
+ .name = "AMD AM29BDS323D",
+ .size = 0x00400000,
+ .numeraseregions = 3,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 48 },
+ { .offset = 0x300000, .erasesize = 0x10000, .numblocks = 15 },
+ { .offset = 0x3f0000, .erasesize = 0x02000, .numblocks = 8 },
}
}, {
- mfr_id: MANUFACTURER_ATMEL,
- dev_id: AT49xV16x,
- name: "Atmel AT49xV16x",
- size: 0x00200000,
- numeraseregions: 2,
- regions: {
- { offset: 0x000000, erasesize: 0x02000, numblocks: 8 },
- { offset: 0x010000, erasesize: 0x10000, numblocks: 31 }
+ .mfr_id = MANUFACTURER_ATMEL,
+ .dev_id = AT49xV16x,
+ .name = "Atmel AT49xV16x",
+ .size = 0x00200000,
+ .numeraseregions = 2,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x02000, .numblocks = 8 },
+ { .offset = 0x010000, .erasesize = 0x10000, .numblocks = 31 }
}
}, {
- mfr_id: MANUFACTURER_ATMEL,
- dev_id: AT49xV16xT,
- name: "Atmel AT49xV16xT",
- size: 0x00200000,
- numeraseregions: 2,
- regions: {
- { offset: 0x000000, erasesize: 0x10000, numblocks: 31 },
- { offset: 0x1F0000, erasesize: 0x02000, numblocks: 8 }
+ .mfr_id = MANUFACTURER_ATMEL,
+ .dev_id = AT49xV16xT,
+ .name = "Atmel AT49xV16xT",
+ .size = 0x00200000,
+ .numeraseregions = 2,
+ .regions = {
+ { .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
+ { .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }
}
- }
+ }
};
struct mtd_info *mtd;
@@ -665,7 +664,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
printk("%s: Probing for AMD compatible flash...\n", map->name);
if ((table_pos[0] = probe_new_chip(mtd, 0, NULL, &temp, table,
- sizeof(table)/sizeof(table[0])))
+ ARRAY_SIZE(table)))
== -1) {
printk(KERN_WARNING
"%s: Found no AMD compatible device at location zero\n",
@@ -697,17 +696,17 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
base += (1 << temp.chipshift)) {
int numchips = temp.numchips;
table_pos[numchips] = probe_new_chip(mtd, base, chips,
- &temp, table, sizeof(table)/sizeof(table[0]));
+ &temp, table, ARRAY_SIZE(table));
}
mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
mtd->numeraseregions, GFP_KERNEL);
- if (!mtd->eraseregions) {
+ if (!mtd->eraseregions) {
printk(KERN_WARNING "%s: Failed to allocate "
"memory for MTD erase region info\n", map->name);
kfree(mtd);
map->fldrv_priv = NULL;
- return 0;
+ return NULL;
}
reg_idx = 0;
@@ -740,12 +739,12 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
mtd->type = MTD_NORFLASH;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
- mtd->erase = amd_flash_erase;
- mtd->read = amd_flash_read;
- mtd->write = amd_flash_write;
- mtd->sync = amd_flash_sync;
- mtd->suspend = amd_flash_suspend;
- mtd->resume = amd_flash_resume;
+ mtd->erase = amd_flash_erase;
+ mtd->read = amd_flash_read;
+ mtd->write = amd_flash_write;
+ mtd->sync = amd_flash_sync;
+ mtd->suspend = amd_flash_suspend;
+ mtd->resume = amd_flash_resume;
mtd->lock = amd_flash_lock;
mtd->unlock = amd_flash_unlock;
@@ -769,8 +768,8 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
map->fldrv_priv = private;
map->fldrv = &amd_flash_chipdrv;
- MOD_INC_USE_COUNT;
+ __module_get(THIS_MODULE);
return mtd;
}
@@ -790,7 +789,7 @@ retry:
map->name, chip->state);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
-
+
spin_unlock_bh(chip->mutex);
schedule();
@@ -803,13 +802,13 @@ retry:
timeo = jiffies + HZ;
goto retry;
- }
+ }
adr += chip->start;
chip->state = FL_READY;
- map->copy_from(map, buf, adr, len);
+ map_copy_from(map, buf, adr, len);
wake_up(&chip->wq);
spin_unlock_bh(chip->mutex);
@@ -890,7 +889,7 @@ retry:
map->name, chip->state);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
-
+
spin_unlock_bh(chip->mutex);
schedule();
@@ -902,7 +901,7 @@ retry:
timeo = jiffies + HZ;
goto retry;
- }
+ }
chip->state = FL_WRITING;
@@ -912,8 +911,8 @@ retry:
wide_write(map, datum, adr);
times_left = 500000;
- while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
- if (current->need_resched) {
+ while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
+ if (need_resched()) {
spin_unlock_bh(chip->mutex);
schedule();
spin_lock_bh(chip->mutex);
@@ -971,7 +970,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
u_char tmp_buf[4];
__u32 datum;
- map->copy_from(map, tmp_buf,
+ map_copy_from(map, tmp_buf,
bus_ofs + private->chips[chipnum].start,
map->buswidth);
while (len && i < map->buswidth)
@@ -990,7 +989,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
if (ret) {
return ret;
}
-
+
ofs += n;
buf += n;
(*retlen) += n;
@@ -1003,7 +1002,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
}
}
}
-
+
/* We are now aligned, write as much as possible. */
while(len >= map->buswidth) {
__u32 datum;
@@ -1044,7 +1043,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
u_char tmp_buf[2];
__u32 datum;
- map->copy_from(map, tmp_buf,
+ map_copy_from(map, tmp_buf,
ofs + private->chips[chipnum].start,
map->buswidth);
while (len--) {
@@ -1064,7 +1063,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
if (ret) {
return ret;
}
-
+
(*retlen) += n;
}
@@ -1086,7 +1085,7 @@ retry:
if (chip->state != FL_READY){
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
-
+
spin_unlock_bh(chip->mutex);
schedule();
@@ -1099,7 +1098,7 @@ retry:
timeo = jiffies + HZ;
goto retry;
- }
+ }
chip->state = FL_ERASING;
@@ -1107,30 +1106,30 @@ retry:
ENABLE_VPP(map);
send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);
-
+
timeo = jiffies + (HZ * 20);
spin_unlock_bh(chip->mutex);
- schedule_timeout(HZ);
+ msleep(1000);
spin_lock_bh(chip->mutex);
-
+
while (flash_is_busy(map, adr, private->interleave)) {
if (chip->state != FL_ERASING) {
/* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
-
+
spin_unlock_bh(chip->mutex);
printk(KERN_INFO "%s: erase suspended. Sleeping\n",
map->name);
schedule();
remove_wait_queue(&chip->wq, &wait);
-
+
if (signal_pending(current)) {
return -EINTR;
}
-
+
timeo = jiffies + (HZ*2); /* FIXME */
spin_lock_bh(chip->mutex);
continue;
@@ -1146,15 +1145,15 @@ retry:
return -EIO;
}
-
+
/* Latency issues. Drop the lock, wait a while and retry */
spin_unlock_bh(chip->mutex);
- if (current->need_resched)
+ if (need_resched())
schedule();
else
udelay(1);
-
+
spin_lock_bh(chip->mutex);
}
@@ -1165,7 +1164,7 @@ retry:
__u8 verify;
for (address = adr; address < (adr + size); address++) {
- if ((verify = map->read8(map, address)) != 0xFF) {
+ if ((verify = map_read8(map, address)) != 0xFF) {
error = 1;
break;
}
@@ -1181,7 +1180,7 @@ retry:
return -EIO;
}
}
-
+
DISABLE_VPP(map);
chip->state = FL_READY;
wake_up(&chip->wq);
@@ -1247,7 +1246,7 @@ static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
* with the erase region at that address.
*/
- while ((i < mtd->numeraseregions) &&
+ while ((i < mtd->numeraseregions) &&
((instr->addr + instr->len) >= regions[i].offset)) {
i++;
}
@@ -1294,12 +1293,10 @@ static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
}
}
}
-
+
instr->state = MTD_ERASE_DONE;
- if (instr->callback) {
- instr->callback(instr);
- }
-
+ mtd_erase_callback(instr);
+
return 0;
}
@@ -1327,7 +1324,7 @@ static void amd_flash_sync(struct mtd_info *mtd)
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_SYNCING;
- /* No need to wake_up() on this state change -
+ /* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
@@ -1338,13 +1335,13 @@ static void amd_flash_sync(struct mtd_info *mtd)
default:
/* Not an idle state */
add_wait_queue(&chip->wq, &wait);
-
+
spin_unlock_bh(chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
-
+
goto retry;
}
}
@@ -1354,7 +1351,7 @@ static void amd_flash_sync(struct mtd_info *mtd)
chip = &private->chips[i];
spin_lock_bh(chip->mutex);
-
+
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
diff --git a/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0001.c b/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0001.c
index 58dfc35..1cff34e 100644
--- a/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -4,21 +4,24 @@
*
* (C) 2000 Red Hat. GPL'd
*
- * $Id: cfi_cmdset_0001.c,v 1.87 2001/10/02 15:05:11 dwmw2 Exp $
+ * $Id: cfi_cmdset_0001.c,v 1.191 2006/03/30 14:52:37 nico Exp $
+ *
*
- *
* 10/10/2000 Nicolas Pitre <nico@cam.org>
* - completely revamped method functions so they are aware and
* independent of the flash geometry (buswidth, interleave, etc.)
* - scalability vs code size is completely set at compile-time
* (see include/linux/mtd/cfi.h for selection)
* - optimized write buffer method
+ * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
+ * - reworked lock/unlock/erase support for var size flash
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -26,31 +29,74 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/reboot.h>
+#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
-#include <linux/mtd/cfi.h>
+#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
+#include <linux/mtd/cfi.h>
+
+/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
+/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
+
+// debugging, turns off buffer write mode if set to 1
+#define FORCE_WORD_WRITE 0
+
+#define MANUFACTURER_INTEL 0x0089
+#define I82802AB 0x00ad
+#define I82802AC 0x00ac
+#define MANUFACTURER_ST 0x0020
+#define M50LPW080 0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+#ifdef CONFIG_MTD_OTP
+static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
+static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
+ struct otp_info *, size_t);
+static int cfi_intelext_get_user_prot_info (struct mtd_info *,
+ struct otp_info *, size_t);
+#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
+static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
static void cfi_intelext_destroy(struct mtd_info *);
struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
-static struct mtd_info *cfi_intelext_setup (struct map_info *);
+static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
+static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
+
+static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char **mtdbuf);
+static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
+ size_t len);
+
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
+#include "fwh_lock.h"
+
+
+
+/*
+ * *********** SETUP AND PROBE BITS ***********
+ */
static struct mtd_chip_driver cfi_intelext_chipdrv = {
- probe: NULL, /* Not usable directly */
- destroy: cfi_intelext_destroy,
- name: "cfi_cmdset_0001",
- module: THIS_MODULE
+ .probe = NULL, /* Not usable directly */
+ .destroy = cfi_intelext_destroy,
+ .name = "cfi_cmdset_0001",
+ .module = THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
@@ -59,44 +105,232 @@ static struct mtd_chip_driver cfi_intelext_chipdrv = {
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
- printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
- printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
- printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
- printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
- printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
- printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
- printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
- printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
- printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
- printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
- for (i=9; i<32; i++) {
- if (extp->FeatureSupport & (1<<i))
+ int i;
+ printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
+ printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
+ printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
+ printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
+ printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
+ printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
+ printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
+ printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
+ printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
+ printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
+ printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
+ printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
+ printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
+ for (i=11; i<32; i++) {
+ if (extp->FeatureSupport & (1<<i))
printk(" - Unknown Bit %X: supported\n", i);
}
-
+
printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
for (i=1; i<8; i++) {
if (extp->SuspendCmdSupport & (1<<i))
printk(" - Unknown Bit %X: supported\n", i);
}
-
+
printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
- printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
- for (i=2; i<16; i++) {
+ printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
+ for (i=2; i<3; i++) {
if (extp->BlkStatusRegMask & (1<<i))
printk(" - Unknown Bit %X Active: yes\n",i);
}
-
- printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
- extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
+ printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
+ printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
+ for (i=6; i<16; i++) {
+ if (extp->BlkStatusRegMask & (1<<i))
+ printk(" - Unknown Bit %X Active: yes\n",i);
+ }
+
+ printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
if (extp->VppOptimal)
- printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
- extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
+ printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
+}
+#endif
+
+#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
+/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
+static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
+ "erase on write disabled.\n");
+ extp->SuspendCmdSupport &= ~1;
}
#endif
+#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
+static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+ if (cfip && (cfip->FeatureSupport&4)) {
+ cfip->FeatureSupport &= ~4;
+ printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
+ }
+}
+#endif
+
+static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
+ cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
+}
+
+static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /* Note this is done after the region info is endian swapped */
+ cfi->cfiq->EraseRegionInfo[1] =
+ (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
+}
+
+static void fixup_use_point(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ if (!mtd->point && map_is_linear(map)) {
+ mtd->point = cfi_intelext_point;
+ mtd->unpoint = cfi_intelext_unpoint;
+ }
+}
+
+static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ if (cfi->cfiq->BufWriteTimeoutTyp) {
+ printk(KERN_INFO "Using buffer write method\n" );
+ mtd->write = cfi_intelext_write_buffers;
+ mtd->writev = cfi_intelext_writev;
+ }
+}
+
+static struct cfi_fixup cfi_fixup_table[] = {
+#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
+#endif
+#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
+#endif
+#if !FORCE_WORD_WRITE
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
+#endif
+ { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
+ { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
+ { 0, 0, NULL, NULL }
+};
+
+static struct cfi_fixup jedec_fixup_table[] = {
+ { MANUFACTURER_INTEL, I82802AB, fixup_use_fwh_lock, NULL, },
+ { MANUFACTURER_INTEL, I82802AC, fixup_use_fwh_lock, NULL, },
+ { MANUFACTURER_ST, M50LPW080, fixup_use_fwh_lock, NULL, },
+ { 0, 0, NULL, NULL }
+};
+static struct cfi_fixup fixup_table[] = {
+ /* The CFI vendor IDs and the JEDEC vendor IDs appear
+ * to be common, and it looks like the device IDs are as
+ * well. This table picks up all the cases where we know
+ * that to be true.
+ */
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
+ { 0, 0, NULL, NULL }
+};
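/*
 * Editor's note: the tables above are consumed by cfi_fixup() (in cfi_util.c).
 * The sketch below illustrates how such a table is expected to be walked --
 * it assumes the struct cfi_fixup layout used above ({ mfr, id, fixup, param })
 * and is not part of the applied patch.
 */
static void example_apply_fixups(struct mtd_info *mtd, struct cfi_fixup *table)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f = table; f->fixup; f++) {
		/* CFI_MFR_ANY / CFI_ID_ANY act as wildcards */
		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
			f->fixup(mtd, f->param);
	}
}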
+
+static inline struct cfi_pri_intelext *
+read_pri_intelext(struct map_info *map, __u16 adr)
+{
+ struct cfi_pri_intelext *extp;
+ unsigned int extp_size = sizeof(*extp);
+
+ again:
+ extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
+ if (!extp)
+ return NULL;
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+ printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ return NULL;
+ }
+
+ /* Do some byteswapping if necessary */
+ extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
+ extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
+ extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
+
+ if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
+ unsigned int extra_size = 0;
+ int nb_parts, i;
+
+ /* Protection Register info */
+ extra_size += (extp->NumProtectionFields - 1) *
+ sizeof(struct cfi_intelext_otpinfo);
+
+ /* Burst Read info */
+ extra_size += 2;
+ if (extp_size < sizeof(*extp) + extra_size)
+ goto need_more;
+ extra_size += extp->extra[extra_size-1];
+
+ /* Number of hardware-partitions */
+ extra_size += 1;
+ if (extp_size < sizeof(*extp) + extra_size)
+ goto need_more;
+ nb_parts = extp->extra[extra_size - 1];
+
+ /* skip the sizeof(partregion) field in CFI 1.4 */
+ if (extp->MinorVersion >= '4')
+ extra_size += 2;
+
+ for (i = 0; i < nb_parts; i++) {
+ struct cfi_intelext_regioninfo *rinfo;
+ rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
+ extra_size += sizeof(*rinfo);
+ if (extp_size < sizeof(*extp) + extra_size)
+ goto need_more;
+ rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
+ extra_size += (rinfo->NumBlockTypes - 1)
+ * sizeof(struct cfi_intelext_blockinfo);
+ }
+
+ if (extp->MinorVersion >= '4')
+ extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
+
+ if (extp_size < sizeof(*extp) + extra_size) {
+ need_more:
+ extp_size = sizeof(*extp) + extra_size;
+ kfree(extp);
+ if (extp_size > 4096) {
+ printk(KERN_ERR
+ "%s: cfi_pri_intelext is too fat\n",
+ __FUNCTION__);
+ return NULL;
+ }
+ goto again;
+ }
+ }
+
+ return extp;
+}
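/*
 * Editor's note: for reference, the variable-length extp->extra[] region
 * walked above (extended query versions 1.3/1.4) is laid out roughly as
 * follows.  This is a summary of the parsing code above, not a datasheet
 * quotation:
 *
 *   (NumProtectionFields - 1) x struct cfi_intelext_otpinfo
 *   burst read info: 2 bytes, the second being a count of further
 *                    burst-read bytes that follow
 *   1 byte: number of hardware-partition regions (nb_parts)
 *   (1.4 only) 2 bytes: sizeof(partition region) field, skipped here
 *   nb_parts x ( struct cfi_intelext_regioninfo
 *                + (NumBlockTypes - 1) x struct cfi_intelext_blockinfo )
 *   (1.4 only) struct cfi_intelext_programming_regioninfo
 *
 * When the initial read turns out to be too short, the code simply frees
 * the buffer and retries with a bigger one via the need_more/again labels.
 */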
+
/* This routine is made available to other mtd code via
* inter_module_register. It must only be accessed through
* inter_module_get which will bump the use count of this module. The
@@ -107,105 +341,100 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
+ struct mtd_info *mtd;
int i;
- __u32 base = cfi->chips[0].start;
- if (cfi->cfi_mode) {
- /*
+ mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd) {
+ printk(KERN_ERR "Failed to allocate memory for MTD device\n");
+ return NULL;
+ }
+ memset(mtd, 0, sizeof(*mtd));
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+
+ /* Fill in the default mtd operations */
+ mtd->erase = cfi_intelext_erase_varsize;
+ mtd->read = cfi_intelext_read;
+ mtd->write = cfi_intelext_write_words;
+ mtd->sync = cfi_intelext_sync;
+ mtd->lock = cfi_intelext_lock;
+ mtd->unlock = cfi_intelext_unlock;
+ mtd->suspend = cfi_intelext_suspend;
+ mtd->resume = cfi_intelext_resume;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->name = map->name;
+
+ mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
+
+ if (cfi->cfi_mode == CFI_MODE_CFI) {
+ /*
* It's a real CFI chip, not one for which the probe
* routine faked a CFI structure. So we read the feature
* table from it.
*/
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
struct cfi_pri_intelext *extp;
- int ofs_factor = cfi->interleave * cfi->device_type;
- //printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
- if (!adr)
- return NULL;
-
- /* Switch it into Query Mode */
- cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
-
- extp = kmalloc(sizeof(*extp), GFP_KERNEL);
+ extp = read_pri_intelext(map, adr);
if (!extp) {
- printk(KERN_ERR "Failed to allocate memory\n");
- return NULL;
- }
-
- /* Read in the Extended Query Table */
- for (i=0; i<sizeof(*extp); i++) {
- ((unsigned char *)extp)[i] =
- cfi_read_query(map, (base+((adr+i)*ofs_factor)));
- }
-
- if (extp->MajorVersion != '1' ||
- (extp->MinorVersion < '0' || extp->MinorVersion > '2')) {
- printk(KERN_WARNING " Unknown IntelExt Extended Query "
- "version %c.%c.\n", extp->MajorVersion,
- extp->MinorVersion);
- kfree(extp);
+ kfree(mtd);
return NULL;
}
-
- /* Do some byteswapping if necessary */
- extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
- extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
-
+
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
+
+ cfi_fixup(mtd, cfi_fixup_table);
+
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
cfi_tell_features(extp);
-#endif
+#endif
- /* Install our own private info structure */
- cfi->cmdset_priv = extp;
- }
+ if(extp->SuspendCmdSupport & 1) {
+ printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
+ }
+ }
+ else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
+ /* Apply jedec specific fixups */
+ cfi_fixup(mtd, jedec_fixup_table);
+ }
+ /* Apply generic fixups */
+ cfi_fixup(mtd, fixup_table);
for (i=0; i< cfi->numchips; i++) {
- cfi->chips[i].word_write_time = 128;
- cfi->chips[i].buffer_write_time = 128;
- cfi->chips[i].erase_time = 1024;
- }
+ cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
+ cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
+ cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
+ cfi->chips[i].ref_point_counter = 0;
+ }
map->fldrv = &cfi_intelext_chipdrv;
- MOD_INC_USE_COUNT;
-
- /* Make sure it's in read mode */
- cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
- return cfi_intelext_setup(map);
+
+ return cfi_intelext_setup(mtd);
}
-static struct mtd_info *cfi_intelext_setup(struct map_info *map)
+static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
+ struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- struct mtd_info *mtd;
unsigned long offset = 0;
int i,j;
unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
- mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
- if (!mtd) {
- printk(KERN_ERR "Failed to allocate memory for MTD device\n");
- kfree(cfi->cmdset_priv);
- return NULL;
- }
-
- memset(mtd, 0, sizeof(*mtd));
- mtd->priv = map;
- mtd->type = MTD_NORFLASH;
mtd->size = devsize * cfi->numchips;
mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
- mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
+ mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
* mtd->numeraseregions, GFP_KERNEL);
- if (!mtd->eraseregions) {
+ if (!mtd->eraseregions) {
printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
- kfree(cfi->cmdset_priv);
- return NULL;
+ goto setup_err;
}
-
+
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
unsigned long ernum, ersize;
ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
@@ -220,177 +449,811 @@ static struct mtd_info *cfi_intelext_setup(struct map_info *map)
mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
}
offset += (ersize * ernum);
+ }
+
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ goto setup_err;
+ }
+
+ for (i=0; i<mtd->numeraseregions;i++){
+ printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
+ i,mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+ }
+
+#ifdef CONFIG_MTD_OTP
+ mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
+ mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
+ mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
+ mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
+ mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
+ mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
+#endif
+
+ /* This function has the potential to distort the reality
+ a bit and therefore should be called last. */
+ if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
+ goto setup_err;
+
+ __module_get(THIS_MODULE);
+ register_reboot_notifier(&mtd->reboot_notifier);
+ return mtd;
+
+ setup_err:
+ if(mtd) {
+ kfree(mtd->eraseregions);
+ kfree(mtd);
+ }
+ kfree(cfi->cmdset_priv);
+ return NULL;
+}
+
+static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
+ struct cfi_private **pcfi)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = *pcfi;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+ /*
+ * Probing of multi-partition flash chips.
+ *
+ * To support multiple partitions when available, we simply arrange
+ * for each of them to have their own flchip structure even if they
+ * are on the same physical chip. This means completely recreating
+ * a new cfi_private structure right here which is a blatant code
+ * layering violation, but this is still the least intrusive
+ * arrangement at this point. This can be rearranged in the future
+ * if someone feels motivated enough. --nico
+ */
+ if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
+ && extp->FeatureSupport & (1 << 9)) {
+ struct cfi_private *newcfi;
+ struct flchip *chip;
+ struct flchip_shared *shared;
+ int offs, numregions, numparts, partshift, numvirtchips, i, j;
+
+ /* Protection Register info */
+ offs = (extp->NumProtectionFields - 1) *
+ sizeof(struct cfi_intelext_otpinfo);
+
+ /* Burst Read info */
+ offs += extp->extra[offs+1]+2;
+
+ /* Number of partition regions */
+ numregions = extp->extra[offs];
+ offs += 1;
+
+ /* skip the sizeof(partregion) field in CFI 1.4 */
+ if (extp->MinorVersion >= '4')
+ offs += 2;
+
+ /* Number of hardware partitions */
+ numparts = 0;
+ for (i = 0; i < numregions; i++) {
+ struct cfi_intelext_regioninfo *rinfo;
+ rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
+ numparts += rinfo->NumIdentPartitions;
+ offs += sizeof(*rinfo)
+ + (rinfo->NumBlockTypes - 1) *
+ sizeof(struct cfi_intelext_blockinfo);
}
- if (offset != devsize) {
- /* Argh */
- printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
- kfree(mtd->eraseregions);
- kfree(cfi->cmdset_priv);
- return NULL;
+ /* Programming Region info */
+ if (extp->MinorVersion >= '4') {
+ struct cfi_intelext_programming_regioninfo *prinfo;
+ prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
+ MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift;
+ MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
+ MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
+ mtd->flags |= MTD_PROGRAM_REGIONS;
+ printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
+ map->name, MTD_PROGREGION_SIZE(mtd),
+ MTD_PROGREGION_CTRLMODE_VALID(mtd),
+ MTD_PROGREGION_CTRLMODE_INVALID(mtd));
}
- for (i=0; i<mtd->numeraseregions;i++){
- printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
- i,mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].numblocks);
+ /*
+ * All functions below currently rely on all chips having
+ * the same geometry so we'll just assume that all hardware
+ * partitions are of the same size too.
+ */
+ partshift = cfi->chipshift - __ffs(numparts);
+
+ if ((1 << partshift) < mtd->erasesize) {
+ printk( KERN_ERR
+ "%s: bad number of hw partitions (%d)\n",
+ __FUNCTION__, numparts);
+ return -EINVAL;
}
- /* Also select the correct geometry setup too */
- mtd->erase = cfi_intelext_erase_varsize;
- mtd->read = cfi_intelext_read;
- if ( cfi->cfiq->BufWriteTimeoutTyp ) {
- //printk(KERN_INFO "Using buffer write method\n" );
- mtd->write = cfi_intelext_write_buffers;
- } else {
- //printk(KERN_INFO "Using word write method\n" );
- mtd->write = cfi_intelext_write_words;
+ numvirtchips = cfi->numchips * numparts;
+ newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
+ if (!newcfi)
+ return -ENOMEM;
+ shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
+ if (!shared) {
+ kfree(newcfi);
+ return -ENOMEM;
+ }
+ memcpy(newcfi, cfi, sizeof(struct cfi_private));
+ newcfi->numchips = numvirtchips;
+ newcfi->chipshift = partshift;
+
+ chip = &newcfi->chips[0];
+ for (i = 0; i < cfi->numchips; i++) {
+ shared[i].writing = shared[i].erasing = NULL;
+ spin_lock_init(&shared[i].lock);
+ for (j = 0; j < numparts; j++) {
+ *chip = cfi->chips[i];
+ chip->start += j << partshift;
+ chip->priv = &shared[i];
+ /* those should be reset too since
+ they create memory references. */
+ init_waitqueue_head(&chip->wq);
+ spin_lock_init(&chip->_spinlock);
+ chip->mutex = &chip->_spinlock;
+ chip++;
+ }
+ }
+
+ printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
+ "--> %d partitions of %d KiB\n",
+ map->name, cfi->numchips, cfi->interleave,
+ newcfi->numchips, 1<<(newcfi->chipshift-10));
+
+ map->fldrv_priv = newcfi;
+ *pcfi = newcfi;
+ kfree(cfi);
}
- mtd->sync = cfi_intelext_sync;
- mtd->lock = cfi_intelext_lock;
- mtd->unlock = cfi_intelext_unlock;
- mtd->suspend = cfi_intelext_suspend;
- mtd->resume = cfi_intelext_resume;
- mtd->flags = MTD_CAP_NORFLASH;
- map->fldrv = &cfi_intelext_chipdrv;
- MOD_INC_USE_COUNT;
- mtd->name = map->name;
- return mtd;
+
+ return 0;
}
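/*
 * Editor's note: a worked example of the partshift arithmetic above, using
 * made-up numbers.  A 64 MiB chip (cfi->chipshift = 26) advertising 16
 * identical hardware partitions gives
 *
 *	partshift = 26 - __ffs(16) = 26 - 4 = 22
 *
 * i.e. sixteen 4 MiB virtual chips, each with its own flchip structure but
 * all sharing one flchip_shared per physical chip.
 */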
+/*
+ * *********** CHIP ACCESS FUNCTIONS ***********
+ */
-static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
- __u32 status, status_OK;
- unsigned long timeo;
DECLARE_WAITQUEUE(wait, current);
- int suspended = 0;
- unsigned long cmd_addr;
struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
+ unsigned long timeo;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
- adr += chip->start;
-
- /* Ensure cmd read/writes are aligned. */
- cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1);
-
- /* Let's determine this according to the interleave only once */
- status_OK = CMD(0x80);
-
+ resettime:
timeo = jiffies + HZ;
retry:
- spin_lock_bh(chip->mutex);
+ if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
+ /*
+ * OK. We may have contention on the write/erase
+ * operations which are global to the real chip and not per
+ * partition. So let's fight it over in the partition which
+ * currently has authority on the operation.
+ *
+ * The rules are as follows:
+ *
+ * - any write operation must own shared->writing.
+ *
+ * - any erase operation must own _both_ shared->writing and
+ * shared->erasing.
+ *
+ * - contention arbitration is handled in the owner's context.
+ *
+ * The 'shared' struct can be read and/or written only when
+ * its lock is taken.
+ */
+ struct flchip_shared *shared = chip->priv;
+ struct flchip *contender;
+ spin_lock(&shared->lock);
+ contender = shared->writing;
+ if (contender && contender != chip) {
+ /*
+ * The engine to perform desired operation on this
+ * partition is already in use by someone else.
+ * Let's fight over it in the context of the chip
+ * currently using it. If it is possible to suspend,
+ * that other partition will do just that, otherwise
+ * it'll happily send us to sleep. In any case, when
+ * get_chip returns success we're clear to go ahead.
+ */
+ int ret = spin_trylock(contender->mutex);
+ spin_unlock(&shared->lock);
+ if (!ret)
+ goto retry;
+ spin_unlock(chip->mutex);
+ ret = get_chip(map, contender, contender->start, mode);
+ spin_lock(chip->mutex);
+ if (ret) {
+ spin_unlock(contender->mutex);
+ return ret;
+ }
+ timeo = jiffies + HZ;
+ spin_lock(&shared->lock);
+ spin_unlock(contender->mutex);
+ }
+
+ /* We now own it */
+ shared->writing = chip;
+ if (mode == FL_ERASING)
+ shared->erasing = chip;
+ spin_unlock(&shared->lock);
+ }
- /* Check that the chip's ready to talk to us.
- * If it's in FL_ERASING state, suspend it and make it talk now.
- */
switch (chip->state) {
+
+ case FL_STATUS:
+ for (;;) {
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* At this point we're fine with write operations
+ in other partitions as they don't conflict. */
+ if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
+ map->name, status.x[0]);
+ return -EIO;
+ }
+ spin_unlock(chip->mutex);
+ cfi_udelay(1);
+ spin_lock(chip->mutex);
+ /* Someone else might have been playing with it. */
+ goto retry;
+ }
+
+ case FL_READY:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ return 0;
+
case FL_ERASING:
- if (!((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2)
- goto sleep; /* We don't support erase suspend */
-
- cfi_write (map, CMD(0xb0), cmd_addr);
+ if (!cfip ||
+ !(cfip->FeatureSupport & 2) ||
+ !(mode == FL_READY || mode == FL_POINT ||
+ (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
+ goto sleep;
+
+
+ /* Erase suspend */
+ map_write(map, CMD(0xB0), adr);
+
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
- cfi_write(map, CMD(0x70), cmd_addr);
+ map_write(map, CMD(0x70), adr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
- // printk("Erase suspending at 0x%lx\n", cmd_addr);
+ chip->erase_suspended = 1;
for (;;) {
- status = cfi_read(map, cmd_addr);
- if ((status & status_OK) == status_OK)
- break;
-
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
if (time_after(jiffies, timeo)) {
- /* Urgh */
- cfi_write(map, CMD(0xd0), cmd_addr);
- /* make sure we're in 'read status' mode */
- cfi_write(map, CMD(0x70), cmd_addr);
+ /* Urgh. Resume and pretend we weren't here. */
+ map_write(map, CMD(0xd0), adr);
+ /* Make sure we're in 'read status' mode if it had finished */
+ map_write(map, CMD(0x70), adr);
chip->state = FL_ERASING;
- spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "Chip not ready after erase "
- "suspended: status = 0x%x\n", status);
+ chip->oldstate = FL_READY;
+ printk(KERN_ERR "%s: Chip not ready after erase "
+ "suspended: status = 0x%lx\n", map->name, status.x[0]);
return -EIO;
}
-
- spin_unlock_bh(chip->mutex);
+
+ spin_unlock(chip->mutex);
cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ spin_lock(chip->mutex);
+ /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
+ So we can just loop here. */
}
-
- suspended = 1;
- cfi_write(map, CMD(0xff), cmd_addr);
- chip->state = FL_READY;
- break;
-
-#if 0
- case FL_WRITING:
- /* Not quite yet */
-#endif
-
- case FL_READY:
- break;
-
- case FL_CFI_QUERY:
- case FL_JEDEC_QUERY:
- cfi_write(map, CMD(0x70), cmd_addr);
chip->state = FL_STATUS;
+ return 0;
- case FL_STATUS:
- status = cfi_read(map, cmd_addr);
- if ((status & status_OK) == status_OK) {
- cfi_write(map, CMD(0xff), cmd_addr);
- chip->state = FL_READY;
- break;
- }
-
- /* Urgh. Chip not yet ready to talk to us. */
- if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %x\n", status);
- return -EIO;
- }
+ case FL_XIP_WHILE_ERASING:
+ if (mode != FL_READY && mode != FL_POINT &&
+ (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
+ goto sleep;
+ chip->oldstate = chip->state;
+ chip->state = FL_READY;
+ return 0;
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- goto retry;
+ case FL_POINT:
+ /* Only if there's no operation suspended... */
+ if (mode == FL_READY && chip->oldstate == FL_READY)
+ return 0;
default:
sleep:
- /* Stick ourselves on a wait queue to be woken when
- someone changes the status */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
+ spin_unlock(chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + HZ;
- goto retry;
+ spin_lock(chip->mutex);
+ goto resettime;
}
+}
- map->copy_from(map, buf, adr, len);
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (chip->priv) {
+ struct flchip_shared *shared = chip->priv;
+ spin_lock(&shared->lock);
+ if (shared->writing == chip && chip->oldstate == FL_READY) {
+ /* We own the ability to write, but we're done */
+ shared->writing = shared->erasing;
+ if (shared->writing && shared->writing != chip) {
+ /* give back ownership to who we loaned it from */
+ struct flchip *loaner = shared->writing;
+ spin_lock(loaner->mutex);
+ spin_unlock(&shared->lock);
+ spin_unlock(chip->mutex);
+ put_chip(map, loaner, loaner->start);
+ spin_lock(chip->mutex);
+ spin_unlock(loaner->mutex);
+ wake_up(&chip->wq);
+ return;
+ }
+ shared->erasing = NULL;
+ shared->writing = NULL;
+ } else if (shared->erasing == chip && shared->writing != chip) {
+ /*
+ * We own the ability to erase without the ability
+ * to write, which means the erase was suspended
+ * and some other partition is currently writing.
+ * Don't let the switch below mess things up since
+ * we don't have ownership to resume anything.
+ */
+ spin_unlock(&shared->lock);
+ wake_up(&chip->wq);
+ return;
+ }
+ spin_unlock(&shared->lock);
+ }
- if (suspended) {
+ switch(chip->oldstate) {
+ case FL_ERASING:
chip->state = chip->oldstate;
- /* What if one interleaved chip has finished and the
+ /* What if one interleaved chip has finished and the
other hasn't? The old code would leave the finished
- one in READY mode. That's bad, and caused -EROFS
+ one in READY mode. That's bad, and caused -EROFS
errors to be returned from do_erase_oneblock because
that's the only bit it checked for at the time.
- As the state machine appears to explicitly allow
+ As the state machine appears to explicitly allow
sending the 0x70 (Read Status) command to an erasing
- chip and expecting it to be ignored, that's what we
+ chip and expecting it to be ignored, that's what we
do. */
- cfi_write(map, CMD(0xd0), cmd_addr);
- cfi_write(map, CMD(0x70), cmd_addr);
- }
+ map_write(map, CMD(0xd0), adr);
+ map_write(map, CMD(0x70), adr);
+ chip->oldstate = FL_READY;
+ chip->state = FL_ERASING;
+ break;
+
+ case FL_XIP_WHILE_ERASING:
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ break;
+ case FL_READY:
+ case FL_STATUS:
+ case FL_JEDEC_QUERY:
+ /* We should really make set_vpp() count, rather than doing this */
+ DISABLE_VPP(map);
+ break;
+ default:
+ printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
+ }
wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+}
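/*
 * Editor's note: a minimal sketch of the calling convention the chip access
 * functions above establish (see do_read_onechip further down for a real
 * instance).  The function name is illustrative only.
 */
static int example_chip_access(struct map_info *map, struct flchip *chip,
			       unsigned long adr)
{
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_READY);	/* may sleep, or suspend an erase */
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* ... issue flash commands / access the array here ... */

	put_chip(map, chip, adr);			/* resume whatever we suspended */
	spin_unlock(chip->mutex);
	return 0;
}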
+
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * No interrupt whatsoever can be serviced while the flash isn't in array
+ * mode. This is ensured by the xip_disable() and xip_enable() functions
+ * enclosing any code path where the flash is known not to be in array mode.
+ * And within a XIP disabled code path, only functions marked with __xipram
+ * may be called and nothing else (it's a good thing to inspect generated
+ * assembly to make sure inline functions were actually inlined and that gcc
+ * didn't emit calls to its own support functions). Configuring MTD CFI
+ * support for a single buswidth and a single interleave is also recommended.
+ */
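/*
 * Editor's note: the typical bracketing used together with the __xipram
 * helpers below (compare do_write_oneword further down); shown here as an
 * illustration only:
 *
 *	xip_disable(map, chip, adr);	-- flash may leave array mode, IRQs masked
 *	map_write(map, write_cmd, adr);
 *	map_write(map, datum, adr);
 *	... wait for the status register to report completion ...
 *	xip_enable(map, chip, adr);	-- back to array mode, IRQs unmasked
 */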
+
+static void xip_disable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ /* TODO: chips with no XIP use should ignore and return */
+ (void) map_read(map, adr); /* ensure mmu mapping is up to date */
+ local_irq_disable();
+}
+
+static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xff), adr);
+ chip->state = FL_READY;
+ }
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+}
+
+/*
+ * When a delay is required for the flash operation to complete, the
+ * xip_wait_for_operation() function polls for both the given timeout
+ * and pending (but still masked) hardware interrupts. Whenever an
+ * interrupt is pending, the flash erase or write operation is suspended,
+ * array mode restored and interrupts unmasked. Task scheduling might also
+ * happen at that point. The CPU eventually returns from the interrupt or
+ * the call to schedule() and the suspended flash operation is resumed for
+ * the remainder of the delay period.
+ *
+ * Warning: this function _will_ fool interrupt latency tracing tools.
+ */
+
+static int __xipram xip_wait_for_operation(
+ struct map_info *map, struct flchip *chip,
+ unsigned long adr, int *chip_op_time )
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+ map_word status, OK = CMD(0x80);
+ unsigned long usec, suspended, start, done;
+ flstate_t oldstate, newstate;
+
+ start = xip_currtime();
+ usec = *chip_op_time * 8;
+ if (usec == 0)
+ usec = 500000;
+ done = 0;
+
+ do {
+ cpu_relax();
+ if (xip_irqpending() && cfip &&
+ ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
+ (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
+ (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
+ /*
+ * Let's suspend the erase or write operation when
+ * supported. Note that we currently don't try to
+ * suspend interleaved chips if there is already
+ * another operation suspended (imagine what happens
+ * when one chip was already done with the current
+ * operation while another chip suspended it, then
+ * we resume the whole thing at once). Yes, it
+ * can happen!
+ */
+ usec -= done;
+ map_write(map, CMD(0xb0), adr);
+ map_write(map, CMD(0x70), adr);
+ suspended = xip_currtime();
+ do {
+ if (xip_elapsed_since(suspended) > 100000) {
+ /*
+ * The chip doesn't want to suspend
+ * after waiting for 100 msecs.
+ * This is a critical error but there
+ * is not much we can do here.
+ */
+ return -EIO;
+ }
+ status = map_read(map, adr);
+ } while (!map_word_andequal(map, status, OK, OK));
+
+ /* Suspend succeeded */
+ oldstate = chip->state;
+ if (oldstate == FL_ERASING) {
+ if (!map_word_bitsset(map, status, CMD(0x40)))
+ break;
+ newstate = FL_XIP_WHILE_ERASING;
+ chip->erase_suspended = 1;
+ } else {
+ if (!map_word_bitsset(map, status, CMD(0x04)))
+ break;
+ newstate = FL_XIP_WHILE_WRITING;
+ chip->write_suspended = 1;
+ }
+ chip->state = newstate;
+ map_write(map, CMD(0xff), adr);
+ (void) map_read(map, adr);
+ asm volatile (".rep 8; nop; .endr");
+ local_irq_enable();
+ spin_unlock(chip->mutex);
+ asm volatile (".rep 8; nop; .endr");
+ cond_resched();
+
+ /*
+ * We're back. However someone else might have
+ * decided to go write to the chip if we are in
+ * a suspended erase state. If so let's wait
+ * until it's done.
+ */
+ spin_lock(chip->mutex);
+ while (chip->state != newstate) {
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ spin_lock(chip->mutex);
+ }
+ /* Disallow XIP again */
+ local_irq_disable();
+
+ /* Resume the write or erase operation */
+ map_write(map, CMD(0xd0), adr);
+ map_write(map, CMD(0x70), adr);
+ chip->state = oldstate;
+ start = xip_currtime();
+ } else if (usec >= 1000000/HZ) {
+ /*
+ * Try to save on CPU power when the wait delay
+ * is at least a system timer tick period.
+ * No need to be extremely accurate here.
+ */
+ xip_cpu_idle();
+ }
+ status = map_read(map, adr);
+ done = xip_elapsed_since(start);
+ } while (!map_word_andequal(map, status, OK, OK)
+ && done < usec);
+
+ return (done >= usec) ? -ETIME : 0;
+}
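/*
 * Editor's note: *chip_op_time is the typical operation time in microseconds
 * (see cfi_cmdset_0001() above, e.g. word_write_time = 1 << WordWriteTimeoutTyp),
 * so the overall budget here is eight times the typical value.  For instance a
 * typical word write time of 8 us allows up to 64 us before -ETIME, while a
 * reported typical time of zero falls back to a generous 500 ms.
 */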
+
+/*
+ * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
+ * the flash is actively programming or erasing since we have to poll for
+ * the operation to complete anyway. We can't do that in a generic way with
+ * a XIP setup so do it before the actual flash operation in this case
+ * and stub it out from INVAL_CACHE_AND_WAIT.
+ */
+#define XIP_INVAL_CACHED_RANGE(map, from, size) \
+ INVALIDATE_CACHED_RANGE(map, from, size)
+
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
+ xip_wait_for_operation(map, chip, cmd_adr, p_usec)
+
+#else
+
+#define xip_disable(map, chip, adr)
+#define xip_enable(map, chip, adr)
+#define XIP_INVAL_CACHED_RANGE(x...)
+#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
+
+static int inval_cache_and_wait_for_operation(
+ struct map_info *map, struct flchip *chip,
+ unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
+ int *chip_op_time )
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK = CMD(0x80);
+ int z, chip_state = chip->state;
+ unsigned long timeo;
+
+ spin_unlock(chip->mutex);
+ if (inval_len)
+ INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
+ if (*chip_op_time)
+ cfi_udelay(*chip_op_time);
+ spin_lock(chip->mutex);
+
+ timeo = *chip_op_time * 8 * HZ / 1000000;
+ if (timeo < HZ/2)
+ timeo = HZ/2;
+ timeo += jiffies;
+
+ z = 0;
+ for (;;) {
+ if (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ spin_lock(chip->mutex);
+ continue;
+ }
+
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ return -ETIME;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ z++;
+ spin_unlock(chip->mutex);
+ cfi_udelay(1);
+ spin_lock(chip->mutex);
+ }
+
+ if (!z) {
+ if (!--(*chip_op_time))
+ *chip_op_time = 1;
+ } else if (z > 1)
+ ++(*chip_op_time);
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ return 0;
+}
+
+#endif
+
+#define WAIT_TIMEOUT(map, chip, adr, udelay) \
+ ({ int __udelay = (udelay); \
+ INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
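/*
 * Editor's note: the statement expression above exists so that a caller can
 * pass a plain integer (even a constant) timeout while the underlying wait
 * helper still receives the writable int pointer it expects -- the non-XIP
 * variant adjusts *chip_op_time adaptively, and that adjustment must not
 * clobber anything persistent here.
 */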
+
+
+static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
+{
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret = 0;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ spin_lock(chip->mutex);
+
+ ret = get_chip(map, chip, cmd_addr, FL_POINT);
+
+ if (!ret) {
+ if (chip->state != FL_POINT && chip->state != FL_READY)
+ map_write(map, CMD(0xff), cmd_addr);
+
+ chip->state = FL_POINT;
+ chip->ref_point_counter++;
+ }
+ spin_unlock(chip->mutex);
+
+ return ret;
+}
+
+static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+ if (!map->virt || (from + len > mtd->size))
+ return -EINVAL;
+
+ *mtdbuf = (void *)map->virt + from;
+ *retlen = 0;
+
+ /* Now lock the chip(s) to POINT state */
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return 0;
+}
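/*
 * Editor's note: a hedged sketch of how a client could use the point/unpoint
 * pair installed by fixup_use_point() above, assuming mtd->point was set up
 * (linear map).  The caller gets a pointer straight into the mapping instead
 * of a buffered copy; the function name is made up for illustration.
 */
static int example_use_point(struct mtd_info *mtd, loff_t from, size_t len)
{
	size_t retlen;
	u_char *virt;
	int ret;

	ret = mtd->point(mtd, from, len, &retlen, &virt);
	if (ret)
		return ret;

	/* ... read directly from virt[0 .. retlen-1] ... */

	mtd->unpoint(mtd, virt, from, retlen);
	return 0;
}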
+
+static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+
+ /* Now unlock the chip(s) POINT state */
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ while (len) {
+ unsigned long thislen;
+ struct flchip *chip;
+
+ chip = &cfi->chips[chipnum];
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ spin_lock(chip->mutex);
+ if (chip->state == FL_POINT) {
+ chip->ref_point_counter--;
+ if(chip->ref_point_counter == 0)
+ chip->state = FL_READY;
+ } else
+ printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */
+
+ put_chip(map, chip, chip->start);
+ spin_unlock(chip->mutex);
+
+ len -= thislen;
+ ofs = 0;
+ chipnum++;
+ }
+}
+
+static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+{
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, cmd_addr, FL_READY);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xff), cmd_addr);
+
+ chip->state = FL_READY;
+ }
+
+ map_copy_from(map, buf, adr, len);
+
+ put_chip(map, chip, cmd_addr);
+
+ spin_unlock(chip->mutex);
return 0;
}
@@ -426,140 +1289,83 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz
*retlen += thislen;
len -= thislen;
buf += thislen;
-
+
ofs = 0;
chipnum++;
}
return ret;
}
-static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u32 datum)
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum, int mode)
{
struct cfi_private *cfi = map->fldrv_priv;
- __u32 status, status_OK;
- unsigned long timeo;
- DECLARE_WAITQUEUE(wait, current);
- int z;
+ map_word status, write_cmd;
+ int ret=0;
adr += chip->start;
- /* Let's determine this according to the interleave only once */
- status_OK = CMD(0x80);
-
- timeo = jiffies + HZ;
- retry:
- spin_lock_bh(chip->mutex);
-
- /* Check that the chip's ready to talk to us.
- * Later, we can actually think about interrupting it
- * if it's in FL_ERASING state.
- * Not just yet, though.
- */
- switch (chip->state) {
- case FL_READY:
+ switch (mode) {
+ case FL_WRITING:
+ write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
+ break;
+ case FL_OTP_WRITE:
+ write_cmd = CMD(0xc0);
break;
-
- case FL_CFI_QUERY:
- case FL_JEDEC_QUERY:
- cfi_write(map, CMD(0x70), adr);
- chip->state = FL_STATUS;
-
- case FL_STATUS:
- status = cfi_read(map, adr);
- if ((status & status_OK) == status_OK)
- break;
-
- /* Urgh. Chip not yet ready to talk to us. */
- if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "waiting for chip to be ready timed out in read\n");
- return -EIO;
- }
-
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- goto retry;
-
default:
- /* Stick ourselves on a wait queue to be woken when
- someone changes the status */
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + HZ;
- goto retry;
+ return -EINVAL;
}
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, adr, mode);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+
+ XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
ENABLE_VPP(map);
- cfi_write(map, CMD(0x40), adr);
- cfi_write(map, datum, adr);
- chip->state = FL_WRITING;
+ xip_disable(map, chip, adr);
+ map_write(map, write_cmd, adr);
+ map_write(map, datum, adr);
+ chip->state = mode;
+
+ ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ adr, map_bankwidth(map),
+ &chip->word_write_time);
+ if (ret) {
+ xip_enable(map, chip, adr);
+ printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
+ goto out;
+ }
- spin_unlock_bh(chip->mutex);
- cfi_udelay(chip->word_write_time);
- spin_lock_bh(chip->mutex);
+ /* check for errors */
+ status = map_read(map, adr);
+ if (map_word_bitsset(map, status, CMD(0x1a))) {
+ unsigned long chipstatus = MERGESTATUS(status);
- timeo = jiffies + (HZ/2);
- z = 0;
- for (;;) {
- if (chip->state != FL_WRITING) {
- /* Someone's suspended the write. Sleep */
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock_bh(chip->mutex);
- continue;
- }
+ /* reset status */
+ map_write(map, CMD(0x50), adr);
+ map_write(map, CMD(0x70), adr);
+ xip_enable(map, chip, adr);
- status = cfi_read(map, adr);
- if ((status & status_OK) == status_OK)
- break;
-
- /* OK Still waiting */
- if (time_after(jiffies, timeo)) {
- chip->state = FL_STATUS;
- DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
- return -EIO;
+ if (chipstatus & 0x02) {
+ ret = -EROFS;
+ } else if (chipstatus & 0x08) {
+ printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
+ ret = -EIO;
+ } else {
+ printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
+ ret = -EINVAL;
}
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- z++;
- cfi_udelay(1);
- spin_lock_bh(chip->mutex);
- }
- if (!z) {
- chip->word_write_time--;
- if (!chip->word_write_time)
- chip->word_write_time++;
+ goto out;
}
- if (z > 1)
- chip->word_write_time++;
- /* Done and happy. */
- DISABLE_VPP(map);
- chip->state = FL_STATUS;
- /* check for lock bit */
- if (status & CMD(0x02)) {
- /* clear status */
- cfi_write(map, CMD(0x50), adr);
- /* put back into read status register mode */
- cfi_write(map, CMD(0x70), adr);
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
- return -EROFS;
- }
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
- return 0;
+ xip_enable(map, chip, adr);
+ out: put_chip(map, chip, adr);
+ spin_unlock(chip->mutex);
+ return ret;
}
@@ -579,597 +1385,392 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le
ofs = to - (chipnum << cfi->chipshift);
/* If it's not bus-aligned, do the first byte write */
- if (ofs & (CFIDEV_BUSWIDTH-1)) {
- unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
+ if (ofs & (map_bankwidth(map)-1)) {
+ unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
int gap = ofs - bus_ofs;
- int i = 0, n = 0;
- u_char tmp_buf[4];
- __u32 datum;
-
- while (gap--)
- tmp_buf[i++] = 0xff;
- while (len && i < CFIDEV_BUSWIDTH)
- tmp_buf[i++] = buf[n++], len--;
- while (i < CFIDEV_BUSWIDTH)
- tmp_buf[i++] = 0xff;
-
- if (cfi_buswidth_is_2()) {
- datum = *(__u16*)tmp_buf;
- } else if (cfi_buswidth_is_4()) {
- datum = *(__u32*)tmp_buf;
- } else {
- return -EINVAL; /* should never happen, but be safe */
- }
+ int n;
+ map_word datum;
+
+ n = min_t(int, len, map_bankwidth(map)-gap);
+ datum = map_word_ff(map);
+ datum = map_word_load_partial(map, datum, buf, gap, n);
ret = do_write_oneword(map, &cfi->chips[chipnum],
- bus_ofs, datum);
- if (ret)
+ bus_ofs, datum, FL_WRITING);
+ if (ret)
return ret;
-
+
+ len -= n;
ofs += n;
buf += n;
(*retlen) += n;
if (ofs >> cfi->chipshift) {
- chipnum ++;
+ chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
-
- while(len >= CFIDEV_BUSWIDTH) {
- __u32 datum;
-
- if (cfi_buswidth_is_1()) {
- datum = *(__u8*)buf;
- } else if (cfi_buswidth_is_2()) {
- datum = *(__u16*)buf;
- } else if (cfi_buswidth_is_4()) {
- datum = *(__u32*)buf;
- } else {
- return -EINVAL;
- }
+
+ while(len >= map_bankwidth(map)) {
+ map_word datum = map_word_load(map, buf);
ret = do_write_oneword(map, &cfi->chips[chipnum],
- ofs, datum);
+ ofs, datum, FL_WRITING);
if (ret)
return ret;
- ofs += CFIDEV_BUSWIDTH;
- buf += CFIDEV_BUSWIDTH;
- (*retlen) += CFIDEV_BUSWIDTH;
- len -= CFIDEV_BUSWIDTH;
+ ofs += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ (*retlen) += map_bankwidth(map);
+ len -= map_bankwidth(map);
if (ofs >> cfi->chipshift) {
- chipnum ++;
+ chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
- if (len & (CFIDEV_BUSWIDTH-1)) {
- int i = 0, n = 0;
- u_char tmp_buf[4];
- __u32 datum;
+ if (len & (map_bankwidth(map)-1)) {
+ map_word datum;
- while (len--)
- tmp_buf[i++] = buf[n++];
- while (i < CFIDEV_BUSWIDTH)
- tmp_buf[i++] = 0xff;
-
- if (cfi_buswidth_is_2()) {
- datum = *(__u16*)tmp_buf;
- } else if (cfi_buswidth_is_4()) {
- datum = *(__u32*)tmp_buf;
- } else {
- return -EINVAL; /* should never happen, but be safe */
- }
+ datum = map_word_ff(map);
+ datum = map_word_load_partial(map, datum, buf, 0, len);
ret = do_write_oneword(map, &cfi->chips[chipnum],
- ofs, datum);
- if (ret)
+ ofs, datum, FL_WRITING);
+ if (ret)
return ret;
-
- (*retlen) += n;
+
+ (*retlen) += len;
}
return 0;
}
-static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
- unsigned long adr, const u_char *buf, int len)
+static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const struct kvec **pvec,
+ unsigned long *pvec_seek, int len)
{
struct cfi_private *cfi = map->fldrv_priv;
- __u32 status, status_OK;
- unsigned long cmd_adr, timeo;
- DECLARE_WAITQUEUE(wait, current);
- int wbufsize, z;
+ map_word status, write_cmd, datum;
+ unsigned long cmd_adr;
+ int ret, wbufsize, word_gap, words;
+ const struct kvec *vec;
+ unsigned long vec_seek;
- wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
+ wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
adr += chip->start;
cmd_adr = adr & ~(wbufsize-1);
-
+
/* Let's determine this according to the interleave only once */
- status_OK = CMD(0x80);
+ write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
- timeo = jiffies + HZ;
- retry:
- spin_lock_bh(chip->mutex);
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, cmd_adr, FL_WRITING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
- /* Check that the chip's ready to talk to us.
- * Later, we can actually think about interrupting it
- * if it's in FL_ERASING state.
- * Not just yet, though.
- */
- switch (chip->state) {
- case FL_READY:
- break;
-
- case FL_CFI_QUERY:
- case FL_JEDEC_QUERY:
- cfi_write(map, CMD(0x70), cmd_adr);
+ XIP_INVAL_CACHED_RANGE(map, adr, len);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, cmd_adr);
+
+ /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
+ [...], the device will not accept any more Write to Buffer commands".
+ So we must check here and reset those bits if they're set. Otherwise
+ we're just pissing in the wind */
+ if (chip->state != FL_STATUS) {
+ map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
-
- case FL_STATUS:
- status = cfi_read(map, cmd_adr);
- if ((status & status_OK) == status_OK)
- break;
- /* Urgh. Chip not yet ready to talk to us. */
- if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "waiting for chip to be ready timed out in buffer write\n");
- return -EIO;
- }
-
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- goto retry;
-
- default:
- /* Stick ourselves on a wait queue to be woken when
- someone changes the status */
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + HZ;
- goto retry;
+ }
+ status = map_read(map, cmd_adr);
+ if (map_word_bitsset(map, status, CMD(0x30))) {
+ xip_enable(map, chip, cmd_adr);
+ printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
+ xip_disable(map, chip, cmd_adr);
+ map_write(map, CMD(0x50), cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
}
- ENABLE_VPP(map);
- cfi_write(map, CMD(0xe8), cmd_adr);
chip->state = FL_WRITING_TO_BUFFER;
+ map_write(map, write_cmd, cmd_adr);
+ ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
+ if (ret) {
+ /* Argh. Not ready for write to buffer */
+ map_word Xstatus = map_read(map, cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ status = map_read(map, cmd_adr);
+ map_write(map, CMD(0x50), cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ xip_enable(map, chip, cmd_adr);
+ printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
+ map->name, Xstatus.x[0], status.x[0]);
+ goto out;
+ }
- z = 0;
- for (;;) {
- status = cfi_read(map, cmd_adr);
- if ((status & status_OK) == status_OK)
- break;
-
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- spin_lock_bh(chip->mutex);
-
- if (++z > 20) {
- /* Argh. Not ready for write to buffer */
- cfi_write(map, CMD(0x70), cmd_adr);
- chip->state = FL_STATUS;
- DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %x, status = %x\n", status, cfi_read(map, cmd_adr));
- return -EIO;
- }
+ /* Figure out the number of words to write */
+ word_gap = (-adr & (map_bankwidth(map)-1));
+ words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
+ if (!word_gap) {
+ words--;
+ } else {
+ word_gap = map_bankwidth(map) - word_gap;
+ adr -= word_gap;
+ datum = map_word_ff(map);
}
/* Write length of data to come */
- cfi_write(map, CMD(len/CFIDEV_BUSWIDTH-1), cmd_adr );
+ map_write(map, CMD(words), cmd_adr );
/* Write data */
- for (z = 0; z < len; z += CFIDEV_BUSWIDTH) {
- if (cfi_buswidth_is_1()) {
- map->write8 (map, *((__u8*)buf)++, adr+z);
- } else if (cfi_buswidth_is_2()) {
- map->write16 (map, *((__u16*)buf)++, adr+z);
- } else if (cfi_buswidth_is_4()) {
- map->write32 (map, *((__u32*)buf)++, adr+z);
- } else {
- DISABLE_VPP(map);
- return -EINVAL;
+ vec = *pvec;
+ vec_seek = *pvec_seek;
+ do {
+ int n = map_bankwidth(map) - word_gap;
+ if (n > vec->iov_len - vec_seek)
+ n = vec->iov_len - vec_seek;
+ if (n > len)
+ n = len;
+
+ if (!word_gap && len < map_bankwidth(map))
+ datum = map_word_ff(map);
+
+ datum = map_word_load_partial(map, datum,
+ vec->iov_base + vec_seek,
+ word_gap, n);
+
+ len -= n;
+ word_gap += n;
+ if (!len || word_gap == map_bankwidth(map)) {
+ map_write(map, datum, adr);
+ adr += map_bankwidth(map);
+ word_gap = 0;
}
- }
+
+ vec_seek += n;
+ if (vec_seek == vec->iov_len) {
+ vec++;
+ vec_seek = 0;
+ }
+ } while (len);
+ *pvec = vec;
+ *pvec_seek = vec_seek;
+
/* GO GO GO */
- cfi_write(map, CMD(0xd0), cmd_adr);
+ map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
- spin_unlock_bh(chip->mutex);
- cfi_udelay(chip->buffer_write_time);
- spin_lock_bh(chip->mutex);
+ ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
+ adr, len,
+ &chip->buffer_write_time);
+ if (ret) {
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ xip_enable(map, chip, cmd_adr);
+ printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
+ goto out;
+ }
- timeo = jiffies + (HZ/2);
- z = 0;
- for (;;) {
- if (chip->state != FL_WRITING) {
- /* Someone's suspended the write. Sleep */
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + (HZ / 2); /* FIXME */
- spin_lock_bh(chip->mutex);
- continue;
- }
+ /* check for errors */
+ status = map_read(map, cmd_adr);
+ if (map_word_bitsset(map, status, CMD(0x1a))) {
+ unsigned long chipstatus = MERGESTATUS(status);
- status = cfi_read(map, cmd_adr);
- if ((status & status_OK) == status_OK)
- break;
+ /* reset status */
+ map_write(map, CMD(0x50), cmd_adr);
+ map_write(map, CMD(0x70), cmd_adr);
+ xip_enable(map, chip, cmd_adr);
- /* OK Still waiting */
- if (time_after(jiffies, timeo)) {
- chip->state = FL_STATUS;
- DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
- return -EIO;
+ if (chipstatus & 0x02) {
+ ret = -EROFS;
+ } else if (chipstatus & 0x08) {
+ printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
+ ret = -EIO;
+ } else {
+ printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
+ ret = -EINVAL;
}
-
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- z++;
- spin_lock_bh(chip->mutex);
- }
- if (!z) {
- chip->buffer_write_time--;
- if (!chip->buffer_write_time)
- chip->buffer_write_time++;
- }
- if (z > 1)
- chip->buffer_write_time++;
- /* Done and happy. */
- DISABLE_VPP(map);
- chip->state = FL_STATUS;
- /* check for lock bit */
- if (status & CMD(0x02)) {
- /* clear status */
- cfi_write(map, CMD(0x50), cmd_adr);
- /* put back into read status register mode */
- cfi_write(map, CMD(0x70), adr);
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
- return -EROFS;
+ goto out;
}
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
- return 0;
+
+ xip_enable(map, chip, cmd_adr);
+ out: put_chip(map, chip, cmd_adr);
+ spin_unlock(chip->mutex);
+ return ret;
}
-static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
- size_t len, size_t *retlen, const u_char *buf)
+static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
+ int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
int ret = 0;
int chipnum;
- unsigned long ofs;
+ unsigned long ofs, vec_seek, i;
+ size_t len = 0;
+
+ for (i = 0; i < count; i++)
+ len += vecs[i].iov_len;
*retlen = 0;
if (!len)
return 0;
chipnum = to >> cfi->chipshift;
- ofs = to - (chipnum << cfi->chipshift);
-
- /* If it's not bus-aligned, do the first word write */
- if (ofs & (CFIDEV_BUSWIDTH-1)) {
- size_t local_len = (-ofs)&(CFIDEV_BUSWIDTH-1);
- if (local_len > len)
- local_len = len;
- ret = cfi_intelext_write_words(mtd, to, local_len,
- retlen, buf);
- if (ret)
- return ret;
- ofs += local_len;
- buf += local_len;
- len -= local_len;
-
- if (ofs >> cfi->chipshift) {
- chipnum ++;
- ofs = 0;
- if (chipnum == cfi->numchips)
- return 0;
- }
- }
+ ofs = to - (chipnum << cfi->chipshift);
+ vec_seek = 0;
- /* Write buffer is worth it only if more than one word to write... */
- while(len > CFIDEV_BUSWIDTH) {
+ do {
/* We must not cross write block boundaries */
int size = wbufsize - (ofs & (wbufsize-1));
if (size > len)
- size = len & ~(CFIDEV_BUSWIDTH-1);
- ret = do_write_buffer(map, &cfi->chips[chipnum],
- ofs, buf, size);
+ size = len;
+ ret = do_write_buffer(map, &cfi->chips[chipnum],
+ ofs, &vecs, &vec_seek, size);
if (ret)
return ret;
ofs += size;
- buf += size;
(*retlen) += size;
len -= size;
if (ofs >> cfi->chipshift) {
- chipnum ++;
+ chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
- }
- /* ... and write the remaining bytes */
- if (len > 0) {
- size_t local_retlen;
- ret = cfi_intelext_write_words(mtd, ofs + (chipnum << cfi->chipshift),
- len, &local_retlen, buf);
- if (ret)
- return ret;
- (*retlen) += local_retlen;
- }
+ /* Be nice and reschedule with the chip in a usable state for other
+ processes. */
+ cond_resched();
+
+ } while (len);
return 0;
}
+static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct kvec vec;
+
+ vec.iov_base = (void *) buf;
+ vec.iov_len = len;
-static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+ return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
+}
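
For illustration only (not part of the patch): cfi_intelext_write_buffers() is now a one-element kvec wrapper around the new writev path, so a gathered write can be expressed the same way. The sketch below assumes the driver ends up exposed through mtd->writev, which this hunk does not show; the function name is hypothetical.

    /* Sketch: two source buffers written back-to-back at 'to'. */
    static int example_gathered_write(struct mtd_info *mtd, loff_t to,
                                      void *hdr, size_t hdrlen,
                                      void *payload, size_t paylen,
                                      size_t *retlen)
    {
            struct kvec vecs[2];

            if (!mtd->writev)               /* not hooked up on this device */
                    return -EOPNOTSUPP;

            vecs[0].iov_base = hdr;
            vecs[0].iov_len  = hdrlen;
            vecs[1].iov_base = payload;
            vecs[1].iov_len  = paylen;

            return mtd->writev(mtd, vecs, 2, to, retlen);
    }
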
+
+static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
- __u32 status, status_OK;
- unsigned long timeo;
+ map_word status;
int retries = 3;
- DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
+ int ret;
adr += chip->start;
- /* Let's determine this according to the interleave only once */
- status_OK = CMD(0x80);
-
- timeo = jiffies + HZ;
-retry:
- spin_lock_bh(chip->mutex);
-
- /* Check that the chip's ready to talk to us. */
- switch (chip->state) {
- case FL_CFI_QUERY:
- case FL_JEDEC_QUERY:
- case FL_READY:
- cfi_write(map, CMD(0x70), adr);
- chip->state = FL_STATUS;
-
- case FL_STATUS:
- status = cfi_read(map, adr);
- if ((status & status_OK) == status_OK)
- break;
-
- /* Urgh. Chip not yet ready to talk to us. */
- if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
- return -EIO;
- }
-
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- goto retry;
-
- default:
- /* Stick ourselves on a wait queue to be woken when
- someone changes the status */
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + HZ;
- goto retry;
+ retry:
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, adr, FL_ERASING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
}
+ XIP_INVAL_CACHED_RANGE(map, adr, len);
ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
/* Clear the status register first */
- cfi_write(map, CMD(0x50), adr);
+ map_write(map, CMD(0x50), adr);
/* Now erase */
- cfi_write(map, CMD(0x20), adr);
- cfi_write(map, CMD(0xD0), adr);
+ map_write(map, CMD(0x20), adr);
+ map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
-
- spin_unlock_bh(chip->mutex);
- schedule_timeout(HZ);
- spin_lock_bh(chip->mutex);
-
- /* FIXME. Use a timer to check this, and return immediately. */
- /* Once the state machine's known to be working I'll do that */
-
- timeo = jiffies + (HZ*20);
- for (;;) {
- if (chip->state != FL_ERASING) {
- /* Someone's suspended the erase. Sleep */
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + (HZ*20); /* FIXME */
- spin_lock_bh(chip->mutex);
- continue;
- }
+ chip->erase_suspended = 0;
- status = cfi_read(map, adr);
- if ((status & status_OK) == status_OK)
- break;
-
- /* OK Still waiting */
- if (time_after(jiffies, timeo)) {
- cfi_write(map, CMD(0x70), adr);
- chip->state = FL_STATUS;
- printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
- DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
- return -EIO;
- }
-
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ adr, len,
+ &chip->erase_time);
+ if (ret) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ xip_enable(map, chip, adr);
+ printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
+ goto out;
}
-
- DISABLE_VPP(map);
- ret = 0;
/* We've broken this before. It doesn't hurt to be safe */
- cfi_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
- status = cfi_read(map, adr);
-
- /* check for lock bit */
- if (status & CMD(0x3a)) {
- unsigned char chipstatus = status;
- if (status != CMD(status & 0xff)) {
- int i;
- for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
- chipstatus |= status >> (cfi->device_type * 8);
- }
- printk(KERN_WARNING "Status is not identical for all chips: 0x%x. Merging to give 0x%02x\n", status, chipstatus);
- }
+ status = map_read(map, adr);
+
+ /* check for errors */
+ if (map_word_bitsset(map, status, CMD(0x3a))) {
+ unsigned long chipstatus = MERGESTATUS(status);
+
/* Reset the error bits */
- cfi_write(map, CMD(0x50), adr);
- cfi_write(map, CMD(0x70), adr);
-
+ map_write(map, CMD(0x50), adr);
+ map_write(map, CMD(0x70), adr);
+ xip_enable(map, chip, adr);
+
if ((chipstatus & 0x30) == 0x30) {
- printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", status);
- ret = -EIO;
+ printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
+ ret = -EINVAL;
} else if (chipstatus & 0x02) {
/* Protection bit set */
ret = -EROFS;
} else if (chipstatus & 0x8) {
/* Voltage */
- printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", status);
+ printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
ret = -EIO;
- } else if (chipstatus & 0x20) {
- if (retries--) {
- printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, status);
- timeo = jiffies + HZ;
- chip->state = FL_STATUS;
- spin_unlock_bh(chip->mutex);
- goto retry;
- }
- printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, status);
+ } else if (chipstatus & 0x20 && retries--) {
+ printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
+ put_chip(map, chip, adr);
+ spin_unlock(chip->mutex);
+ goto retry;
+ } else {
+ printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
ret = -EIO;
}
+
+ goto out;
}
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+ xip_enable(map, chip, adr);
+ out: put_chip(map, chip, adr);
+ spin_unlock(chip->mutex);
return ret;
}
int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
-{ struct map_info *map = mtd->priv;
- struct cfi_private *cfi = map->fldrv_priv;
- unsigned long adr, len;
- int chipnum, ret = 0;
- int i, first;
- struct mtd_erase_region_info *regions = mtd->eraseregions;
-
- if (instr->addr > mtd->size)
- return -EINVAL;
-
- if ((instr->len + instr->addr) > mtd->size)
- return -EINVAL;
-
- /* Check that both start and end of the requested erase are
- * aligned with the erasesize at the appropriate addresses.
- */
-
- i = 0;
-
- /* Skip all erase regions which are ended before the start of
- the requested erase. Actually, to save on the calculations,
- we skip to the first erase region which starts after the
- start of the requested erase, and then go back one.
- */
-
- while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
- i++;
- i--;
-
- /* OK, now i is pointing at the erase region in which this
- erase request starts. Check the start of the requested
- erase range is aligned with the erase size which is in
- effect here.
- */
-
- if (instr->addr & (regions[i].erasesize-1))
- return -EINVAL;
-
- /* Remember the erase region we start on */
- first = i;
-
- /* Next, check that the end of the requested erase is aligned
- * with the erase region at that address.
- */
-
- while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
- i++;
-
- /* As before, drop back one to point at the region in which
- the address actually falls
- */
- i--;
-
- if ((instr->addr + instr->len) & (regions[i].erasesize-1))
- return -EINVAL;
+{
+ unsigned long ofs, len;
+ int ret;
- chipnum = instr->addr >> cfi->chipshift;
- adr = instr->addr - (chipnum << cfi->chipshift);
+ ofs = instr->addr;
len = instr->len;
- i=first;
+ ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
+ if (ret)
+ return ret;
- while(len) {
- ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
-
- if (ret)
- return ret;
-
- adr += regions[i].erasesize;
- len -= regions[i].erasesize;
-
- if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
- i++;
-
- if (adr >> cfi->chipshift) {
- adr = 0;
- chipnum++;
-
- if (chipnum >= cfi->numchips)
- break;
- }
- }
-
instr->state = MTD_ERASE_DONE;
- if (instr->callback)
- instr->callback(instr);
-
+ mtd_erase_callback(instr);
+
return 0;
}
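
For illustration only (not part of the patch): the rewritten erase path delegates the region walking to cfi_varsize_frob() and signals completion through mtd_erase_callback(). Below is a sketch of a synchronous caller built on that callback; the function names are hypothetical and the wait-queue handshake is modelled on drivers/mtd/mtdchar.c.

    /* Sketch: erase one region and wait for the completion callback. */
    static void example_erase_callback(struct erase_info *instr)
    {
            wake_up((wait_queue_head_t *)instr->priv);
    }

    static int example_erase_sync(struct mtd_info *mtd, loff_t ofs, size_t len)
    {
            DECLARE_WAITQUEUE(wait, current);
            wait_queue_head_t waitq;
            struct erase_info instr;
            int ret;

            init_waitqueue_head(&waitq);
            memset(&instr, 0, sizeof(instr));
            instr.mtd = mtd;
            instr.addr = ofs;
            instr.len = len;
            instr.callback = example_erase_callback;
            instr.priv = (u_long)&waitq;

            add_wait_queue(&waitq, &wait);
            set_current_state(TASK_UNINTERRUPTIBLE);
            ret = mtd->erase(mtd, &instr);
            if (!ret && instr.state != MTD_ERASE_DONE &&
                instr.state != MTD_ERASE_FAILED)
                    schedule();             /* callback will wake us */
            set_current_state(TASK_RUNNING);
            remove_wait_queue(&waitq, &wait);

            return ret ? ret : (instr.state == MTD_ERASE_DONE ? 0 : -EIO);
    }
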
@@ -1180,39 +1781,22 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
int i;
struct flchip *chip;
int ret = 0;
- DECLARE_WAITQUEUE(wait, current);
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- retry:
- spin_lock_bh(chip->mutex);
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_SYNCING);
- switch(chip->state) {
- case FL_READY:
- case FL_STATUS:
- case FL_CFI_QUERY:
- case FL_JEDEC_QUERY:
+ if (!ret) {
chip->oldstate = chip->state;
chip->state = FL_SYNCING;
- /* No need to wake_up() on this state change -
+ /* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
- case FL_SYNCING:
- spin_unlock_bh(chip->mutex);
- break;
-
- default:
- /* Not an idle state */
- add_wait_queue(&chip->wq, &wait);
-
- spin_unlock_bh(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
-
- goto retry;
}
+ spin_unlock(chip->mutex);
}
/* Unlock the chips again */
@@ -1220,299 +1804,408 @@ static void cfi_intelext_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
-
+ spin_lock(chip->mutex);
+
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ spin_unlock(chip->mutex);
}
}
-static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+#ifdef DEBUG_LOCK_BITS
+static int __xipram do_printlockstatus_oneblock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr,
+ int len, void *thunk)
{
struct cfi_private *cfi = map->fldrv_priv;
- __u32 status, status_OK;
- unsigned long timeo = jiffies + HZ;
- DECLARE_WAITQUEUE(wait, current);
+ int status, ofs_factor = cfi->interleave * cfi->device_type;
adr += chip->start;
+ xip_disable(map, chip, adr+(2*ofs_factor));
+ map_write(map, CMD(0x90), adr+(2*ofs_factor));
+ chip->state = FL_JEDEC_QUERY;
+ status = cfi_read_query(map, adr+(2*ofs_factor));
+ xip_enable(map, chip, 0);
+ printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
+ adr, status);
+ return 0;
+}
+#endif
- /* Let's determine this according to the interleave only once */
- status_OK = CMD(0x80);
-
- timeo = jiffies + HZ;
-retry:
- spin_lock_bh(chip->mutex);
-
- /* Check that the chip's ready to talk to us. */
- switch (chip->state) {
- case FL_CFI_QUERY:
- case FL_JEDEC_QUERY:
- case FL_READY:
- cfi_write(map, CMD(0x70), adr);
- chip->state = FL_STATUS;
+#define DO_XXLOCK_ONEBLOCK_LOCK ((void *) 1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *) 2)
- case FL_STATUS:
- status = cfi_read(map, adr);
- if ((status & status_OK) == status_OK)
- break;
-
- /* Urgh. Chip not yet ready to talk to us. */
- if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
- return -EIO;
- }
+static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ int udelay;
+ int ret;
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- goto retry;
+ adr += chip->start;
- default:
- /* Stick ourselves on a wait queue to be woken when
- someone changes the status */
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + HZ;
- goto retry;
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
}
ENABLE_VPP(map);
- cfi_write(map, CMD(0x60), adr);
- cfi_write(map, CMD(0x01), adr);
- chip->state = FL_LOCKING;
-
- spin_unlock_bh(chip->mutex);
- schedule_timeout(HZ);
- spin_lock_bh(chip->mutex);
-
- /* FIXME. Use a timer to check this, and return immediately. */
- /* Once the state machine's known to be working I'll do that */
-
- timeo = jiffies + (HZ*2);
- for (;;) {
+ xip_disable(map, chip, adr);
+
+ map_write(map, CMD(0x60), adr);
+ if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+ map_write(map, CMD(0x01), adr);
+ chip->state = FL_LOCKING;
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_UNLOCKING;
+ } else
+ BUG();
+
+ /*
+ * If Instant Individual Block Locking supported then no need
+ * to delay.
+ */
+ udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
- status = cfi_read(map, adr);
- if ((status & status_OK) == status_OK)
- break;
-
- /* OK Still waiting */
- if (time_after(jiffies, timeo)) {
- cfi_write(map, CMD(0x70), adr);
- chip->state = FL_STATUS;
- printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
- DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
- return -EIO;
- }
-
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- spin_lock_bh(chip->mutex);
+ ret = WAIT_TIMEOUT(map, chip, adr, udelay);
+ if (ret) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ xip_enable(map, chip, adr);
+ printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
+ goto out;
}
-
- /* Done and happy. */
- chip->state = FL_STATUS;
- DISABLE_VPP(map);
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
- return 0;
+
+ xip_enable(map, chip, adr);
+out: put_chip(map, chip, adr);
+ spin_unlock(chip->mutex);
+ return ret;
}
+
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
- struct map_info *map = mtd->priv;
- struct cfi_private *cfi = map->fldrv_priv;
- unsigned long adr;
- int chipnum, ret = 0;
+ int ret;
+
#ifdef DEBUG_LOCK_BITS
- int ofs_factor = cfi->interleave * cfi->device_type;
+ printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
+ __FUNCTION__, ofs, len);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, 0);
#endif
- if (ofs & (mtd->erasesize - 1))
- return -EINVAL;
-
- if (len & (mtd->erasesize -1))
- return -EINVAL;
+ ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
+ ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
- if ((len + ofs) > mtd->size)
- return -EINVAL;
+#ifdef DEBUG_LOCK_BITS
+ printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
+ __FUNCTION__, ret);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, 0);
+#endif
- chipnum = ofs >> cfi->chipshift;
- adr = ofs - (chipnum << cfi->chipshift);
+ return ret;
+}
- while(len) {
+static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ int ret;
#ifdef DEBUG_LOCK_BITS
- cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
- printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
- cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
+ __FUNCTION__, ofs, len);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, 0);
#endif
- ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
+ ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
+ ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
#ifdef DEBUG_LOCK_BITS
- cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
- printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
- cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
-#endif
-
+ printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
+ __FUNCTION__, ret);
+ cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
+ ofs, len, 0);
+#endif
+
+ return ret;
+}
+
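For illustration only (not part of the patch): lock and unlock are now thin wrappers that feed the same do_xxlock_oneblock() worker through cfi_varsize_frob(), selected by the thunk argument. Below is a sketch of the usual unlock, write, relock pattern a caller might follow on parts that power up with all blocks locked; the NULL checks are an assumption, since chips without locking support leave these hooks unset.

    /* Sketch: update one erase block on a chip that boots locked. */
    static int example_update_locked_block(struct mtd_info *mtd, loff_t ofs,
                                           const u_char *buf, size_t len)
    {
            size_t retlen;
            int ret;

            if (mtd->unlock) {
                    ret = mtd->unlock(mtd, ofs, mtd->erasesize);
                    if (ret)
                            return ret;
            }

            ret = mtd->write(mtd, ofs, len, &retlen, buf);

            if (mtd->lock)
                    mtd->lock(mtd, ofs, mtd->erasesize);   /* best-effort relock */

            return ret;
    }
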
+#ifdef CONFIG_MTD_OTP
+
+typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
+ u_long data_offset, u_char *buf, u_int size,
+ u_long prot_offset, u_int groupno, u_int groupsize);
+
+static int __xipram
+do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
+ u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+
+ /* let's ensure we're not reading back cached data from array mode */
+ INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
+
+ xip_disable(map, chip, chip->start);
+ if (chip->state != FL_JEDEC_QUERY) {
+ map_write(map, CMD(0x90), chip->start);
+ chip->state = FL_JEDEC_QUERY;
+ }
+ map_copy_from(map, buf, chip->start + offset, size);
+ xip_enable(map, chip, chip->start);
+
+ /* then ensure we don't keep OTP data in the cache */
+ INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
+
+ put_chip(map, chip, chip->start);
+ spin_unlock(chip->mutex);
+ return 0;
+}
+
+static int
+do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
+ u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
+{
+ int ret;
+
+ while (size) {
+ unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
+ int gap = offset - bus_ofs;
+ int n = min_t(int, size, map_bankwidth(map)-gap);
+ map_word datum = map_word_ff(map);
+
+ datum = map_word_load_partial(map, datum, buf, gap, n);
+ ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
if (ret)
return ret;
- adr += mtd->erasesize;
- len -= mtd->erasesize;
-
- if (adr >> cfi->chipshift) {
- adr = 0;
- chipnum++;
-
- if (chipnum >= cfi->numchips)
- break;
- }
+ offset += n;
+ buf += n;
+ size -= n;
}
+
return 0;
}
-static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+
+static int
+do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
+ u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
struct cfi_private *cfi = map->fldrv_priv;
- __u32 status, status_OK;
- unsigned long timeo = jiffies + HZ;
- DECLARE_WAITQUEUE(wait, current);
+ map_word datum;
- adr += chip->start;
+ /* make sure area matches group boundaries */
+ if (size != grpsz)
+ return -EXDEV;
- /* Let's determine this according to the interleave only once */
- status_OK = CMD(0x80);
+ datum = map_word_ff(map);
+ datum = map_word_clr(map, datum, CMD(1 << grpno));
+ return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
+}
- timeo = jiffies + HZ;
-retry:
- spin_lock_bh(chip->mutex);
+static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf,
+ otp_op_t action, int user_regs)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ struct flchip *chip;
+ struct cfi_intelext_otpinfo *otp;
+ u_long devsize, reg_prot_offset, data_offset;
+ u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
+ u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
+ int ret;
- /* Check that the chip's ready to talk to us. */
- switch (chip->state) {
- case FL_CFI_QUERY:
- case FL_JEDEC_QUERY:
- case FL_READY:
- cfi_write(map, CMD(0x70), adr);
- chip->state = FL_STATUS;
+ *retlen = 0;
- case FL_STATUS:
- status = cfi_read(map, adr);
- if ((status & status_OK) == status_OK)
- break;
-
- /* Urgh. Chip not yet ready to talk to us. */
- if (time_after(jiffies, timeo)) {
- spin_unlock_bh(chip->mutex);
- printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
- return -EIO;
+ /* Check that we actually have some OTP registers */
+ if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
+ return -ENODATA;
+
+ /* we need real chips here, not virtual ones */
+ devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
+ chip_step = devsize >> cfi->chipshift;
+ chip_num = 0;
+
+ /* Some chips have OTP located in the _top_ partition only.
+ For example: Intel 28F256L18T (T means top-parameter device) */
+ if (cfi->mfr == MANUFACTURER_INTEL) {
+ switch (cfi->id) {
+ case 0x880b:
+ case 0x880c:
+ case 0x880d:
+ chip_num = chip_step - 1;
}
-
- /* Latency issues. Drop the lock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- goto retry;
-
- default:
- /* Stick ourselves on a wait queue to be woken when
- someone changes the status */
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock_bh(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- timeo = jiffies + HZ;
- goto retry;
}
- ENABLE_VPP(map);
- cfi_write(map, CMD(0x60), adr);
- cfi_write(map, CMD(0xD0), adr);
- chip->state = FL_UNLOCKING;
-
- spin_unlock_bh(chip->mutex);
- schedule_timeout(HZ);
- spin_lock_bh(chip->mutex);
-
- /* FIXME. Use a timer to check this, and return immediately. */
- /* Once the state machine's known to be working I'll do that */
-
- timeo = jiffies + (HZ*2);
- for (;;) {
+ for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
+ chip = &cfi->chips[chip_num];
+ otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
+
+ /* first OTP region */
+ field = 0;
+ reg_prot_offset = extp->ProtRegAddr;
+ reg_fact_groups = 1;
+ reg_fact_size = 1 << extp->FactProtRegSize;
+ reg_user_groups = 1;
+ reg_user_size = 1 << extp->UserProtRegSize;
+
+ while (len > 0) {
+ /* flash geometry fixup */
+ data_offset = reg_prot_offset + 1;
+ data_offset *= cfi->interleave * cfi->device_type;
+ reg_prot_offset *= cfi->interleave * cfi->device_type;
+ reg_fact_size *= cfi->interleave;
+ reg_user_size *= cfi->interleave;
+
+ if (user_regs) {
+ groups = reg_user_groups;
+ groupsize = reg_user_size;
+ /* skip over factory reg area */
+ groupno = reg_fact_groups;
+ data_offset += reg_fact_groups * reg_fact_size;
+ } else {
+ groups = reg_fact_groups;
+ groupsize = reg_fact_size;
+ groupno = 0;
+ }
- status = cfi_read(map, adr);
- if ((status & status_OK) == status_OK)
- break;
-
- /* OK Still waiting */
- if (time_after(jiffies, timeo)) {
- cfi_write(map, CMD(0x70), adr);
- chip->state = FL_STATUS;
- printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
- DISABLE_VPP(map);
- spin_unlock_bh(chip->mutex);
- return -EIO;
+ while (len > 0 && groups > 0) {
+ if (!action) {
+ /*
+ * Special case: if action is NULL
+ * we fill buf with otp_info records.
+ */
+ struct otp_info *otpinfo;
+ map_word lockword;
+ len -= sizeof(struct otp_info);
+ if (len <= 0)
+ return -ENOSPC;
+ ret = do_otp_read(map, chip,
+ reg_prot_offset,
+ (u_char *)&lockword,
+ map_bankwidth(map),
+ 0, 0, 0);
+ if (ret)
+ return ret;
+ otpinfo = (struct otp_info *)buf;
+ otpinfo->start = from;
+ otpinfo->length = groupsize;
+ otpinfo->locked =
+ !map_word_bitsset(map, lockword,
+ CMD(1 << groupno));
+ from += groupsize;
+ buf += sizeof(*otpinfo);
+ *retlen += sizeof(*otpinfo);
+ } else if (from >= groupsize) {
+ from -= groupsize;
+ data_offset += groupsize;
+ } else {
+ int size = groupsize;
+ data_offset += from;
+ size -= from;
+ from = 0;
+ if (size > len)
+ size = len;
+ ret = action(map, chip, data_offset,
+ buf, size, reg_prot_offset,
+ groupno, groupsize);
+ if (ret < 0)
+ return ret;
+ buf += size;
+ len -= size;
+ *retlen += size;
+ data_offset += size;
+ }
+ groupno++;
+ groups--;
+ }
+
+ /* next OTP region */
+ if (++field == extp->NumProtectionFields)
+ break;
+ reg_prot_offset = otp->ProtRegAddr;
+ reg_fact_groups = otp->FactGroups;
+ reg_fact_size = 1 << otp->FactProtRegSize;
+ reg_user_groups = otp->UserGroups;
+ reg_user_size = 1 << otp->UserProtRegSize;
+ otp++;
}
-
- /* Latency issues. Drop the unlock, wait a while and retry */
- spin_unlock_bh(chip->mutex);
- cfi_udelay(1);
- spin_lock_bh(chip->mutex);
}
-
- /* Done and happy. */
- chip->state = FL_STATUS;
- DISABLE_VPP(map);
- wake_up(&chip->wq);
- spin_unlock_bh(chip->mutex);
+
return 0;
}
-static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+
+static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
{
- struct map_info *map = mtd->priv;
- struct cfi_private *cfi = map->fldrv_priv;
- unsigned long adr;
- int chipnum, ret = 0;
-#ifdef DEBUG_LOCK_BITS
- int ofs_factor = cfi->interleave * cfi->device_type;
-#endif
+ return cfi_intelext_otp_walk(mtd, from, len, retlen,
+ buf, do_otp_read, 0);
+}
+
+static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_intelext_otp_walk(mtd, from, len, retlen,
+ buf, do_otp_read, 1);
+}
- chipnum = ofs >> cfi->chipshift;
- adr = ofs - (chipnum << cfi->chipshift);
+static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_intelext_otp_walk(mtd, from, len, retlen,
+ buf, do_otp_write, 1);
+}
-#ifdef DEBUG_LOCK_BITS
- {
- unsigned long temp_adr = adr;
- unsigned long temp_len = len;
-
- cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
- while (temp_len) {
- printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
- temp_adr += mtd->erasesize;
- temp_len -= mtd->erasesize;
- }
- cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
- }
-#endif
+static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
+ loff_t from, size_t len)
+{
+ size_t retlen;
+ return cfi_intelext_otp_walk(mtd, from, len, &retlen,
+ NULL, do_otp_lock, 1);
+}
- ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
+static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
+ struct otp_info *buf, size_t len)
+{
+ size_t retlen;
+ int ret;
-#ifdef DEBUG_LOCK_BITS
- cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
- printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
- cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
-#endif
-
- return ret;
+ ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
+ return ret ? : retlen;
+}
+
+static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
+ struct otp_info *buf, size_t len)
+{
+ size_t retlen;
+ int ret;
+
+ ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
+ return ret ? : retlen;
}
+#endif
+
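For illustration only (not part of the patch): cfi_intelext_otp_walk() multiplexes reading, writing, locking and enumerating the protection registers behind the standard mtd_info hooks wired up in this section. Below is a sketch of a hypothetical caller that enumerates the factory region(s) and reads them back; the buffer sizes are arbitrary.

    /* Sketch: dump the factory OTP region(s) of an Intel chip. */
    static int example_dump_fact_otp(struct mtd_info *mtd)
    {
            struct otp_info info[4];
            u_char buf[64];
            size_t retlen;
            int n, i, ret;

            if (!mtd->get_fact_prot_info || !mtd->read_fact_prot_reg)
                    return -EOPNOTSUPP;

            n = mtd->get_fact_prot_info(mtd, info, sizeof(info));
            if (n <= 0)
                    return n ? n : -ENODATA;

            for (i = 0; i < n / (int)sizeof(struct otp_info); i++) {
                    size_t len = min_t(size_t, info[i].length, sizeof(buf));

                    ret = mtd->read_fact_prot_reg(mtd, info[i].start, len,
                                                  &retlen, buf);
                    if (ret)
                            return ret;
                    printk(KERN_INFO "OTP group %d: %zu bytes, %slocked\n",
                           i, retlen, info[i].locked ? "" : "un");
            }
            return 0;
    }
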
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
@@ -1524,27 +2217,37 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
+ spin_lock(chip->mutex);
- switch(chip->state) {
+ switch (chip->state) {
case FL_READY:
case FL_STATUS:
case FL_CFI_QUERY:
case FL_JEDEC_QUERY:
- chip->oldstate = chip->state;
- chip->state = FL_PM_SUSPENDED;
- /* No need to wake_up() on this state change -
- * as the whole point is that nobody can do anything
- * with the chip now anyway.
- */
- case FL_PM_SUSPENDED:
+ if (chip->oldstate == FL_READY) {
+ chip->oldstate = chip->state;
+ chip->state = FL_PM_SUSPENDED;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ } else {
+ /* There seems to be an operation pending. We must wait for it. */
+ printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
+ ret = -EAGAIN;
+ }
break;
-
default:
+ /* Should we actually wait? Once upon a time these routines weren't
+ allowed to. Or should we return -EAGAIN, because the upper layers
+ ought to have already shut down anything which was using the device
+ anyway? The latter for now. */
+ printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
ret = -EAGAIN;
+ case FL_PM_SUSPENDED:
break;
}
- spin_unlock_bh(chip->mutex);
+ spin_unlock(chip->mutex);
}
/* Unlock the chips again */
@@ -1552,20 +2255,21 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
if (ret) {
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
-
- spin_lock_bh(chip->mutex);
-
+
+ spin_lock(chip->mutex);
+
if (chip->state == FL_PM_SUSPENDED) {
/* No need to force it into a known state here,
because we're returning failure, and it didn't
get power cycled */
chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ spin_unlock(chip->mutex);
}
- }
-
+ }
+
return ret;
}
@@ -1577,44 +2281,86 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
struct flchip *chip;
for (i=0; i<cfi->numchips; i++) {
-
+
chip = &cfi->chips[i];
- spin_lock_bh(chip->mutex);
-
+ spin_lock(chip->mutex);
+
/* Go to known state. Chip may have been power cycled */
if (chip->state == FL_PM_SUSPENDED) {
- cfi_write(map, CMD(0xFF), 0);
- chip->state = FL_READY;
+ map_write(map, CMD(0xFF), cfi->chips[i].start);
+ chip->oldstate = chip->state = FL_READY;
wake_up(&chip->wq);
}
- spin_unlock_bh(chip->mutex);
+ spin_unlock(chip->mutex);
+ }
+}
+
+static int cfi_intelext_reset(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i, ret;
+
+ for (i=0; i < cfi->numchips; i++) {
+ struct flchip *chip = &cfi->chips[i];
+
+ /* force the completion of any ongoing operation
+ and switch to array mode so any bootloader in
+ flash is accessible for soft reboot. */
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_SYNCING);
+ if (!ret) {
+ map_write(map, CMD(0xff), chip->start);
+ chip->state = FL_READY;
+ }
+ spin_unlock(chip->mutex);
}
+
+ return 0;
+}
+
+static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
+ void *v)
+{
+ struct mtd_info *mtd;
+
+ mtd = container_of(nb, struct mtd_info, reboot_notifier);
+ cfi_intelext_reset(mtd);
+ return NOTIFY_DONE;
}
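
For illustration only (not part of the patch): the reboot notifier forces every chip back into array mode so a bootloader stored in the same flash stays readable across a soft reboot. The sketch below shows the registration the (unshown) setup path is assumed to perform, inferred from the unregister_reboot_notifier() call in cfi_intelext_destroy() below.

    /* Sketch: hook the notifier up when the MTD is created (assumed to
     * mirror what the setup code elsewhere in this patch does). */
    static void example_register_reset_on_reboot(struct mtd_info *mtd)
    {
            mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
            register_reboot_notifier(&mtd->reboot_notifier);
    }
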
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
+ cfi_intelext_reset(mtd);
+ unregister_reboot_notifier(&mtd->reboot_notifier);
kfree(cfi->cmdset_priv);
+ kfree(cfi->cfiq);
+ kfree(cfi->chips[0].priv);
kfree(cfi);
+ kfree(mtd->eraseregions);
}
-static char im_name_1[]="cfi_cmdset_0001";
-static char im_name_3[]="cfi_cmdset_0003";
+static char im_name_0001[] = "cfi_cmdset_0001";
+static char im_name_0003[] = "cfi_cmdset_0003";
+static char im_name_0200[] = "cfi_cmdset_0200";
-int __init cfi_intelext_init(void)
+static int __init cfi_intelext_init(void)
{
- inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
- inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
+ inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
+ inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
+ inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
return 0;
}
static void __exit cfi_intelext_exit(void)
{
- inter_module_unregister(im_name_1);
- inter_module_unregister(im_name_3);
+ inter_module_unregister(im_name_0001);
+ inter_module_unregister(im_name_0003);
+ inter_module_unregister(im_name_0200);
}
module_init(cfi_intelext_init);
diff --git a/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0002.c b/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0002.c
index 0a9c105..f3c4ef1 100644
--- a/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -3,19 +3,30 @@
* AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
*
* Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
+ * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
+ * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
*
* 2_by_8 routines added by Simon Munton
*
+ * 4_by_16 work by Carolyn J. Smith
+ *
+ * XIP support hooks by Vitaly Wool (based on code for Intel flash
+ * by Nicolas Pitre)
+ *
+ * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
+ *
* This code is GPL
*
- * $Id: cfi_cmdset_0002.c,v 1.52 2001/10/24 09:37:30 dwmw2 Exp $
+ * $Id: cfi_cmdset_0002.c,v 1.123 2005/11/29 20:01:26 gleixner Exp $
*
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -23,265 +34,775 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
+#include <linux/mtd/xip.h>
#define AMD_BOOTLOC_BUG
+#define FORCE_WORD_WRITE 0
+
+#define MAX_WORD_RETRIES 3
+
+#define MANUFACTURER_AMD 0x0001
+#define MANUFACTURER_SST 0x00BF
+#define SST49LF004B 0x0060
+#define SST49LF008A 0x005a
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
-static int cfi_amdstd_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
-static int cfi_amdstd_erase_onesize(struct mtd_info *, struct erase_info *);
+static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
+static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static void cfi_amdstd_destroy(struct mtd_info *);
struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
-static struct mtd_info *cfi_amdstd_setup (struct map_info *);
+static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
+#include "fwh_lock.h"
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
- probe: NULL, /* Not usable directly */
- destroy: cfi_amdstd_destroy,
- name: "cfi_cmdset_0002",
- module: THIS_MODULE
+ .probe = NULL, /* Not usable directly */
+ .destroy = cfi_amdstd_destroy,
+ .name = "cfi_cmdset_0002",
+ .module = THIS_MODULE
+};
+
+
+/* #define DEBUG_CFI_FEATURES */
+
+
+#ifdef DEBUG_CFI_FEATURES
+static void cfi_tell_features(struct cfi_pri_amdstd *extp)
+{
+ const char* erase_suspend[3] = {
+ "Not supported", "Read only", "Read/write"
+ };
+ const char* top_bottom[6] = {
+ "No WP", "8x8KiB sectors at top & bottom, no WP",
+ "Bottom boot", "Top boot",
+ "Uniform, Bottom WP", "Uniform, Top WP"
+ };
+
+ printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
+ printk(" Address sensitive unlock: %s\n",
+ (extp->SiliconRevision & 1) ? "Not required" : "Required");
+
+ if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
+ printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
+ else
+ printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
+
+ if (extp->BlkProt == 0)
+ printk(" Block protection: Not supported\n");
+ else
+ printk(" Block protection: %d sectors per group\n", extp->BlkProt);
+
+
+ printk(" Temporary block unprotect: %s\n",
+ extp->TmpBlkUnprotect ? "Supported" : "Not supported");
+ printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
+ printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
+ printk(" Burst mode: %s\n",
+ extp->BurstMode ? "Supported" : "Not supported");
+ if (extp->PageMode == 0)
+ printk(" Page mode: Not supported\n");
+ else
+ printk(" Page mode: %d word page\n", extp->PageMode << 2);
+
+ printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
+ extp->VppMin >> 4, extp->VppMin & 0xf);
+ printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
+ extp->VppMax >> 4, extp->VppMax & 0xf);
+
+ if (extp->TopBottom < ARRAY_SIZE(top_bottom))
+ printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
+ else
+ printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
+}
+#endif
+
+#ifdef AMD_BOOTLOC_BUG
+/* Wheee. Bring me the head of someone at AMD. */
+static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ __u8 major = extp->MajorVersion;
+ __u8 minor = extp->MinorVersion;
+
+ if (((major << 8) | minor) < 0x3131) {
+ /* CFI version 1.0 => don't trust bootloc */
+ if (cfi->id & 0x80) {
+ printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
+ extp->TopBottom = 3; /* top boot */
+ } else {
+ extp->TopBottom = 2; /* bottom boot */
+ }
+ }
+}
+#endif
+
+static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ if (cfi->cfiq->BufWriteTimeoutTyp) {
+ DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
+ mtd->write = cfi_amdstd_write_buffers;
+ }
+}
+
+static void fixup_use_secsi(struct mtd_info *mtd, void *param)
+{
+ /* Setup for chips with a secsi area */
+ mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
+ mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
+}
+
+static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ if ((cfi->cfiq->NumEraseRegions == 1) &&
+ ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
+ mtd->erase = cfi_amdstd_erase_chip;
+ }
+
+}
+
+static struct cfi_fixup cfi_fixup_table[] = {
+#ifdef AMD_BOOTLOC_BUG
+ { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
+#endif
+ { CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
+ { CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
+ { CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
+ { CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
+ { CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
+ { CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
+#if !FORCE_WORD_WRITE
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
+#endif
+ { 0, 0, NULL, NULL }
+};
+static struct cfi_fixup jedec_fixup_table[] = {
+ { MANUFACTURER_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
+ { MANUFACTURER_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
+ { 0, 0, NULL, NULL }
+};
+
+static struct cfi_fixup fixup_table[] = {
+ /* The CFI vendor IDs and the JEDEC vendor IDs appear
+ * to be common, and the same seems to hold for the
+ * device IDs. This table picks up all cases where
+ * we know that is the case.
+ */
+ { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
+ { 0, 0, NULL, NULL }
};
+
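For illustration only (not part of the patch): three fixup tables are consulted, CFI-specific, JEDEC-specific and a generic one applied to both probe modes. Below is a simplified model of how cfi_fixup() (implemented in cfi_util.c, not shown here) is assumed to match entries, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards; this is a sketch, not the real implementation.

    /* Simplified model of cfi_fixup() matching (sketch only). */
    static void example_apply_fixups(struct mtd_info *mtd, struct cfi_fixup *f)
    {
            struct map_info *map = mtd->priv;
            struct cfi_private *cfi = map->fldrv_priv;

            for (; f->fixup; f++) {
                    if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
                        (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
                            f->fixup(mtd, f->param);
            }
    }
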
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
- unsigned char bootloc;
- int ofs_factor = cfi->interleave * cfi->device_type;
+ struct mtd_info *mtd;
int i;
- __u8 major, minor;
- __u32 base = cfi->chips[0].start;
- if (cfi->cfi_mode==1){
+ mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
+ if (!mtd) {
+ printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
+ return NULL;
+ }
+ memset(mtd, 0, sizeof(*mtd));
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+
+ /* Fill in the default mtd operations */
+ mtd->erase = cfi_amdstd_erase_varsize;
+ mtd->write = cfi_amdstd_write_words;
+ mtd->read = cfi_amdstd_read;
+ mtd->sync = cfi_amdstd_sync;
+ mtd->suspend = cfi_amdstd_suspend;
+ mtd->resume = cfi_amdstd_resume;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->name = map->name;
+
+ if (cfi->cfi_mode==CFI_MODE_CFI){
+ unsigned char bootloc;
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure. So we read the feature
+ * table from it.
+ */
__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
+ struct cfi_pri_amdstd *extp;
- cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
-
- major = cfi_read_query(map, base + (adr+3)*ofs_factor);
- minor = cfi_read_query(map, base + (adr+4)*ofs_factor);
-
- printk(KERN_NOTICE " Amd/Fujitsu Extended Query Table v%c.%c at 0x%4.4X\n",
- major, minor, adr);
- cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
-
- cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
- cfi->mfr = cfi_read_query(map, base);
- cfi->id = cfi_read_query(map, base + ofs_factor);
-
- /* Wheee. Bring me the head of someone at AMD. */
-#ifdef AMD_BOOTLOC_BUG
- if (((major << 8) | minor) < 0x3131) {
- /* CFI version 1.0 => don't trust bootloc */
- if (cfi->id & 0x80) {
- printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
- bootloc = 3; /* top boot */
- } else {
- bootloc = 2; /* bottom boot */
- }
- } else
+ extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
+ if (!extp) {
+ kfree(mtd);
+ return NULL;
+ }
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+ printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ kfree(mtd);
+ return NULL;
+ }
+
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
+
+ /* Apply cfi device specific fixups */
+ cfi_fixup(mtd, cfi_fixup_table);
+
+#ifdef DEBUG_CFI_FEATURES
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
#endif
- {
- cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
- bootloc = cfi_read_query(map, base + (adr+15)*ofs_factor);
- }
+
+ bootloc = extp->TopBottom;
+ if ((bootloc != 2) && (bootloc != 3)) {
+ printk(KERN_WARNING "%s: CFI does not contain boot "
+ "bank location. Assuming top.\n", map->name);
+ bootloc = 2;
+ }
+
if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
printk(KERN_WARNING "%s: Swapping erase regions for broken CFI table.\n", map->name);
-
+
for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
int j = (cfi->cfiq->NumEraseRegions-1)-i;
__u32 swap;
-
+
swap = cfi->cfiq->EraseRegionInfo[i];
cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
cfi->cfiq->EraseRegionInfo[j] = swap;
}
}
- switch (cfi->device_type) {
- case CFI_DEVICETYPE_X8:
- cfi->addr_unlock1 = 0x555;
- cfi->addr_unlock2 = 0x2aa;
- break;
- case CFI_DEVICETYPE_X16:
+ /* Set the default CFI lock/unlock addresses */
+ cfi->addr_unlock1 = 0x555;
+ cfi->addr_unlock2 = 0x2aa;
+ /* Modify the unlock address if we are in compatibility mode */
+ if ( /* x16 in x8 mode */
+ ((cfi->device_type == CFI_DEVICETYPE_X8) &&
+ (cfi->cfiq->InterfaceDesc == 2)) ||
+ /* x32 in x16 mode */
+ ((cfi->device_type == CFI_DEVICETYPE_X16) &&
+ (cfi->cfiq->InterfaceDesc == 4)))
+ {
cfi->addr_unlock1 = 0xaaa;
- if (map->buswidth == cfi->interleave) {
- /* X16 chip(s) in X8 mode */
- cfi->addr_unlock2 = 0x555;
- } else {
- cfi->addr_unlock2 = 0x554;
- }
- break;
- case CFI_DEVICETYPE_X32:
- cfi->addr_unlock1 = 0x1555;
- cfi->addr_unlock2 = 0xaaa;
- break;
- default:
- printk(KERN_NOTICE "Eep. Unknown cfi_cmdset_0002 device type %d\n", cfi->device_type);
- return NULL;
+ cfi->addr_unlock2 = 0x555;
}
+
} /* CFI mode */
+ else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
+ /* Apply jedec specific fixups */
+ cfi_fixup(mtd, jedec_fixup_table);
+ }
+ /* Apply generic fixups */
+ cfi_fixup(mtd, fixup_table);
for (i=0; i< cfi->numchips; i++) {
cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
- }
-
+ }
+
map->fldrv = &cfi_amdstd_chipdrv;
- MOD_INC_USE_COUNT;
- cfi_send_gen_cmd(0xf0, 0x55, base, map, cfi, cfi->device_type, NULL);
- return cfi_amdstd_setup(map);
+ return cfi_amdstd_setup(mtd);
}
-static struct mtd_info *cfi_amdstd_setup(struct map_info *map)
+
+static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
+ struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- struct mtd_info *mtd;
unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
+ unsigned long offset = 0;
+ int i,j;
- mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
- printk(KERN_NOTICE "number of %s chips: %d\n", (cfi->cfi_mode)?"CFI":"JEDEC",cfi->numchips);
+ printk(KERN_NOTICE "number of %s chips: %d\n",
+ (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
+ /* Select the correct geometry setup */
+ mtd->size = devsize * cfi->numchips;
- if (!mtd) {
- printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
- kfree(cfi->cmdset_priv);
- return NULL;
+ mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
+ mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
+ * mtd->numeraseregions, GFP_KERNEL);
+ if (!mtd->eraseregions) {
+ printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
+ goto setup_err;
}
- memset(mtd, 0, sizeof(*mtd));
- mtd->priv = map;
- mtd->type = MTD_NORFLASH;
- /* Also select the correct geometry setup too */
- mtd->size = devsize * cfi->numchips;
-
- if (cfi->cfiq->NumEraseRegions == 1) {
- /* No need to muck about with multiple erase sizes */
- mtd->erasesize = ((cfi->cfiq->EraseRegionInfo[0] >> 8) & ~0xff) * cfi->interleave;
- } else {
- unsigned long offset = 0;
- int i,j;
-
- mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
- mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) * mtd->numeraseregions, GFP_KERNEL);
- if (!mtd->eraseregions) {
- printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
- kfree(cfi->cmdset_priv);
- return NULL;
- }
-
- for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
- unsigned long ernum, ersize;
- ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
- ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
-
- if (mtd->erasesize < ersize) {
- mtd->erasesize = ersize;
- }
- for (j=0; j<cfi->numchips; j++) {
- mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
- mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
- mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
- }
- offset += (ersize * ernum);
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ unsigned long ernum, ersize;
+ ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
+ ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
+
+ if (mtd->erasesize < ersize) {
+ mtd->erasesize = ersize;
}
- if (offset != devsize) {
- /* Argh */
- printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
- kfree(mtd->eraseregions);
- kfree(cfi->cmdset_priv);
- return NULL;
+ for (j=0; j<cfi->numchips; j++) {
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
}
+ offset += (ersize * ernum);
+ }
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ goto setup_err;
+ }
#if 0
- // debug
- for (i=0; i<mtd->numeraseregions;i++){
- printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
- i,mtd->eraseregions[i].offset,
- mtd->eraseregions[i].erasesize,
- mtd->eraseregions[i].numblocks);
- }
-#endif
+ // debug
+ for (i=0; i<mtd->numeraseregions;i++){
+ printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
+ i,mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
}
-
- switch (CFIDEV_BUSWIDTH)
- {
- case 1:
- case 2:
- case 4:
-#if 1
- if (mtd->numeraseregions > 1)
- mtd->erase = cfi_amdstd_erase_varsize;
- else
#endif
- mtd->erase = cfi_amdstd_erase_onesize;
- mtd->read = cfi_amdstd_read;
- mtd->write = cfi_amdstd_write;
- break;
- default:
- printk(KERN_WARNING "Unsupported buswidth\n");
+ /* FIXME: erase-suspend-program is broken. See
+ http://lists.infradead.org/pipermail/linux-mtd/2003-December/009001.html */
+ printk(KERN_NOTICE "cfi_cmdset_0002: Disabling erase-suspend-program due to code brokenness.\n");
+
+ __module_get(THIS_MODULE);
+ return mtd;
+
+ setup_err:
+ if(mtd) {
+ kfree(mtd->eraseregions);
kfree(mtd);
- kfree(cfi->cmdset_priv);
- return NULL;
- break;
}
- mtd->sync = cfi_amdstd_sync;
- mtd->suspend = cfi_amdstd_suspend;
- mtd->resume = cfi_amdstd_resume;
- mtd->flags = MTD_CAP_NORFLASH;
- map->fldrv = &cfi_amdstd_chipdrv;
- mtd->name = map->name;
- MOD_INC_USE_COUNT;
- return mtd;
+ kfree(cfi->cmdset_priv);
+ kfree(cfi->cfiq);
+ return NULL;
}
-static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+/*
+ * Return true if the chip is ready.
+ *
+ * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
+ * non-suspended sector) and is indicated by no toggle bits toggling.
+ *
+ * Note that anything more complicated than checking if no bits are toggling
+ * (including checking DQ5 for an error status) is tricky to get working
+ * correctly and is therefore not done (particularly with interleaved chips
+ * as each chip must be checked independently of the others).
+ */
+static int __xipram chip_ready(struct map_info *map, unsigned long addr)
+{
+ map_word d, t;
+
+ d = map_read(map, addr);
+ t = map_read(map, addr);
+
+ return map_word_equal(map, d, t);
+}
+
+/*
+ * Return true if the chip is ready and has the correct value.
+ *
+ * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
+ * non-suspended sector) and it is indicated by no bits toggling.
+ *
+ * Errors are indicated by bits that keep toggling or by bits held at the
+ * wrong value.
+ *
+ * Note that anything more complicated than checking if no bits are toggling
+ * (including checking DQ5 for an error status) is tricky to get working
+ * correctly and is therefore not done (particularly with interleaved chips
+ * as each chip must be checked independently of the others).
+ *
+ */
+static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
+{
+ map_word oldd, curd;
+
+ oldd = map_read(map, addr);
+ curd = map_read(map, addr);
+
+ return map_word_equal(map, oldd, curd) &&
+ map_word_equal(map, curd, expected);
+}
+
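For illustration only (not part of the patch): chip_ready() and chip_good() implement the AMD toggle-bit test, two back-to-back reads that match once the embedded algorithm has finished. Below is a sketch of the polling loop a write path can build on top of them; the timeout handling is deliberately simpler than in the real write path.

    /* Sketch: wait for an AMD-style program operation to finish. */
    static int example_wait_word_done(struct map_info *map, unsigned long adr,
                                      map_word expected, unsigned long timeo)
    {
            for (;;) {
                    if (chip_good(map, adr, expected))
                            return 0;       /* stable and correct data read back */
                    if (time_after(jiffies, timeo))
                            return chip_good(map, adr, expected) ? 0 : -EIO;
                    cfi_udelay(1);
            }
    }
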
+static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
DECLARE_WAITQUEUE(wait, current);
- unsigned long timeo = jiffies + HZ;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo;
+ struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
+ resettime:
+ timeo = jiffies + HZ;
retry:
- cfi_spin_lock(chip->mutex);
+ switch (chip->state) {
- if (chip->state != FL_READY){
-#if 0
- printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
-#endif
+ case FL_STATUS:
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+ spin_unlock(chip->mutex);
+ return -EIO;
+ }
+ spin_unlock(chip->mutex);
+ cfi_udelay(1);
+ spin_lock(chip->mutex);
+ /* Someone else might have been playing with it. */
+ goto retry;
+ }
+
+ case FL_READY:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ return 0;
+
+ case FL_ERASING:
+ if (mode == FL_WRITING) /* FIXME: Erase-suspend-program appears broken. */
+ goto sleep;
+
+ if (!(mode == FL_READY || mode == FL_POINT
+ || !cfip
+ || (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))
+ || (mode == FL_WRITING && (cfip->EraseSuspend & 0x1))))
+ goto sleep;
+
+ /* We could check to see if we're trying to access the sector
+ * that is currently being erased. However, no user will try
+ * anything like that so we just wait for the timeout. */
+
+ /* Erase suspend */
+ /* It's harmless to issue the Erase-Suspend and Erase-Resume
+ * commands when the erase algorithm isn't in progress. */
+ map_write(map, CMD(0xB0), chip->in_progress_block_addr);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ chip->erase_suspended = 1;
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ /* Should have suspended the erase by now.
+ * Send an Erase-Resume command as either
+ * there was an error (so leave the erase
+ * routine to recover from it) or we were trying to
+ * use the erase-in-progress sector. */
+ map_write(map, CMD(0x30), chip->in_progress_block_addr);
+ chip->state = FL_ERASING;
+ chip->oldstate = FL_READY;
+ printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
+ return -EIO;
+ }
+
+ spin_unlock(chip->mutex);
+ cfi_udelay(1);
+ spin_lock(chip->mutex);
+ /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
+ So we can just loop here. */
+ }
+ chip->state = FL_READY;
+ return 0;
+
+ case FL_XIP_WHILE_ERASING:
+ if (mode != FL_READY && mode != FL_POINT &&
+ (!cfip || !(cfip->EraseSuspend&2)))
+ goto sleep;
+ chip->oldstate = chip->state;
+ chip->state = FL_READY;
+ return 0;
+
+ case FL_POINT:
+ /* Only if there's no operation suspended... */
+ if (mode == FL_READY && chip->oldstate == FL_READY)
+ return 0;
+
+ default:
+ sleep:
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
-
- cfi_spin_unlock(chip->mutex);
-
+ spin_unlock(chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
-#if 0
- if(signal_pending(current))
- return -EINTR;
+ spin_lock(chip->mutex);
+ goto resettime;
+ }
+}
+
+
+static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ switch(chip->oldstate) {
+ case FL_ERASING:
+ chip->state = chip->oldstate;
+ map_write(map, CMD(0x30), chip->in_progress_block_addr);
+ chip->oldstate = FL_READY;
+ chip->state = FL_ERASING;
+ break;
+
+ case FL_XIP_WHILE_ERASING:
+ chip->state = chip->oldstate;
+ chip->oldstate = FL_READY;
+ break;
+
+ case FL_READY:
+ case FL_STATUS:
+ /* We should really make set_vpp() count, rather than doing this */
+ DISABLE_VPP(map);
+ break;
+ default:
+ printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
+ }
+ wake_up(&chip->wq);
+}
+
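For illustration only (not part of the patch): get_chip() and put_chip() form the arbitration bracket every operation in this rewrite uses: take the per-chip spinlock, claim the chip for a mode, do the work, then release it and wake any waiters. A minimal sketch of that bracket, with a hypothetical body, is shown below.

    /* Sketch: the canonical get_chip()/put_chip() bracket. */
    static int example_with_chip(struct map_info *map, struct flchip *chip,
                                 unsigned long adr, int mode)
    {
            int ret;

            spin_lock(chip->mutex);
            ret = get_chip(map, chip, adr, mode);
            if (ret) {
                    spin_unlock(chip->mutex);
                    return ret;
            }

            /* ... issue commands with map_write()/map_read() here ... */

            put_chip(map, chip, adr);
            spin_unlock(chip->mutex);
            return ret;
    }
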
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * No interrupt whatsoever can be serviced while the flash isn't in array
+ * mode. This is ensured by the xip_disable() and xip_enable() functions
+ * enclosing any code path where the flash is known not to be in array mode.
+ * Within an XIP-disabled code path, only functions marked with __xipram
+ * may be called and nothing else (it's a good idea to inspect the generated
+ * assembly to make sure inline functions were actually inlined and that gcc
+ * didn't emit calls to its own support functions). Configuring MTD CFI
+ * support for a single buswidth and a single interleave is also recommended.
+ */
+
+static void xip_disable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ /* TODO: chips with no XIP use should ignore and return */
+ (void) map_read(map, adr); /* ensure mmu mapping is up to date */
+ local_irq_disable();
+}
+
+static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
+ unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xf0), adr);
+ chip->state = FL_READY;
+ }
+ (void) map_read(map, adr);
+ xip_iprefetch();
+ local_irq_enable();
+}
+
+/*
+ * When a delay is required for the flash operation to complete, the
+ * xip_udelay() function is polling for both the given timeout and pending
+ * (but still masked) hardware interrupts. Whenever there is an interrupt
+ * pending then the flash erase operation is suspended, array mode restored
+ * and interrupts unmasked. Task scheduling might also happen at that
+ * point. The CPU eventually returns from the interrupt or the call to
+ * schedule() and the suspended flash operation is resumed for the remainder
+ * of the delay period.
+ *
+ * Warning: this function _will_ fool interrupt latency tracing tools.
+ */
+
+static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int usec)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+ map_word status, OK = CMD(0x80);
+ unsigned long suspended, start = xip_currtime();
+ flstate_t oldstate;
+
+ do {
+ cpu_relax();
+ if (xip_irqpending() && extp &&
+ ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
+ (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
+ /*
+ * Let's suspend the erase operation when supported.
+ * Note that we currently don't try to suspend
+ * interleaved chips if there is already another
+ * operation suspended (imagine what happens
+ * when one chip was already done with the current
+ * operation while another chip suspended it, then
+ * we resume the whole thing at once). Yes, it
+ * can happen!
+ */
+ map_write(map, CMD(0xb0), adr);
+ usec -= xip_elapsed_since(start);
+ suspended = xip_currtime();
+ do {
+ if (xip_elapsed_since(suspended) > 100000) {
+ /*
+ * The chip doesn't want to suspend
+ * after waiting for 100 msecs.
+ * This is a critical error but there
+ * is not much we can do here.
+ */
+ return;
+ }
+ status = map_read(map, adr);
+ } while (!map_word_andequal(map, status, OK, OK));
+
+ /* Suspend succeeded */
+ oldstate = chip->state;
+ if (!map_word_bitsset(map, status, CMD(0x40)))
+ break;
+ chip->state = FL_XIP_WHILE_ERASING;
+ chip->erase_suspended = 1;
+ map_write(map, CMD(0xf0), adr);
+ (void) map_read(map, adr);
+ asm volatile (".rep 8; nop; .endr");
+ local_irq_enable();
+ spin_unlock(chip->mutex);
+ asm volatile (".rep 8; nop; .endr");
+ cond_resched();
+
+ /*
+ * We're back. However someone else might have
+ * decided to go write to the chip if we are in
+ * a suspended erase state. If so let's wait
+ * until it's done.
+ */
+ spin_lock(chip->mutex);
+ while (chip->state != FL_XIP_WHILE_ERASING) {
+ DECLARE_WAITQUEUE(wait, current);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ spin_lock(chip->mutex);
+ }
+ /* Disallow XIP again */
+ local_irq_disable();
+
+ /* Resume the write or erase operation */
+ map_write(map, CMD(0x30), adr);
+ chip->state = oldstate;
+ start = xip_currtime();
+ } else if (usec >= 1000000/HZ) {
+ /*
+ * Try to save on CPU power when the waiting delay
+ * is at least a system timer tick period.
+ * No need to be extremely accurate here.
+ */
+ xip_cpu_idle();
+ }
+ status = map_read(map, adr);
+ } while (!map_word_andequal(map, status, OK, OK)
+ && xip_elapsed_since(start) < usec);
+}
+
+#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
+
+/*
+ * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
+ * the flash is actively programming or erasing since we have to poll for
+ * the operation to complete anyway. We can't do that in a generic way with
+ * a XIP setup so do it before the actual flash operation in this case
+ * and stub it out from INVALIDATE_CACHE_UDELAY.
+ */
+#define XIP_INVAL_CACHED_RANGE(map, from, size) \
+ INVALIDATE_CACHED_RANGE(map, from, size)
+
+#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+ UDELAY(map, chip, adr, usec)
+
+/*
+ * Extra notes:
+ *
+ * Activating this XIP support changes the way the code works a bit. For
+ * example the code to suspend the current process when concurrent access
+ * happens is never executed because xip_udelay() will always return with the
+ * same chip state as it was entered with. This is why it does not matter
+ * that add_wait_queue() or schedule() are called from within a couple of
+ * xip_disable()'d areas of code, such as in do_erase_oneblock.
+ * The queueing and scheduling always happen within xip_udelay().
+ *
+ * Similarly, get_chip() and put_chip() just happen to always be executed
+ * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the flash
+ * in array mode, so most of the cases they handle are never reached and
+ * XIP is not disturbed.
+ */
+
+#else
+
+#define xip_disable(map, chip, adr)
+#define xip_enable(map, chip, adr)
+#define XIP_INVAL_CACHED_RANGE(x...)
+
+#define UDELAY(map, chip, adr, usec) \
+do { \
+ spin_unlock(chip->mutex); \
+ cfi_udelay(usec); \
+ spin_lock(chip->mutex); \
+} while (0)
+
+#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+do { \
+ spin_unlock(chip->mutex); \
+ INVALIDATE_CACHED_RANGE(map, adr, len); \
+ cfi_udelay(usec); \
+ spin_lock(chip->mutex); \
+} while (0)
+
#endif
- timeo = jiffies + HZ;
- goto retry;
- }
+static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+{
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret;
adr += chip->start;
- chip->state = FL_READY;
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
- map->copy_from(map, buf, adr, len);
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, cmd_addr, FL_READY);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
- wake_up(&chip->wq);
- cfi_spin_unlock(chip->mutex);
+ if (chip->state != FL_POINT && chip->state != FL_READY) {
+ map_write(map, CMD(0xf0), cmd_addr);
+ chip->state = FL_READY;
+ }
+ map_copy_from(map, buf, adr, len);
+
+ put_chip(map, chip, cmd_addr);
+
+ spin_unlock(chip->mutex);
return 0;
}
+
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
struct map_info *map = mtd->priv;
@@ -323,95 +844,218 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_
return ret;
}
-static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u32 datum, int fast)
+
+static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
+ DECLARE_WAITQUEUE(wait, current);
unsigned long timeo = jiffies + HZ;
- unsigned int Last[4];
- unsigned long Count = 0;
struct cfi_private *cfi = map->fldrv_priv;
- DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
retry:
- cfi_spin_lock(chip->mutex);
+ spin_lock(chip->mutex);
if (chip->state != FL_READY){
#if 0
- printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", chip->state);
+ printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
-
- cfi_spin_unlock(chip->mutex);
+
+ spin_unlock(chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
#if 0
- printk(KERN_DEBUG "Wake up to write:\n");
if(signal_pending(current))
return -EINTR;
#endif
timeo = jiffies + HZ;
goto retry;
- }
+ }
- chip->state = FL_WRITING;
+ adr += chip->start;
+
+ chip->state = FL_READY;
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+
+ map_copy_from(map, buf, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+
+ wake_up(&chip->wq);
+ spin_unlock(chip->mutex);
+
+ return 0;
+}
+
+static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+
+ /* ofs: offset within the first chip that the first read should start */
+
+ /* 8 secsi bytes per chip */
+ chipnum=from>>3;
+ ofs=from & 7;
+
+
+ *retlen = 0;
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> 3)
+ thislen = (1<<3) - ofs;
+ else
+ thislen = len;
+
+ ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+ buf += thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return ret;
+}
+
+
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo = jiffies + HZ;
+ /*
+ * We use a 1ms + 1 jiffies generic timeout for writes (most devices
+ * have a max write time of a few hundred usec). However, we should
+ * use the maximum timeout value given by the chip at probe time
+ * instead. Unfortunately, struct flchip does not have a field for the
+ * maximum timeout, only for the typical one, which can be far too short
+ * depending on the conditions. The ' + 1' is to avoid having a
+ * timeout of 0 jiffies if HZ is smaller than 1000.
+ */
+ unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
+ int ret = 0;
+ map_word oldd;
+ int retry_cnt = 0;
adr += chip->start;
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, adr, FL_WRITING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+ __func__, adr, datum.x[0] );
+
+ /*
+ * Check for a NOP for the case when the datum to write is already
+ * present - it saves time and works around buggy chips that corrupt
+ * data at other locations when 0xff is written to a location that
+ * already contains 0xff.
+ */
+ oldd = map_read(map, adr);
+ if (map_word_equal(map, oldd, datum)) {
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
+ __func__);
+ goto op_done;
+ }
+
+ XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
ENABLE_VPP(map);
- if (fast) { /* Unlock bypass */
- cfi_send_gen_cmd(0xA0, 0, chip->start, map, cfi, cfi->device_type, NULL);
- }
- else {
- cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- }
-
- cfi_write(map, datum, adr);
-
- cfi_spin_unlock(chip->mutex);
- cfi_udelay(chip->word_write_time);
- cfi_spin_lock(chip->mutex);
-
- Last[0] = cfi_read(map, adr);
- // printk("Last[0] is %x\n", Last[0]);
- Last[1] = cfi_read(map, adr);
- // printk("Last[1] is %x\n", Last[1]);
- Last[2] = cfi_read(map, adr);
- // printk("Last[2] is %x\n", Last[2]);
-
- for (Count = 3; Last[(Count - 1) % 4] != datum && Count < 500000; Count++){
- cfi_spin_unlock(chip->mutex);
- cfi_udelay(10);
- cfi_spin_lock(chip->mutex);
-
- Last[Count % 4] = cfi_read(map, adr);
- // printk("Last[%d%%4] is %x\n", Count, Last[Count%4]);
- }
-
- if (Last[(Count - 1) % 4] != datum){
- printk(KERN_WARNING "Last[%ld] is %x, datum is %x\n",(Count - 1) % 4,Last[(Count - 1) % 4],datum);
- cfi_send_gen_cmd(0xF0, 0, chip->start, map, cfi, cfi->device_type, NULL);
- DISABLE_VPP(map);
+ xip_disable(map, chip, adr);
+ retry:
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ map_write(map, datum, adr);
+ chip->state = FL_WRITING;
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, map_bankwidth(map),
+ chip->word_write_time);
+
+ /* See comment above for timeout value. */
+ timeo = jiffies + uWriteTimeout;
+ for (;;) {
+ if (chip->state != FL_WRITING) {
+ /* Someone's suspended the write. Sleep */
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ spin_lock(chip->mutex);
+ continue;
+ }
+
+ if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
+ xip_enable(map, chip, adr);
+ printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
+ xip_disable(map, chip, adr);
+ break;
+ }
+
+ if (chip_ready(map, adr))
+ break;
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1);
+ }
+ /* Did we succeed? */
+ if (!chip_good(map, adr, datum)) {
+ /* reset on all failures. */
+ map_write( map, CMD(0xF0), chip->start );
+ /* FIXME - should have reset delay before continuing */
+
+ if (++retry_cnt <= MAX_WORD_RETRIES)
+ goto retry;
+
ret = -EIO;
- }
- DISABLE_VPP(map);
+ }
+ xip_enable(map, chip, adr);
+ op_done:
chip->state = FL_READY;
- wake_up(&chip->wq);
- cfi_spin_unlock(chip->mutex);
-
+ put_chip(map, chip, adr);
+ spin_unlock(chip->mutex);
+
return ret;
}
-static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
+
+static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int ret = 0;
int chipnum;
unsigned long ofs, chipstart;
+ DECLARE_WAITQUEUE(wait, current);
*retlen = 0;
if (!len)
@@ -422,372 +1066,548 @@ static int cfi_amdstd_write (struct mtd_info *mtd, loff_t to , size_t len, size_
chipstart = cfi->chips[chipnum].start;
/* If it's not bus-aligned, do the first byte write */
- if (ofs & (CFIDEV_BUSWIDTH-1)) {
- unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
+ if (ofs & (map_bankwidth(map)-1)) {
+ unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
int i = ofs - bus_ofs;
int n = 0;
- u_char tmp_buf[4];
- __u32 datum;
+ map_word tmp_buf;
- map->copy_from(map, tmp_buf, bus_ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
- while (len && i < CFIDEV_BUSWIDTH)
- tmp_buf[i++] = buf[n++], len--;
+ retry:
+ spin_lock(cfi->chips[chipnum].mutex);
- if (cfi_buswidth_is_2()) {
- datum = *(__u16*)tmp_buf;
- } else if (cfi_buswidth_is_4()) {
- datum = *(__u32*)tmp_buf;
- } else {
- return -EINVAL; /* should never happen, but be safe */
+ if (cfi->chips[chipnum].state != FL_READY) {
+#if 0
+ printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
+#endif
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&cfi->chips[chipnum].wq, &wait);
+
+ spin_unlock(cfi->chips[chipnum].mutex);
+
+ schedule();
+ remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
+#if 0
+ if(signal_pending(current))
+ return -EINTR;
+#endif
+ goto retry;
}
- ret = do_write_oneword(map, &cfi->chips[chipnum],
- bus_ofs, datum, 0);
- if (ret)
+ /* Load 'tmp_buf' with old contents of flash */
+ tmp_buf = map_read(map, bus_ofs+chipstart);
+
+ spin_unlock(cfi->chips[chipnum].mutex);
+
+ /* Number of bytes to copy from buffer */
+ n = min_t(int, len, map_bankwidth(map)-i);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ bus_ofs, tmp_buf);
+ if (ret)
return ret;
-
+
ofs += n;
buf += n;
(*retlen) += n;
+ len -= n;
if (ofs >> cfi->chipshift) {
- chipnum ++;
+ chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
}
}
-
- /* Go into unlock bypass mode */
- if (cfi->fast_prog){
- cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
- }
/* We are now aligned, write as much as possible */
- while(len >= CFIDEV_BUSWIDTH) {
- __u32 datum;
-
- if (cfi_buswidth_is_1()) {
- datum = *(__u8*)buf;
- } else if (cfi_buswidth_is_2()) {
- datum = *(__u16*)buf;
- } else if (cfi_buswidth_is_4()) {
- datum = *(__u32*)buf;
- } else {
- return -EINVAL;
- }
+ while(len >= map_bankwidth(map)) {
+ map_word datum;
+
+ datum = map_word_load(map, buf);
+
ret = do_write_oneword(map, &cfi->chips[chipnum],
- ofs, datum, cfi->fast_prog);
- if (ret) {
- if (cfi->fast_prog){
- /* Get out of unlock bypass mode */
- cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
- }
+ ofs, datum);
+ if (ret)
return ret;
- }
- ofs += CFIDEV_BUSWIDTH;
- buf += CFIDEV_BUSWIDTH;
- (*retlen) += CFIDEV_BUSWIDTH;
- len -= CFIDEV_BUSWIDTH;
+ ofs += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ (*retlen) += map_bankwidth(map);
+ len -= map_bankwidth(map);
if (ofs >> cfi->chipshift) {
- if (cfi->fast_prog){
- /* Get out of unlock bypass mode */
- cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
- }
-
- chipnum ++;
+ chipnum ++;
ofs = 0;
if (chipnum == cfi->numchips)
return 0;
chipstart = cfi->chips[chipnum].start;
- if (cfi->fast_prog){
- /* Go into unlock bypass mode for next set of chips */
- cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x20, cfi->addr_unlock1, chipstart, map, cfi, CFI_DEVICETYPE_X8, NULL);
- }
}
}
- if (cfi->fast_prog){
- /* Get out of unlock bypass mode */
- cfi_send_gen_cmd(0x90, 0, chipstart, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x00, 0, chipstart, map, cfi, cfi->device_type, NULL);
- }
+ /* Write the trailing bytes if any */
+ if (len & (map_bankwidth(map)-1)) {
+ map_word tmp_buf;
- if (len & (CFIDEV_BUSWIDTH-1)) {
- int i = 0, n = 0;
- u_char tmp_buf[4];
- __u32 datum;
+ retry1:
+ spin_lock(cfi->chips[chipnum].mutex);
- map->copy_from(map, tmp_buf, ofs + cfi->chips[chipnum].start, CFIDEV_BUSWIDTH);
- while (len--)
- tmp_buf[i++] = buf[n++];
+ if (cfi->chips[chipnum].state != FL_READY) {
+#if 0
+ printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
+#endif
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&cfi->chips[chipnum].wq, &wait);
- if (cfi_buswidth_is_2()) {
- datum = *(__u16*)tmp_buf;
- } else if (cfi_buswidth_is_4()) {
- datum = *(__u32*)tmp_buf;
- } else {
- return -EINVAL; /* should never happen, but be safe */
+ spin_unlock(cfi->chips[chipnum].mutex);
+
+ schedule();
+ remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
+#if 0
+ if(signal_pending(current))
+ return -EINTR;
+#endif
+ goto retry1;
}
- ret = do_write_oneword(map, &cfi->chips[chipnum],
- ofs, datum, 0);
- if (ret)
+ tmp_buf = map_read(map, ofs + chipstart);
+
+ spin_unlock(cfi->chips[chipnum].mutex);
+
+ tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
+
+ ret = do_write_oneword(map, &cfi->chips[chipnum],
+ ofs, tmp_buf);
+ if (ret)
return ret;
-
- (*retlen) += n;
+
+ (*retlen) += len;
}
return 0;
}
-static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+
+/*
+ * FIXME: interleaved mode not tested, and probably not supported!
+ */
+static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const u_char *buf,
+ int len)
{
- unsigned int status;
- unsigned long timeo = jiffies + HZ;
struct cfi_private *cfi = map->fldrv_priv;
- unsigned int rdy_mask;
- DECLARE_WAITQUEUE(wait, current);
-
- retry:
- cfi_spin_lock(chip->mutex);
+ unsigned long timeo = jiffies + HZ;
+ /* see comments in do_write_oneword() regarding uWriteTimeout. */
+ unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
+ int ret = -EIO;
+ unsigned long cmd_adr;
+ int z, words;
+ map_word datum;
- if (chip->state != FL_READY){
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
-
- cfi_spin_unlock(chip->mutex);
+ adr += chip->start;
+ cmd_adr = adr;
- schedule();
- remove_wait_queue(&chip->wq, &wait);
-#if 0
- if(signal_pending(current))
- return -EINTR;
-#endif
- timeo = jiffies + HZ;
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, adr, FL_WRITING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
- goto retry;
- }
+ datum = map_word_load(map, buf);
- chip->state = FL_ERASING;
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+ __func__, adr, datum.x[0] );
- adr += chip->start;
+ XIP_INVAL_CACHED_RANGE(map, adr, len);
ENABLE_VPP(map);
- cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_write(map, CMD(0x30), adr);
-
- timeo = jiffies + (HZ*20);
+ xip_disable(map, chip, cmd_adr);
- cfi_spin_unlock(chip->mutex);
- schedule_timeout(HZ);
- cfi_spin_lock(chip->mutex);
-
- rdy_mask = CMD(0x80);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ //cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
- /* FIXME. Use a timer to check this, and return immediately. */
- /* Once the state machine's known to be working I'll do that */
+ /* Write Buffer Load */
+ map_write(map, CMD(0x25), cmd_adr);
- while ( ( (status = cfi_read(map,adr)) & rdy_mask ) != rdy_mask ) {
- static int z=0;
+ chip->state = FL_WRITING_TO_BUFFER;
+
+ /* Write length of data to come */
+ words = len / map_bankwidth(map);
+ map_write(map, CMD(words - 1), cmd_adr);
+ /* Write data */
+ z = 0;
+ while(z < words * map_bankwidth(map)) {
+ datum = map_word_load(map, buf);
+ map_write(map, datum, adr + z);
+
+ z += map_bankwidth(map);
+ buf += map_bankwidth(map);
+ }
+ z -= map_bankwidth(map);
+
+ adr += z;
+
+ /* Write Buffer Program Confirm: GO GO GO */
+ map_write(map, CMD(0x29), cmd_adr);
+ chip->state = FL_WRITING;
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, map_bankwidth(map),
+ chip->word_write_time);
+
+ timeo = jiffies + uWriteTimeout;
+
+ for (;;) {
+ if (chip->state != FL_WRITING) {
+ /* Someone's suspended the write. Sleep */
+ DECLARE_WAITQUEUE(wait, current);
- if (chip->state != FL_ERASING) {
- /* Someone's suspended the erase. Sleep */
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
-
- cfi_spin_unlock(chip->mutex);
- printk(KERN_DEBUG "erase suspended. Sleeping\n");
-
+ spin_unlock(chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
-#if 0
- if (signal_pending(current))
- return -EINTR;
-#endif
- timeo = jiffies + (HZ*2); /* FIXME */
- cfi_spin_lock(chip->mutex);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ spin_lock(chip->mutex);
continue;
}
- /* OK Still waiting */
- if (time_after(jiffies, timeo)) {
- chip->state = FL_READY;
- cfi_spin_unlock(chip->mutex);
- printk(KERN_WARNING "waiting for erase to complete timed out.");
- DISABLE_VPP(map);
- return -EIO;
+ if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+ break;
+
+ if (chip_ready(map, adr)) {
+ xip_enable(map, chip, adr);
+ goto op_done;
}
-
+
/* Latency issues. Drop the lock, wait a while and retry */
- cfi_spin_unlock(chip->mutex);
+ UDELAY(map, chip, adr, 1);
+ }
- z++;
- if ( 0 && !(z % 100 ))
- printk(KERN_WARNING "chip not ready yet after erase. looping\n");
+ /* reset on all failures. */
+ map_write( map, CMD(0xF0), chip->start );
+ xip_enable(map, chip, adr);
+ /* FIXME - should have reset delay before continuing */
- cfi_udelay(1);
-
- cfi_spin_lock(chip->mutex);
- continue;
- }
-
- /* Done and happy. */
- DISABLE_VPP(map);
+ printk(KERN_WARNING "MTD %s(): software timeout\n",
+ __func__ );
+
+ ret = -EIO;
+ op_done:
chip->state = FL_READY;
- wake_up(&chip->wq);
- cfi_spin_unlock(chip->mutex);
- return 0;
+ put_chip(map, chip, adr);
+ spin_unlock(chip->mutex);
+
+ return ret;
}
-static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
+
+static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- unsigned long adr, len;
- int chipnum, ret = 0;
- int i, first;
- struct mtd_erase_region_info *regions = mtd->eraseregions;
+ int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ int ret = 0;
+ int chipnum;
+ unsigned long ofs;
- if (instr->addr > mtd->size)
- return -EINVAL;
+ *retlen = 0;
+ if (!len)
+ return 0;
- if ((instr->len + instr->addr) > mtd->size)
- return -EINVAL;
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
- /* Check that both start and end of the requested erase are
- * aligned with the erasesize at the appropriate addresses.
- */
+ /* If it's not bus-aligned, do the first word write */
+ if (ofs & (map_bankwidth(map)-1)) {
+ size_t local_len = (-ofs)&(map_bankwidth(map)-1);
+ if (local_len > len)
+ local_len = len;
+ ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
+ local_len, retlen, buf);
+ if (ret)
+ return ret;
+ ofs += local_len;
+ buf += local_len;
+ len -= local_len;
- i = 0;
-
- /* Skip all erase regions which are ended before the start of
- the requested erase. Actually, to save on the calculations,
- we skip to the first erase region which starts after the
- start of the requested erase, and then go back one.
- */
-
- while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
- i++;
- i--;
-
- /* OK, now i is pointing at the erase region in which this
- erase request starts. Check the start of the requested
- erase range is aligned with the erase size which is in
- effect here.
- */
-
- if (instr->addr & (regions[i].erasesize-1))
- return -EINVAL;
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
- /* Remember the erase region we start on */
- first = i;
+ /* Write buffer is worth it only if more than one word to write... */
+ while (len >= map_bankwidth(map) * 2) {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
- /* Next, check that the end of the requested erase is aligned
- * with the erase region at that address.
- */
+ if (size > len)
+ size = len;
+ if (size % map_bankwidth(map))
+ size -= size % map_bankwidth(map);
- while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
- i++;
+ ret = do_write_buffer(map, &cfi->chips[chipnum],
+ ofs, buf, size);
+ if (ret)
+ return ret;
- /* As before, drop back one to point at the region in which
- the address actually falls
- */
- i--;
-
- if ((instr->addr + instr->len) & (regions[i].erasesize-1))
- return -EINVAL;
-
- chipnum = instr->addr >> cfi->chipshift;
- adr = instr->addr - (chipnum << cfi->chipshift);
- len = instr->len;
+ ofs += size;
+ buf += size;
+ (*retlen) += size;
+ len -= size;
- i=first;
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
- while(len) {
- ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
+ if (len) {
+ size_t retlen_dregs = 0;
- if (ret)
- return ret;
+ ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
+ len, &retlen_dregs, buf);
+
+ *retlen += retlen_dregs;
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Handle devices with one erase region, that only implement
+ * the chip erase command.
+ */
+static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo = jiffies + HZ;
+ unsigned long int adr;
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
+ adr = cfi->addr_unlock1;
+
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, adr, FL_WRITING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
+ __func__, chip->start );
+
+ XIP_INVAL_CACHED_RANGE(map, adr, map->size);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
- adr += regions[i].erasesize;
- len -= regions[i].erasesize;
+ chip->state = FL_ERASING;
+ chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, map->size,
+ chip->erase_time*500);
- if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
- i++;
+ timeo = jiffies + (HZ*20);
- if (adr >> cfi->chipshift) {
- adr = 0;
- chipnum++;
-
- if (chipnum >= cfi->numchips)
+ for (;;) {
+ if (chip->state != FL_ERASING) {
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ spin_lock(chip->mutex);
+ continue;
+ }
+ if (chip->erase_suspended) {
+ /* This erase was suspended and resumed.
+ Adjust the timeout */
+ timeo = jiffies + (HZ*20); /* FIXME */
+ chip->erase_suspended = 0;
+ }
+
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_WARNING "MTD %s(): software timeout\n",
+ __func__ );
break;
}
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1000000/HZ);
}
+ /* Did we succeed? */
+ if (!chip_good(map, adr, map_word_ff(map))) {
+ /* reset on all failures. */
+ map_write( map, CMD(0xF0), chip->start );
+ /* FIXME - should have reset delay before continuing */
- instr->state = MTD_ERASE_DONE;
- if (instr->callback)
- instr->callback(instr);
-
- return 0;
+ ret = -EIO;
+ }
+
+ chip->state = FL_READY;
+ xip_enable(map, chip, adr);
+ put_chip(map, chip, adr);
+ spin_unlock(chip->mutex);
+
+ return ret;
}
-static int cfi_amdstd_erase_onesize(struct mtd_info *mtd, struct erase_info *instr)
+
+static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
- struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
- unsigned long adr, len;
- int chipnum, ret = 0;
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
- if (instr->addr & (mtd->erasesize - 1))
- return -EINVAL;
+ adr += chip->start;
- if (instr->len & (mtd->erasesize -1))
- return -EINVAL;
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, adr, FL_ERASING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
- if ((instr->len + instr->addr) > mtd->size)
- return -EINVAL;
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
+ __func__, adr );
- chipnum = instr->addr >> cfi->chipshift;
- adr = instr->addr - (chipnum << cfi->chipshift);
- len = instr->len;
+ XIP_INVAL_CACHED_RANGE(map, adr, len);
+ ENABLE_VPP(map);
+ xip_disable(map, chip, adr);
- while(len) {
- ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
+ map_write(map, CMD(0x30), adr);
- if (ret)
- return ret;
+ chip->state = FL_ERASING;
+ chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
- adr += mtd->erasesize;
- len -= mtd->erasesize;
+ INVALIDATE_CACHE_UDELAY(map, chip,
+ adr, len,
+ chip->erase_time*500);
- if (adr >> cfi->chipshift) {
- adr = 0;
- chipnum++;
-
- if (chipnum >= cfi->numchips)
+ timeo = jiffies + (HZ*20);
+
+ for (;;) {
+ if (chip->state != FL_ERASING) {
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ spin_lock(chip->mutex);
+ continue;
+ }
+ if (chip->erase_suspended) {
+ /* This erase was suspended and resumed.
+ Adjust the timeout */
+ timeo = jiffies + (HZ*20); /* FIXME */
+ chip->erase_suspended = 0;
+ }
+
+ if (chip_ready(map, adr)) {
+ xip_enable(map, chip, adr);
break;
}
+
+ if (time_after(jiffies, timeo)) {
+ xip_enable(map, chip, adr);
+ printk(KERN_WARNING "MTD %s(): software timeout\n",
+ __func__ );
+ break;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ UDELAY(map, chip, adr, 1000000/HZ);
}
-
+ /* Did we succeed? */
+ if (!chip_good(map, adr, map_word_ff(map))) {
+ /* reset on all failures. */
+ map_write( map, CMD(0xF0), chip->start );
+ /* FIXME - should have reset delay before continuing */
+
+ ret = -EIO;
+ }
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr);
+ spin_unlock(chip->mutex);
+ return ret;
+}
+
+
+int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
+{
+ unsigned long ofs, len;
+ int ret;
+
+ ofs = instr->addr;
+ len = instr->len;
+
+ ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
+ if (ret)
+ return ret;
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+
+static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ret = 0;
+
+ if (instr->addr != 0)
+ return -EINVAL;
+
+ if (instr->len != mtd->size)
+ return -EINVAL;
+
+ ret = do_erase_chip(map, &cfi->chips[0]);
+ if (ret)
+ return ret;
+
instr->state = MTD_ERASE_DONE;
- if (instr->callback)
- instr->callback(instr);
-
+ mtd_erase_callback(instr);
+
return 0;
}
+
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
@@ -801,7 +1621,7 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
chip = &cfi->chips[i];
retry:
- cfi_spin_lock(chip->mutex);
+ spin_lock(chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -810,24 +1630,24 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_SYNCING;
- /* No need to wake_up() on this state change -
+ /* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
case FL_SYNCING:
- cfi_spin_unlock(chip->mutex);
+ spin_unlock(chip->mutex);
break;
default:
/* Not an idle state */
add_wait_queue(&chip->wq, &wait);
-
- cfi_spin_unlock(chip->mutex);
+
+ spin_unlock(chip->mutex);
schedule();
- remove_wait_queue(&chip->wq, &wait);
-
+ remove_wait_queue(&chip->wq, &wait);
+
goto retry;
}
}
@@ -837,13 +1657,13 @@ static void cfi_amdstd_sync (struct mtd_info *mtd)
for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- cfi_spin_lock(chip->mutex);
-
+ spin_lock(chip->mutex);
+
if (chip->state == FL_SYNCING) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- cfi_spin_unlock(chip->mutex);
+ spin_unlock(chip->mutex);
}
}
@@ -855,12 +1675,11 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
int i;
struct flchip *chip;
int ret = 0;
-//printk("suspend\n");
for (i=0; !ret && i<cfi->numchips; i++) {
chip = &cfi->chips[i];
- cfi_spin_lock(chip->mutex);
+ spin_lock(chip->mutex);
switch(chip->state) {
case FL_READY:
@@ -869,7 +1688,7 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
case FL_JEDEC_QUERY:
chip->oldstate = chip->state;
chip->state = FL_PM_SUSPENDED;
- /* No need to wake_up() on this state change -
+ /* No need to wake_up() on this state change -
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
@@ -880,51 +1699,51 @@ static int cfi_amdstd_suspend(struct mtd_info *mtd)
ret = -EAGAIN;
break;
}
- cfi_spin_unlock(chip->mutex);
+ spin_unlock(chip->mutex);
}
/* Unlock the chips again */
if (ret) {
- for (i--; i >=0; i--) {
+ for (i--; i >=0; i--) {
chip = &cfi->chips[i];
- cfi_spin_lock(chip->mutex);
-
+ spin_lock(chip->mutex);
+
if (chip->state == FL_PM_SUSPENDED) {
chip->state = chip->oldstate;
wake_up(&chip->wq);
}
- cfi_spin_unlock(chip->mutex);
+ spin_unlock(chip->mutex);
}
}
-
+
return ret;
}
+
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
int i;
struct flchip *chip;
-//printk("resume\n");
for (i=0; i<cfi->numchips; i++) {
-
+
chip = &cfi->chips[i];
- cfi_spin_lock(chip->mutex);
-
+ spin_lock(chip->mutex);
+
if (chip->state == FL_PM_SUSPENDED) {
chip->state = FL_READY;
- cfi_write(map, CMD(0xF0), chip->start);
+ map_write(map, CMD(0xF0), chip->start);
wake_up(&chip->wq);
}
else
printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
- cfi_spin_unlock(chip->mutex);
+ spin_unlock(chip->mutex);
}
}
@@ -932,23 +1751,29 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
struct map_info *map = mtd->priv;
struct cfi_private *cfi = map->fldrv_priv;
+
kfree(cfi->cmdset_priv);
+ kfree(cfi->cfiq);
kfree(cfi);
+ kfree(mtd->eraseregions);
}
static char im_name[]="cfi_cmdset_0002";
-int __init cfi_amdstd_init(void)
+
+static int __init cfi_amdstd_init(void)
{
inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
return 0;
}
+
static void __exit cfi_amdstd_exit(void)
{
inter_module_unregister(im_name);
}
+
module_init(cfi_amdstd_init);
module_exit(cfi_amdstd_exit);
diff --git a/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0020.c b/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0020.c
new file mode 100644
index 0000000..392627f
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -0,0 +1,1429 @@
+/*
+ * Common Flash Interface support:
+ * ST Advanced Architecture Command Set (ID 0x0020)
+ *
+ * (C) 2000 Red Hat. GPL'd
+ *
+ * $Id: cfi_cmdset_0020.c,v 1.23 2005/11/29 20:01:26 gleixner Exp $
+ *
+ * 10/10/2000 Nicolas Pitre <nico@cam.org>
+ * - completely revamped method functions so they are aware and
+ * independent of the flash geometry (buswidth, interleave, etc.)
+ * - scalability vs code size is completely set at compile-time
+ * (see include/linux/mtd/cfi.h for selection)
+ * - optimized write buffer method
+ * 06/21/2002 Joern Engel <joern@wh.fh-wedel.de> and others
+ * - modified Intel Command Set 0x0001 to support ST Advanced Architecture
+ * (command set 0x0020)
+ * - added a writev function
+ * 07/13/2005 Joern Engel <joern@wh.fh-wedel.de>
+ * - Plugged memory leak in cfi_staa_writev().
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/compatmac.h>
+
+
+static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
+static void cfi_staa_sync (struct mtd_info *);
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
+static int cfi_staa_suspend (struct mtd_info *);
+static void cfi_staa_resume (struct mtd_info *);
+
+static void cfi_staa_destroy(struct mtd_info *);
+
+struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
+
+static struct mtd_info *cfi_staa_setup (struct map_info *);
+
+static struct mtd_chip_driver cfi_staa_chipdrv = {
+ .probe = NULL, /* Not usable directly */
+ .destroy = cfi_staa_destroy,
+ .name = "cfi_cmdset_0020",
+ .module = THIS_MODULE
+};
+
+/* #define DEBUG_LOCK_BITS */
+//#define DEBUG_CFI_FEATURES
+
+#ifdef DEBUG_CFI_FEATURES
+static void cfi_tell_features(struct cfi_pri_intelext *extp)
+{
+ int i;
+ printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
+ printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
+ printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
+ printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
+ printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
+ printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
+ printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
+ printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
+ printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
+ printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
+ for (i=9; i<32; i++) {
+ if (extp->FeatureSupport & (1<<i))
+ printk(" - Unknown Bit %X: supported\n", i);
+ }
+
+ printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
+ printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
+ for (i=1; i<8; i++) {
+ if (extp->SuspendCmdSupport & (1<<i))
+ printk(" - Unknown Bit %X: supported\n", i);
+ }
+
+ printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
+ printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
+ printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
+ for (i=2; i<16; i++) {
+ if (extp->BlkStatusRegMask & (1<<i))
+ printk(" - Unknown Bit %X Active: yes\n",i);
+ }
+
+ printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
+ if (extp->VppOptimal)
+ printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
+ extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
+}
+#endif
+
+/* This routine is made available to other mtd code via
+ * inter_module_register. It must only be accessed through
+ * inter_module_get which will bump the use count of this module. The
+ * addresses passed back in cfi are valid as long as the use count of
+ * this module is non-zero, i.e. between inter_module_get and
+ * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
+ */
+struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+
+ if (cfi->cfi_mode) {
+ /*
+ * It's a real CFI chip, not one for which the probe
+ * routine faked a CFI structure. So we read the feature
+ * table from it.
+ */
+ __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
+ struct cfi_pri_intelext *extp;
+
+ extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
+ if (!extp)
+ return NULL;
+
+ if (extp->MajorVersion != '1' ||
+ (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
+ printk(KERN_ERR " Unknown ST Microelectronics"
+ " Extended Query version %c.%c.\n",
+ extp->MajorVersion, extp->MinorVersion);
+ kfree(extp);
+ return NULL;
+ }
+
+ /* Do some byteswapping if necessary */
+ extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
+ extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
+
+#ifdef DEBUG_CFI_FEATURES
+ /* Tell the user about it in lots of lovely detail */
+ cfi_tell_features(extp);
+#endif
+
+ /* Install our own private info structure */
+ cfi->cmdset_priv = extp;
+ }
+
+ for (i=0; i< cfi->numchips; i++) {
+ cfi->chips[i].word_write_time = 128;
+ cfi->chips[i].buffer_write_time = 128;
+ cfi->chips[i].erase_time = 1024;
+ }
+
+ return cfi_staa_setup(map);
+}
+
+static struct mtd_info *cfi_staa_setup(struct map_info *map)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct mtd_info *mtd;
+ unsigned long offset = 0;
+ int i,j;
+ unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
+
+ mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
+ //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
+
+ if (!mtd) {
+ printk(KERN_ERR "Failed to allocate memory for MTD device\n");
+ kfree(cfi->cmdset_priv);
+ return NULL;
+ }
+
+ memset(mtd, 0, sizeof(*mtd));
+ mtd->priv = map;
+ mtd->type = MTD_NORFLASH;
+ mtd->size = devsize * cfi->numchips;
+
+ mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
+ mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
+ * mtd->numeraseregions, GFP_KERNEL);
+ if (!mtd->eraseregions) {
+ printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
+ kfree(cfi->cmdset_priv);
+ kfree(mtd);
+ return NULL;
+ }
+
+ for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
+ unsigned long ernum, ersize;
+ ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
+ ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
+
+ if (mtd->erasesize < ersize) {
+ mtd->erasesize = ersize;
+ }
+ for (j=0; j<cfi->numchips; j++) {
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
+ mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
+ }
+ offset += (ersize * ernum);
+ }
+
+ if (offset != devsize) {
+ /* Argh */
+ printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
+ kfree(mtd->eraseregions);
+ kfree(cfi->cmdset_priv);
+ kfree(mtd);
+ return NULL;
+ }
+
+ for (i=0; i<mtd->numeraseregions;i++){
+ printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
+ i,mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+ }
+
+ /* Also select the correct geometry setup too */
+ mtd->erase = cfi_staa_erase_varsize;
+ mtd->read = cfi_staa_read;
+ mtd->write = cfi_staa_write_buffers;
+ mtd->writev = cfi_staa_writev;
+ mtd->sync = cfi_staa_sync;
+ mtd->lock = cfi_staa_lock;
+ mtd->unlock = cfi_staa_unlock;
+ mtd->suspend = cfi_staa_suspend;
+ mtd->resume = cfi_staa_resume;
+ mtd->flags = MTD_CAP_NORFLASH;
+ mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */
+ mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
+ map->fldrv = &cfi_staa_chipdrv;
+ __module_get(THIS_MODULE);
+ mtd->name = map->name;
+ return mtd;
+}
+
+
+static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+{
+ map_word status, status_OK;
+ unsigned long timeo;
+ DECLARE_WAITQUEUE(wait, current);
+ int suspended = 0;
+ unsigned long cmd_addr;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ adr += chip->start;
+
+ /* Ensure cmd read/writes are aligned. */
+ cmd_addr = adr & ~(map_bankwidth(map)-1);
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+ retry:
+ spin_lock_bh(chip->mutex);
+
+ /* Check that the chip's ready to talk to us.
+ * If it's in FL_ERASING state, suspend it and make it talk now.
+ */
+ switch (chip->state) {
+ case FL_ERASING:
+ if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
+ goto sleep; /* We don't support erase suspend */
+
+ map_write (map, CMD(0xb0), cmd_addr);
+ /* If the flash has finished erasing, then 'erase suspend'
+ * appears to make some (28F320) flash devices switch to
+ * 'read' mode. Make sure that we switch to 'read status'
+ * mode so we get the right data. --rmk
+ */
+ map_write(map, CMD(0x70), cmd_addr);
+ chip->oldstate = FL_ERASING;
+ chip->state = FL_ERASE_SUSPENDING;
+ // printk("Erase suspending at 0x%lx\n", cmd_addr);
+ for (;;) {
+ status = map_read(map, cmd_addr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ /* Urgh */
+ map_write(map, CMD(0xd0), cmd_addr);
+ /* make sure we're in 'read status' mode */
+ map_write(map, CMD(0x70), cmd_addr);
+ chip->state = FL_ERASING;
+ spin_unlock_bh(chip->mutex);
+ printk(KERN_ERR "Chip not ready after erase "
+ "suspended: status = 0x%lx\n", status.x[0]);
+ return -EIO;
+ }
+
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ spin_lock_bh(chip->mutex);
+ }
+
+ suspended = 1;
+ map_write(map, CMD(0xff), cmd_addr);
+ chip->state = FL_READY;
+ break;
+
+#if 0
+ case FL_WRITING:
+ /* Not quite yet */
+#endif
+
+ case FL_READY:
+ break;
+
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ map_write(map, CMD(0x70), cmd_addr);
+ chip->state = FL_STATUS;
+
+ case FL_STATUS:
+ status = map_read(map, cmd_addr);
+ if (map_word_andequal(map, status, status_OK, status_OK)) {
+ map_write(map, CMD(0xff), cmd_addr);
+ chip->state = FL_READY;
+ break;
+ }
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ spin_unlock_bh(chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ sleep:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock_bh(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ map_copy_from(map, buf, adr, len);
+
+ if (suspended) {
+ chip->state = chip->oldstate;
+ /* What if one interleaved chip has finished and the
+ other hasn't? The old code would leave the finished
+ one in READY mode. That's bad, and caused -EROFS
+ errors to be returned from do_erase_oneblock because
+ that's the only bit it checked for at the time.
+ As the state machine appears to explicitly allow
+ sending the 0x70 (Read Status) command to an erasing
+ chip and expecting it to be ignored, that's what we
+ do. */
+ map_write(map, CMD(0xd0), cmd_addr);
+ map_write(map, CMD(0x70), cmd_addr);
+ }
+
+ wake_up(&chip->wq);
+ spin_unlock_bh(chip->mutex);
+ return 0;
+}
+
+static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long ofs;
+ int chipnum;
+ int ret = 0;
+
+ /* ofs: offset within the first chip that the first read should start */
+ chipnum = (from >> cfi->chipshift);
+ ofs = from - (chipnum << cfi->chipshift);
+
+ *retlen = 0;
+
+ while (len) {
+ unsigned long thislen;
+
+ if (chipnum >= cfi->numchips)
+ break;
+
+ if ((len + ofs -1) >> cfi->chipshift)
+ thislen = (1<<cfi->chipshift) - ofs;
+ else
+ thislen = len;
+
+ ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
+ if (ret)
+ break;
+
+ *retlen += thislen;
+ len -= thislen;
+ buf += thislen;
+
+ ofs = 0;
+ chipnum++;
+ }
+ return ret;
+}
+
+static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const u_char *buf, int len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long cmd_adr, timeo;
+ DECLARE_WAITQUEUE(wait, current);
+ int wbufsize, z;
+
+ /* M58LW064A requires bus alignment for buffer writes -- saw */
+ if (adr & (map_bankwidth(map)-1))
+ return -EINVAL;
+
+ wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ adr += chip->start;
+ cmd_adr = adr & ~(wbufsize-1);
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+ retry:
+
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
+#endif
+ spin_lock_bh(chip->mutex);
+
+ /* Check that the chip's ready to talk to us.
+ * Later, we can actually think about interrupting it
+ * if it's in FL_ERASING state.
+ * Not just yet, though.
+ */
+ switch (chip->state) {
+ case FL_READY:
+ break;
+
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: 1 status[%x]\n", __FUNCTION__, map_read(map, cmd_adr));
+#endif
+
+ case FL_STATUS:
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ spin_unlock_bh(chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
+ status.x[0], map_read(map, cmd_adr).x[0]);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock_bh(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
+ map_write(map, CMD(0xe8), cmd_adr);
+ chip->state = FL_WRITING_TO_BUFFER;
+
+ z = 0;
+ for (;;) {
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ spin_lock_bh(chip->mutex);
+
+ if (++z > 100) {
+ /* Argh. Not ready for write to buffer */
+ DISABLE_VPP(map);
+ map_write(map, CMD(0x70), cmd_adr);
+ chip->state = FL_STATUS;
+ spin_unlock_bh(chip->mutex);
+ printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
+ return -EIO;
+ }
+ }
+
+ /* Write length of data to come */
+ map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );
+
+ /* Write data */
+ for (z = 0; z < len;
+ z += map_bankwidth(map), buf += map_bankwidth(map)) {
+ map_word d;
+ d = map_word_load(map, buf);
+ map_write(map, d, adr+z);
+ }
+ /* GO GO GO */
+ map_write(map, CMD(0xd0), cmd_adr);
+ chip->state = FL_WRITING;
+
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(chip->buffer_write_time);
+ spin_lock_bh(chip->mutex);
+
+ timeo = jiffies + (HZ/2);
+ z = 0;
+ for (;;) {
+ if (chip->state != FL_WRITING) {
+ /* Someone's suspended the write. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock_bh(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ spin_lock_bh(chip->mutex);
+ continue;
+ }
+
+ status = map_read(map, cmd_adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ /* clear status */
+ map_write(map, CMD(0x50), cmd_adr);
+ /* put back into read status register mode */
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ spin_unlock_bh(chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ z++;
+ spin_lock_bh(chip->mutex);
+ }
+ if (!z) {
+ chip->buffer_write_time--;
+ if (!chip->buffer_write_time)
+ chip->buffer_write_time++;
+ }
+ if (z > 1)
+ chip->buffer_write_time++;
+
+ /* Done and happy. */
+ DISABLE_VPP(map);
+ chip->state = FL_STATUS;
+
+ /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
+ if (map_word_bitsset(map, status, CMD(0x3a))) {
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: 2 status[%lx]\n", __FUNCTION__, status.x[0]);
+#endif
+ /* clear status */
+ map_write(map, CMD(0x50), cmd_adr);
+ /* put back into read status register mode */
+ map_write(map, CMD(0x70), adr);
+ wake_up(&chip->wq);
+ spin_unlock_bh(chip->mutex);
+ return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
+ }
+ wake_up(&chip->wq);
+ spin_unlock_bh(chip->mutex);
+
+ return 0;
+}
+
+static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
+ size_t len, size_t *retlen, const u_char *buf)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
+ int ret = 0;
+ int chipnum;
+ unsigned long ofs;
+
+ *retlen = 0;
+ if (!len)
+ return 0;
+
+ chipnum = to >> cfi->chipshift;
+ ofs = to - (chipnum << cfi->chipshift);
+
+#ifdef DEBUG_CFI_FEATURES
+ printk("%s: map_bankwidth(map)[%x]\n", __FUNCTION__, map_bankwidth(map));
+ printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
+ printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
+#endif
+
+ /* Write buffer is worth it only if more than one word to write... */
+ while (len > 0) {
+ /* We must not cross write block boundaries */
+ int size = wbufsize - (ofs & (wbufsize-1));
+
+ if (size > len)
+ size = len;
+
+ ret = do_write_buffer(map, &cfi->chips[chipnum],
+ ofs, buf, size);
+ if (ret)
+ return ret;
+
+ ofs += size;
+ buf += size;
+ (*retlen) += size;
+ len -= size;
+
+ if (ofs >> cfi->chipshift) {
+ chipnum ++;
+ ofs = 0;
+ if (chipnum == cfi->numchips)
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Writev for ECC-Flashes is a little more complicated. We need to maintain
+ * a small buffer for this.
+ * XXX: If the buffer size is not a power of 2, this will break
+ */
+#define ECCBUF_SIZE (mtd->eccsize)
+#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
+#define ECCBUF_MOD(x) ((x) & (ECCBUF_SIZE - 1))
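+/*
+ * Worked example (assuming a power-of-two eccsize of 512): for a 1300-byte
+ * iovec element, ECCBUF_DIV(1300) = 1024 is the aligned part written out
+ * directly and ECCBUF_MOD(1300) = 276 is the tail held back in the bounce
+ * buffer until the next element or the final flush.
+ */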
+static int
+cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ unsigned long i;
+ size_t totlen = 0, thislen;
+ int ret = 0;
+ size_t buflen = 0;
+	char *buffer;
+
+ if (!ECCBUF_SIZE) {
+ /* We should fall back to a general writev implementation.
+		 * Until that is written, just return an error.
+ */
+ return -EIO;
+ }
+ buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ for (i=0; i<count; i++) {
+ size_t elem_len = vecs[i].iov_len;
+ void *elem_base = vecs[i].iov_base;
+ if (!elem_len) /* FIXME: Might be unnecessary. Check that */
+ continue;
+ if (buflen) { /* cut off head */
+ if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
+ memcpy(buffer+buflen, elem_base, elem_len);
+ buflen += elem_len;
+ continue;
+ }
+ memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
+ ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
+ totlen += thislen;
+ if (ret || thislen != ECCBUF_SIZE)
+ goto write_error;
+ elem_len -= thislen-buflen;
+ elem_base += thislen-buflen;
+ to += ECCBUF_SIZE;
+ }
+ if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
+ ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
+ totlen += thislen;
+ if (ret || thislen != ECCBUF_DIV(elem_len))
+ goto write_error;
+ to += thislen;
+ }
+ buflen = ECCBUF_MOD(elem_len); /* cut off tail */
+ if (buflen) {
+ memset(buffer, 0xff, ECCBUF_SIZE);
+ memcpy(buffer, elem_base + thislen, buflen);
+ }
+ }
+ if (buflen) { /* flush last page, even if not full */
+ /* This is sometimes intended behaviour, really */
+ ret = mtd->write(mtd, to, buflen, &thislen, buffer);
+ totlen += thislen;
+		if (ret || thislen != buflen)
+ goto write_error;
+ }
+write_error:
+ if (retlen)
+ *retlen = totlen;
+ kfree(buffer);
+ return ret;
+}
+
+
+static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long timeo;
+ int retries = 3;
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+retry:
+ spin_lock_bh(chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ case FL_READY:
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+
+ case FL_STATUS:
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ spin_unlock_bh(chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock_bh(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
+ /* Clear the status register first */
+ map_write(map, CMD(0x50), adr);
+
+ /* Now erase */
+ map_write(map, CMD(0x20), adr);
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_ERASING;
+
+ spin_unlock_bh(chip->mutex);
+ msleep(1000);
+ spin_lock_bh(chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+
+ timeo = jiffies + (HZ*20);
+ for (;;) {
+ if (chip->state != FL_ERASING) {
+ /* Someone's suspended the erase. Sleep */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock_bh(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ*20); /* FIXME */
+ spin_lock_bh(chip->mutex);
+ continue;
+ }
+
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+ spin_unlock_bh(chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ spin_lock_bh(chip->mutex);
+ }
+
+ DISABLE_VPP(map);
+ ret = 0;
+
+ /* We've broken this before. It doesn't hurt to be safe */
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ status = map_read(map, adr);
+
+ /* check for lock bit */
+ if (map_word_bitsset(map, status, CMD(0x3a))) {
+ unsigned char chipstatus = status.x[0];
+ if (!map_word_equal(map, status, CMD(chipstatus))) {
+ int i, w;
+ for (w=0; w<map_words(map); w++) {
+ for (i = 0; i<cfi_interleave(cfi); i++) {
+ chipstatus |= status.x[w] >> (cfi->device_type * 8);
+ }
+ }
+ printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
+ status.x[0], chipstatus);
+ }
+ /* Reset the error bits */
+ map_write(map, CMD(0x50), adr);
+ map_write(map, CMD(0x70), adr);
+
+ if ((chipstatus & 0x30) == 0x30) {
+ printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
+ ret = -EIO;
+ } else if (chipstatus & 0x02) {
+ /* Protection bit set */
+ ret = -EROFS;
+ } else if (chipstatus & 0x8) {
+ /* Voltage */
+ printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
+ ret = -EIO;
+ } else if (chipstatus & 0x20) {
+ if (retries--) {
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
+ timeo = jiffies + HZ;
+ chip->state = FL_STATUS;
+ spin_unlock_bh(chip->mutex);
+ goto retry;
+ }
+ printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
+ ret = -EIO;
+ }
+ }
+
+ wake_up(&chip->wq);
+ spin_unlock_bh(chip->mutex);
+ return ret;
+}
+
+int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr, len;
+ int chipnum, ret = 0;
+ int i, first;
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+
+ if (instr->addr > mtd->size)
+ return -EINVAL;
+
+ if ((instr->len + instr->addr) > mtd->size)
+ return -EINVAL;
+
+ /* Check that both start and end of the requested erase are
+ * aligned with the erasesize at the appropriate addresses.
+ */
+
+ i = 0;
+
+ /* Skip all erase regions which are ended before the start of
+ the requested erase. Actually, to save on the calculations,
+ we skip to the first erase region which starts after the
+ start of the requested erase, and then go back one.
+ */
+
+ while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
+ i++;
+ i--;
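+	/* Hypothetical example: with regions[0] at offset 0 (64KiB blocks)
+	   and regions[1] at offset 0x1f0000 (8KiB blocks), an erase starting
+	   at 0x1f2000 walks i past both offsets and the i-- above leaves
+	   i == 1, the region that actually contains the start address. */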
+
+ /* OK, now i is pointing at the erase region in which this
+ erase request starts. Check the start of the requested
+ erase range is aligned with the erase size which is in
+ effect here.
+ */
+
+ if (instr->addr & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ /* Remember the erase region we start on */
+ first = i;
+
+ /* Next, check that the end of the requested erase is aligned
+ * with the erase region at that address.
+ */
+
+ while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
+ i++;
+
+ /* As before, drop back one to point at the region in which
+ the address actually falls
+ */
+ i--;
+
+ if ((instr->addr + instr->len) & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ chipnum = instr->addr >> cfi->chipshift;
+ adr = instr->addr - (chipnum << cfi->chipshift);
+ len = instr->len;
+
+ i=first;
+
+ while(len) {
+ ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
+
+ if (ret)
+ return ret;
+
+ adr += regions[i].erasesize;
+ len -= regions[i].erasesize;
+
+ if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+ }
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+static void cfi_staa_sync (struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ retry:
+ spin_lock_bh(chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ chip->oldstate = chip->state;
+ chip->state = FL_SYNCING;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ case FL_SYNCING:
+ spin_unlock_bh(chip->mutex);
+ break;
+
+ default:
+ /* Not an idle state */
+ add_wait_queue(&chip->wq, &wait);
+
+ spin_unlock_bh(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+
+ goto retry;
+ }
+ }
+
+ /* Unlock the chips again */
+
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ spin_lock_bh(chip->mutex);
+
+ if (chip->state == FL_SYNCING) {
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+ spin_unlock_bh(chip->mutex);
+ }
+}
+
+static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+retry:
+ spin_lock_bh(chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ case FL_READY:
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+
+ case FL_STATUS:
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ spin_unlock_bh(chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock_bh(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
+ map_write(map, CMD(0x60), adr);
+ map_write(map, CMD(0x01), adr);
+ chip->state = FL_LOCKING;
+
+ spin_unlock_bh(chip->mutex);
+ msleep(1000);
+ spin_lock_bh(chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+
+ timeo = jiffies + (HZ*2);
+ for (;;) {
+
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+ spin_unlock_bh(chip->mutex);
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ spin_lock_bh(chip->mutex);
+ }
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ wake_up(&chip->wq);
+ spin_unlock_bh(chip->mutex);
+ return 0;
+}
+
+static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr;
+ int chipnum, ret = 0;
+#ifdef DEBUG_LOCK_BITS
+ int ofs_factor = cfi->interleave * cfi->device_type;
+#endif
+
+ if (ofs & (mtd->erasesize - 1))
+ return -EINVAL;
+
+ if (len & (mtd->erasesize -1))
+ return -EINVAL;
+
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+
+ chipnum = ofs >> cfi->chipshift;
+ adr = ofs - (chipnum << cfi->chipshift);
+
+ while(len) {
+
+#ifdef DEBUG_LOCK_BITS
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+#endif
+
+ ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
+
+#ifdef DEBUG_LOCK_BITS
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+#endif
+
+ if (ret)
+ return ret;
+
+ adr += mtd->erasesize;
+ len -= mtd->erasesize;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+ }
+ return 0;
+}
+
+static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ map_word status, status_OK;
+ unsigned long timeo = jiffies + HZ;
+ DECLARE_WAITQUEUE(wait, current);
+
+ adr += chip->start;
+
+ /* Let's determine this according to the interleave only once */
+ status_OK = CMD(0x80);
+
+ timeo = jiffies + HZ;
+retry:
+ spin_lock_bh(chip->mutex);
+
+ /* Check that the chip's ready to talk to us. */
+ switch (chip->state) {
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ case FL_READY:
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+
+ case FL_STATUS:
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* Urgh. Chip not yet ready to talk to us. */
+ if (time_after(jiffies, timeo)) {
+ spin_unlock_bh(chip->mutex);
+ printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
+ return -EIO;
+ }
+
+ /* Latency issues. Drop the lock, wait a while and retry */
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ goto retry;
+
+ default:
+ /* Stick ourselves on a wait queue to be woken when
+ someone changes the status */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock_bh(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + HZ;
+ goto retry;
+ }
+
+ ENABLE_VPP(map);
+ map_write(map, CMD(0x60), adr);
+ map_write(map, CMD(0xD0), adr);
+ chip->state = FL_UNLOCKING;
+
+ spin_unlock_bh(chip->mutex);
+ msleep(1000);
+ spin_lock_bh(chip->mutex);
+
+ /* FIXME. Use a timer to check this, and return immediately. */
+ /* Once the state machine's known to be working I'll do that */
+
+ timeo = jiffies + (HZ*2);
+ for (;;) {
+
+ status = map_read(map, adr);
+ if (map_word_andequal(map, status, status_OK, status_OK))
+ break;
+
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
+ map_write(map, CMD(0x70), adr);
+ chip->state = FL_STATUS;
+ printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
+ DISABLE_VPP(map);
+ spin_unlock_bh(chip->mutex);
+ return -EIO;
+ }
+
+	/* Latency issues. Drop the lock, wait a while and retry */
+ spin_unlock_bh(chip->mutex);
+ cfi_udelay(1);
+ spin_lock_bh(chip->mutex);
+ }
+
+ /* Done and happy. */
+ chip->state = FL_STATUS;
+ DISABLE_VPP(map);
+ wake_up(&chip->wq);
+ spin_unlock_bh(chip->mutex);
+ return 0;
+}
+
+static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr;
+ int chipnum, ret = 0;
+#ifdef DEBUG_LOCK_BITS
+ int ofs_factor = cfi->interleave * cfi->device_type;
+#endif
+
+ chipnum = ofs >> cfi->chipshift;
+ adr = ofs - (chipnum << cfi->chipshift);
+
+#ifdef DEBUG_LOCK_BITS
+ {
+ unsigned long temp_adr = adr;
+ unsigned long temp_len = len;
+
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ while (temp_len) {
+ printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
+ temp_adr += mtd->erasesize;
+ temp_len -= mtd->erasesize;
+ }
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ }
+#endif
+
+ ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
+
+#ifdef DEBUG_LOCK_BITS
+ cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
+ printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
+ cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
+#endif
+
+ return ret;
+}
+
+static int cfi_staa_suspend(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+ int ret = 0;
+
+ for (i=0; !ret && i<cfi->numchips; i++) {
+ chip = &cfi->chips[i];
+
+ spin_lock_bh(chip->mutex);
+
+ switch(chip->state) {
+ case FL_READY:
+ case FL_STATUS:
+ case FL_CFI_QUERY:
+ case FL_JEDEC_QUERY:
+ chip->oldstate = chip->state;
+ chip->state = FL_PM_SUSPENDED;
+ /* No need to wake_up() on this state change -
+ * as the whole point is that nobody can do anything
+ * with the chip now anyway.
+ */
+ case FL_PM_SUSPENDED:
+ break;
+
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+ spin_unlock_bh(chip->mutex);
+ }
+
+ /* Unlock the chips again */
+
+ if (ret) {
+ for (i--; i >=0; i--) {
+ chip = &cfi->chips[i];
+
+ spin_lock_bh(chip->mutex);
+
+ if (chip->state == FL_PM_SUSPENDED) {
+ /* No need to force it into a known state here,
+ because we're returning failure, and it didn't
+ get power cycled */
+ chip->state = chip->oldstate;
+ wake_up(&chip->wq);
+ }
+ spin_unlock_bh(chip->mutex);
+ }
+ }
+
+ return ret;
+}
+
+static void cfi_staa_resume(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int i;
+ struct flchip *chip;
+
+ for (i=0; i<cfi->numchips; i++) {
+
+ chip = &cfi->chips[i];
+
+ spin_lock_bh(chip->mutex);
+
+ /* Go to known state. Chip may have been power cycled */
+ if (chip->state == FL_PM_SUSPENDED) {
+ map_write(map, CMD(0xFF), 0);
+ chip->state = FL_READY;
+ wake_up(&chip->wq);
+ }
+
+ spin_unlock_bh(chip->mutex);
+ }
+}
+
+static void cfi_staa_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ kfree(cfi->cmdset_priv);
+ kfree(cfi);
+}
+
+static char im_name[]="cfi_cmdset_0020";
+
+static int __init cfi_staa_init(void)
+{
+ inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
+ return 0;
+}
+
+static void __exit cfi_staa_exit(void)
+{
+ inter_module_unregister(im_name);
+}
+
+module_init(cfi_staa_init);
+module_exit(cfi_staa_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/linux-2.4.x/drivers/mtd/chips/cfi_probe.c b/linux-2.4.x/drivers/mtd/chips/cfi_probe.c
index 920f588..e636aa8 100644
--- a/linux-2.4.x/drivers/mtd/chips/cfi_probe.c
+++ b/linux-2.4.x/drivers/mtd/chips/cfi_probe.c
@@ -1,131 +1,203 @@
-/*
+/*
Common Flash Interface probe code.
(C) 2000 Red Hat. GPL'd.
- $Id: cfi_probe.c,v 1.66 2001/10/02 15:05:12 dwmw2 Exp $
+ $Id: cfi_probe.c,v 1.86 2005/11/29 14:48:31 gleixner Exp $
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/gen_probe.h>
-//#define DEBUG_CFI
+//#define DEBUG_CFI
#ifdef DEBUG_CFI
static void print_cfi_ident(struct cfi_ident *);
#endif
-int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
-int cfi_jedec_lookup(int index, int mfr_id, int dev_id);
-
static int cfi_probe_chip(struct map_info *map, __u32 base,
- struct flchip *chips, struct cfi_private *cfi);
+ unsigned long *chip_map, struct cfi_private *cfi);
static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi);
struct mtd_info *cfi_probe(struct map_info *map);
-/* check for QRY, or search for jedec id.
+#ifdef CONFIG_MTD_XIP
+
+/* only needed for short periods, so this is rather simple */
+#define xip_disable() local_irq_disable()
+
+#define xip_allowed(base, map) \
+do { \
+ (void) map_read(map, base); \
+ asm volatile (".rep 8; nop; .endr"); \
+ local_irq_enable(); \
+} while (0)
+
+#define xip_enable(base, map, cfi) \
+do { \
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \
+ xip_allowed(base, map); \
+} while (0)
+
+#define xip_disable_qry(base, map, cfi) \
+do { \
+ xip_disable(); \
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); \
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); \
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); \
+} while (0)
+
+#else
+
+#define xip_disable() do { } while (0)
+#define xip_allowed(base, map) do { } while (0)
+#define xip_enable(base, map, cfi) do { } while (0)
+#define xip_disable_qry(base, map, cfi) do { } while (0)
+
+#endif
+
+/* check for QRY.
in: interleave,type,mode
    ret: 1 if QRY is present, 0 if not
*/
-static inline int qry_present(struct map_info *map, __u32 base,
+static int __xipram qry_present(struct map_info *map, __u32 base,
struct cfi_private *cfi)
{
int osf = cfi->interleave * cfi->device_type; // scale factor
+ map_word val[3];
+ map_word qry[3];
- if (cfi_read(map,base+osf*0x10)==cfi_build_cmd('Q',map,cfi) &&
- cfi_read(map,base+osf*0x11)==cfi_build_cmd('R',map,cfi) &&
- cfi_read(map,base+osf*0x12)==cfi_build_cmd('Y',map,cfi))
- return 1; // ok !
-
- return 0; // nothing found
-}
+ qry[0] = cfi_build_cmd('Q', map, cfi);
+ qry[1] = cfi_build_cmd('R', map, cfi);
+ qry[2] = cfi_build_cmd('Y', map, cfi);
+ val[0] = map_read(map, base + osf*0x10);
+ val[1] = map_read(map, base + osf*0x11);
+ val[2] = map_read(map, base + osf*0x12);
-static int cfi_probe_chip(struct map_info *map, __u32 base,
- struct flchip *chips, struct cfi_private *cfi)
+ if (!map_word_equal(map, qry[0], val[0]))
+ return 0;
+
+ if (!map_word_equal(map, qry[1], val[1]))
+ return 0;
+
+ if (!map_word_equal(map, qry[2], val[2]))
+ return 0;
+
+ return 1; // "QRY" found
+}
+
+static int __xipram cfi_probe_chip(struct map_info *map, __u32 base,
+ unsigned long *chip_map, struct cfi_private *cfi)
{
int i;
-
+
+ if ((base + 0) >= map->size) {
+ printk(KERN_NOTICE
+ "Probe at base[0x00](0x%08lx) past the end of the map(0x%08lx)\n",
+ (unsigned long)base, map->size -1);
+ return 0;
+ }
+ if ((base + 0xff) >= map->size) {
+ printk(KERN_NOTICE
+ "Probe at base[0x55](0x%08lx) past the end of the map(0x%08lx)\n",
+ (unsigned long)base + 0x55, map->size -1);
+ return 0;
+ }
+
+ xip_disable();
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
- if (!qry_present(map,base,cfi))
+ if (!qry_present(map,base,cfi)) {
+ xip_enable(base, map, cfi);
return 0;
+ }
if (!cfi->numchips) {
- /* This is the first time we're called. Set up the CFI
+ /* This is the first time we're called. Set up the CFI
stuff accordingly and return */
return cfi_chip_setup(map, cfi);
}
/* Check each previous chip to see if it's an alias */
- for (i=0; i<cfi->numchips; i++) {
+ for (i=0; i < (base >> cfi->chipshift); i++) {
+ unsigned long start;
+ if(!test_bit(i, chip_map)) {
+ /* Skip location; no valid chip at this address */
+ continue;
+ }
+ start = i << cfi->chipshift;
/* This chip should be in read mode if it's one
we've already touched. */
- if (qry_present(map,chips[i].start,cfi)) {
- /* Eep. This chip also had the QRY marker.
+ if (qry_present(map, start, cfi)) {
+ /* Eep. This chip also had the QRY marker.
* Is it an alias for the new one? */
- cfi_send_gen_cmd(0xF0, 0, chips[i].start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xF0, 0, start, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
/* If the QRY marker goes away, it's an alias */
- if (!qry_present(map, chips[i].start, cfi)) {
+ if (!qry_present(map, start, cfi)) {
+ xip_allowed(base, map);
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
- map->name, base, chips[i].start);
+ map->name, base, start);
return 0;
}
- /* Yes, it's actually got QRY for data. Most
+ /* Yes, it's actually got QRY for data. Most
* unfortunate. Stick the new chip in read mode
* too and if it's the same, assume it's an alias. */
/* FIXME: Use other modes to do a proper check */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
-
+ cfi_send_gen_cmd(0xFF, 0, start, map, cfi, cfi->device_type, NULL);
+
if (qry_present(map, base, cfi)) {
+ xip_allowed(base, map);
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
- map->name, base, chips[i].start);
+ map->name, base, start);
return 0;
}
}
}
-
+
/* OK, if we got to here, then none of the previous chips appear to
be aliases for the current one. */
- if (cfi->numchips == MAX_CFI_CHIPS) {
- printk(KERN_WARNING"%s: Too many flash chips detected. Increase MAX_CFI_CHIPS from %d.\n", map->name, MAX_CFI_CHIPS);
- /* Doesn't matter about resetting it to Read Mode - we're not going to talk to it anyway */
- return -1;
- }
- chips[cfi->numchips].start = base;
- chips[cfi->numchips].state = FL_READY;
+ set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
cfi->numchips++;
-
+
/* Put it back into Read Mode */
cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+ xip_allowed(base, map);
- printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit mode\n",
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
map->name, cfi->interleave, cfi->device_type*8, base,
- map->buswidth*8);
-
+ map->bankwidth*8);
+
return 1;
}
-static int cfi_chip_setup(struct map_info *map,
- struct cfi_private *cfi)
+static int __xipram cfi_chip_setup(struct map_info *map,
+ struct cfi_private *cfi)
{
int ofs_factor = cfi->interleave*cfi->device_type;
__u32 base = 0;
int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor);
int i;
+ xip_enable(base, map, cfi);
#ifdef DEBUG_CFI
printk("Number of erase regions: %d\n", num_erase_regions);
#endif
@@ -137,20 +209,39 @@ static int cfi_chip_setup(struct map_info *map,
printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
return 0;
}
-
- memset(cfi->cfiq,0,sizeof(struct cfi_ident));
-
- cfi->cfi_mode = 1;
- cfi->fast_prog=1; /* CFI supports fast programming */
-
+
+ memset(cfi->cfiq,0,sizeof(struct cfi_ident));
+
+ cfi->cfi_mode = CFI_MODE_CFI;
+
/* Read the CFI info structure */
- for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++) {
+ xip_disable_qry(base, map, cfi);
+ for (i=0; i<(sizeof(struct cfi_ident) + num_erase_regions * 4); i++)
((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor);
- }
-
+
+ /* Note we put the device back into Read Mode BEFORE going into Auto
+ * Select Mode, as some devices support nesting of modes, others
+ * don't. This way should always work.
+ * On cmdset 0001 the writes of 0xaa and 0x55 are not needed, and
+ * so should be treated as nops or illegal (and so put the device
+ * back into Read Mode, which is a nop in this case).
+ */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xaa, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, 0x2aa, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, 0x555, base, map, cfi, cfi->device_type, NULL);
+ cfi->mfr = cfi_read_query16(map, base);
+ cfi->id = cfi_read_query16(map, base + ofs_factor);
+
+ /* Put it back into Read Mode */
+ cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ /* ... even if it's an Intel chip */
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+ xip_allowed(base, map);
+
/* Do any necessary byteswapping */
cfi->cfiq->P_ID = le16_to_cpu(cfi->cfiq->P_ID);
-
+
cfi->cfiq->P_ADR = le16_to_cpu(cfi->cfiq->P_ADR);
cfi->cfiq->A_ID = le16_to_cpu(cfi->cfiq->A_ID);
cfi->cfiq->A_ADR = le16_to_cpu(cfi->cfiq->A_ADR);
@@ -164,47 +255,64 @@ static int cfi_chip_setup(struct map_info *map,
for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
cfi->cfiq->EraseRegionInfo[i] = le32_to_cpu(cfi->cfiq->EraseRegionInfo[i]);
-
-#ifdef DEBUG_CFI
+
+#ifdef DEBUG_CFI
printk(" Erase Region #%d: BlockSize 0x%4.4X bytes, %d blocks\n",
- i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
+ i, (cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff,
(cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1);
#endif
}
- /* Put it back into Read Mode */
- cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
+ map->name, cfi->interleave, cfi->device_type*8, base,
+ map->bankwidth*8);
return 1;
}
#ifdef DEBUG_CFI
-static char *vendorname(__u16 vendor)
+static char *vendorname(__u16 vendor)
{
switch (vendor) {
case P_ID_NONE:
return "None";
-
+
case P_ID_INTEL_EXT:
return "Intel/Sharp Extended";
-
+
case P_ID_AMD_STD:
return "AMD/Fujitsu Standard";
-
+
case P_ID_INTEL_STD:
return "Intel/Sharp Standard";
-
+
case P_ID_AMD_EXT:
return "AMD/Fujitsu Extended";
-
+
+ case P_ID_WINBOND:
+ return "Winbond Standard";
+
+ case P_ID_ST_ADV:
+ return "ST Advanced";
+
case P_ID_MITSUBISHI_STD:
return "Mitsubishi Standard";
-
+
case P_ID_MITSUBISHI_EXT:
return "Mitsubishi Extended";
-
+
+ case P_ID_SST_PAGE:
+ return "SST Page Write";
+
+ case P_ID_INTEL_PERFORMANCE:
+ return "Intel Performance Code";
+
+ case P_ID_INTEL_DATA:
+ return "Intel Data";
+
case P_ID_RESERVED:
return "Not Allowed / Reserved for Future Use";
-
+
default:
return "Unknown";
}
@@ -217,86 +325,90 @@ static void print_cfi_ident(struct cfi_ident *cfip)
if (cfip->qry[0] != 'Q' || cfip->qry[1] != 'R' || cfip->qry[2] != 'Y') {
printk("Invalid CFI ident structure.\n");
return;
- }
-#endif
+ }
+#endif
printk("Primary Vendor Command Set: %4.4X (%s)\n", cfip->P_ID, vendorname(cfip->P_ID));
if (cfip->P_ADR)
printk("Primary Algorithm Table at %4.4X\n", cfip->P_ADR);
else
printk("No Primary Algorithm Table\n");
-
+
printk("Alternative Vendor Command Set: %4.4X (%s)\n", cfip->A_ID, vendorname(cfip->A_ID));
if (cfip->A_ADR)
printk("Alternate Algorithm Table at %4.4X\n", cfip->A_ADR);
else
printk("No Alternate Algorithm Table\n");
-
-
- printk("Vcc Minimum: %x.%x V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
- printk("Vcc Maximum: %x.%x V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
+
+
+ printk("Vcc Minimum: %2d.%d V\n", cfip->VccMin >> 4, cfip->VccMin & 0xf);
+ printk("Vcc Maximum: %2d.%d V\n", cfip->VccMax >> 4, cfip->VccMax & 0xf);
if (cfip->VppMin) {
- printk("Vpp Minimum: %x.%x V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf);
- printk("Vpp Maximum: %x.%x V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf);
+ printk("Vpp Minimum: %2d.%d V\n", cfip->VppMin >> 4, cfip->VppMin & 0xf);
+ printk("Vpp Maximum: %2d.%d V\n", cfip->VppMax >> 4, cfip->VppMax & 0xf);
}
else
printk("No Vpp line\n");
-
+
printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp);
printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp));
-
+
if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) {
printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp);
printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp));
}
else
printk("Full buffer write not supported\n");
-
- printk("Typical block erase timeout: %d µs\n", 1<<cfip->BlockEraseTimeoutTyp);
- printk("Maximum block erase timeout: %d µs\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp));
+
+ printk("Typical block erase timeout: %d ms\n", 1<<cfip->BlockEraseTimeoutTyp);
+ printk("Maximum block erase timeout: %d ms\n", (1<<cfip->BlockEraseTimeoutMax) * (1<<cfip->BlockEraseTimeoutTyp));
if (cfip->ChipEraseTimeoutTyp || cfip->ChipEraseTimeoutMax) {
- printk("Typical chip erase timeout: %d µs\n", 1<<cfip->ChipEraseTimeoutTyp);
- printk("Maximum chip erase timeout: %d µs\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp));
+ printk("Typical chip erase timeout: %d ms\n", 1<<cfip->ChipEraseTimeoutTyp);
+ printk("Maximum chip erase timeout: %d ms\n", (1<<cfip->ChipEraseTimeoutMax) * (1<<cfip->ChipEraseTimeoutTyp));
}
else
printk("Chip erase not supported\n");
-
+
printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
switch(cfip->InterfaceDesc) {
case 0:
printk(" - x8-only asynchronous interface\n");
break;
-
+
case 1:
printk(" - x16-only asynchronous interface\n");
break;
-
+
case 2:
printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
break;
-
+
case 3:
printk(" - x32-only asynchronous interface\n");
break;
-
+
+ case 4:
+ printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
+ break;
+
case 65535:
printk(" - Not Allowed / Reserved\n");
break;
-
+
default:
printk(" - Unknown\n");
break;
}
-
+
printk("Max. bytes in buffer write: 0x%x\n", 1<< cfip->MaxBufWriteSize);
printk("Number of Erase Block Regions: %d\n", cfip->NumEraseRegions);
-
+
}
#endif /* DEBUG_CFI */
static struct chip_probe cfi_chip_probe = {
- name: "CFI",
- probe_chip: cfi_probe_chip
+ .name = "CFI",
+ .probe_chip = cfi_probe_chip
};
struct mtd_info *cfi_probe(struct map_info *map)
@@ -309,12 +421,12 @@ struct mtd_info *cfi_probe(struct map_info *map)
}
static struct mtd_chip_driver cfi_chipdrv = {
- probe: cfi_probe,
- name: "cfi_probe",
- module: THIS_MODULE
+ .probe = cfi_probe,
+ .name = "cfi_probe",
+ .module = THIS_MODULE
};
-int __init cfi_probe_init(void)
+static int __init cfi_probe_init(void)
{
register_mtd_chip_driver(&cfi_chipdrv);
return 0;
diff --git a/linux-2.4.x/drivers/mtd/chips/cfi_util.c b/linux-2.4.x/drivers/mtd/chips/cfi_util.c
new file mode 100644
index 0000000..d8e7a02
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/chips/cfi_util.c
@@ -0,0 +1,187 @@
+/*
+ * Common Flash Interface support:
+ * Generic utility functions not dependent on command set
+ *
+ * Copyright (C) 2002 Red Hat
+ * Copyright (C) 2003 STMicroelectronics Limited
+ *
+ * This code is covered by the GPL.
+ *
+ * $Id: cfi_util.c,v 1.10 2005/11/07 11:14:23 gleixner Exp $
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/xip.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/compatmac.h>
+
+struct cfi_extquery *
+__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ __u32 base = 0; // cfi->chips[0].start;
+ int ofs_factor = cfi->interleave * cfi->device_type;
+ int i;
+ struct cfi_extquery *extp = NULL;
+
+ printk(" %s Extended Query Table at 0x%4.4X\n", name, adr);
+ if (!adr)
+ goto out;
+
+ extp = kmalloc(size, GFP_KERNEL);
+ if (!extp) {
+ printk(KERN_ERR "Failed to allocate memory\n");
+ goto out;
+ }
+
+#ifdef CONFIG_MTD_XIP
+ local_irq_disable();
+#endif
+
+ /* Switch it into Query Mode */
+ cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
+
+ /* Read in the Extended Query Table */
+ for (i=0; i<size; i++) {
+ ((unsigned char *)extp)[i] =
+ cfi_read_query(map, base+((adr+i)*ofs_factor));
+ }
+
+ /* Make sure it returns to read mode */
+ cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0xff, 0, base, map, cfi, cfi->device_type, NULL);
+
+#ifdef CONFIG_MTD_XIP
+ (void) map_read(map, base);
+ asm volatile (".rep 8; nop; .endr");
+ local_irq_enable();
+#endif
+
+ out: return extp;
+}
+
+EXPORT_SYMBOL(cfi_read_pri);
+
+void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_fixup *f;
+
+ for (f=fixups; f->fixup; f++) {
+ if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
+ ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) {
+ f->fixup(mtd, f->param);
+ }
+ }
+}
+
+EXPORT_SYMBOL(cfi_fixup);
+
+int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
+ loff_t ofs, size_t len, void *thunk)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long adr;
+ int chipnum, ret = 0;
+ int i, first;
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+
+ if (ofs > mtd->size)
+ return -EINVAL;
+
+ if ((len + ofs) > mtd->size)
+ return -EINVAL;
+
+ /* Check that both start and end of the requested erase are
+ * aligned with the erasesize at the appropriate addresses.
+ */
+
+ i = 0;
+
+ /* Skip all erase regions which are ended before the start of
+ the requested erase. Actually, to save on the calculations,
+ we skip to the first erase region which starts after the
+ start of the requested erase, and then go back one.
+ */
+
+ while (i < mtd->numeraseregions && ofs >= regions[i].offset)
+ i++;
+ i--;
+
+ /* OK, now i is pointing at the erase region in which this
+ erase request starts. Check the start of the requested
+ erase range is aligned with the erase size which is in
+ effect here.
+ */
+
+ if (ofs & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ /* Remember the erase region we start on */
+ first = i;
+
+ /* Next, check that the end of the requested erase is aligned
+ * with the erase region at that address.
+ */
+
+ while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
+ i++;
+
+ /* As before, drop back one to point at the region in which
+ the address actually falls
+ */
+ i--;
+
+ if ((ofs + len) & (regions[i].erasesize-1))
+ return -EINVAL;
+
+ chipnum = ofs >> cfi->chipshift;
+ adr = ofs - (chipnum << cfi->chipshift);
+
+ i=first;
+
+ while(len) {
+ int size = regions[i].erasesize;
+
+ ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);
+
+ if (ret)
+ return ret;
+
+ adr += size;
+ ofs += size;
+ len -= size;
+
+ if (ofs == regions[i].offset + size * regions[i].numblocks)
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(cfi_varsize_frob);
+
+MODULE_LICENSE("GPL");
diff --git a/linux-2.4.x/drivers/mtd/chips/chipreg.c b/linux-2.4.x/drivers/mtd/chips/chipreg.c
index da5512c..51c4952 100644
--- a/linux-2.4.x/drivers/mtd/chips/chipreg.c
+++ b/linux-2.4.x/drivers/mtd/chips/chipreg.c
@@ -1,5 +1,5 @@
/*
- * $Id: chipreg.c,v 1.12 2001/10/02 15:29:53 dwmw2 Exp $
+ * $Id: chipreg.c,v 1.19 2005/11/07 11:14:23 gleixner Exp $
*
* Registration for chip drivers
*
@@ -7,12 +7,15 @@
#include <linux/kernel.h>
#include <linux/config.h>
+#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
-#include <linux/mtd/compatmac.h>
+#include <linux/slab.h>
#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/compatmac.h>
-spinlock_t chip_drvs_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(chip_drvs_lock);
static LIST_HEAD(chip_drvs_list);
void register_mtd_chip_driver(struct mtd_chip_driver *drv)
@@ -29,7 +32,7 @@ void unregister_mtd_chip_driver(struct mtd_chip_driver *drv)
spin_unlock(&chip_drvs_lock);
}
-static struct mtd_chip_driver *get_mtd_chip_driver (char *name)
+static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
{
struct list_head *pos;
struct mtd_chip_driver *ret = NULL, *this;
@@ -38,16 +41,14 @@ static struct mtd_chip_driver *get_mtd_chip_driver (char *name)
list_for_each(pos, &chip_drvs_list) {
this = list_entry(pos, typeof(*this), list);
-
+
if (!strcmp(this->name, name)) {
ret = this;
break;
}
}
- if (ret && !try_inc_mod_count(ret->module)) {
- /* Eep. Failed. */
+ if (ret && !try_module_get(ret->module))
ret = NULL;
- }
spin_unlock(&chip_drvs_lock);
@@ -57,39 +58,53 @@ static struct mtd_chip_driver *get_mtd_chip_driver (char *name)
/* Hide all the horrid details, like some silly person taking
get_module_symbol() away from us, from the caller. */
-struct mtd_info *do_map_probe(char *name, struct map_info *map)
+struct mtd_info *do_map_probe(const char *name, struct map_info *map)
{
struct mtd_chip_driver *drv;
struct mtd_info *ret;
drv = get_mtd_chip_driver(name);
- if (!drv && !request_module(name))
+ if (!drv && !request_module("%s", name))
drv = get_mtd_chip_driver(name);
if (!drv)
return NULL;
ret = drv->probe(map);
-#ifdef CONFIG_MODULES
- /* We decrease the use count here. It may have been a
+
+ /* We decrease the use count here. It may have been a
probe-only module, which is no longer required from this
point, having given us a handle on (and increased the use
count of) the actual driver code.
*/
- if(drv->module)
- __MOD_DEC_USE_COUNT(drv->module);
-#endif
+ module_put(drv->module);
if (ret)
return ret;
-
+
return NULL;
}
+/*
+ * Destroy an MTD device which was created for a map device.
+ * Make sure the MTD device is already unregistered before calling this
+ */
+void map_destroy(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+
+ if (map->fldrv->destroy)
+ map->fldrv->destroy(mtd);
+
+ module_put(map->fldrv->module);
+
+ kfree(mtd);
+}
EXPORT_SYMBOL(register_mtd_chip_driver);
EXPORT_SYMBOL(unregister_mtd_chip_driver);
EXPORT_SYMBOL(do_map_probe);
+EXPORT_SYMBOL(map_destroy);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/linux-2.4.x/drivers/mtd/chips/fwh_lock.h b/linux-2.4.x/drivers/mtd/chips/fwh_lock.h
new file mode 100644
index 0000000..77303ce
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/chips/fwh_lock.h
@@ -0,0 +1,107 @@
+#ifndef FWH_LOCK_H
+#define FWH_LOCK_H
+
+
+enum fwh_lock_state {
+ FWH_UNLOCKED = 0,
+ FWH_DENY_WRITE = 1,
+ FWH_IMMUTABLE = 2,
+ FWH_DENY_READ = 4,
+};
+
+struct fwh_xxlock_thunk {
+ enum fwh_lock_state val;
+ flstate_t state;
+};
+
+
+#define FWH_XXLOCK_ONEBLOCK_LOCK ((struct fwh_xxlock_thunk){ FWH_DENY_WRITE, FL_LOCKING})
+#define FWH_XXLOCK_ONEBLOCK_UNLOCK ((struct fwh_xxlock_thunk){ FWH_UNLOCKED, FL_UNLOCKING})
+
+/*
+ * This locking/unlocking is specific to firmware hub parts. Only one
+ * is known that supports the Intel command set. Firmware
+ * hub parts cannot be interleaved as they are on the LPC bus
+ * so this code has not been tested with interleaved chips,
+ * and will likely fail in that context.
+ */
+static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct fwh_xxlock_thunk *xxlt = (struct fwh_xxlock_thunk *)thunk;
+ int ret;
+
+	/* Refuse the operation if we cannot look behind the chip */
+ if (chip->start < 0x400000) {
+ DEBUG( MTD_DEBUG_LEVEL3,
+ "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
+ __func__, chip->start );
+ return -EIO;
+ }
+ /*
+ * lock block registers:
+	 *  - on 64k boundaries and
+ * - bit 1 set high
+ * - block lock registers are 4MiB lower - overflow subtract (danger)
+ *
+ * The address manipulation is first done on the logical address
+ * which is 0 at the start of the chip, and then the offset of
+	 * the individual chip is added to it. Done in any other order, a
+	 * weird map offset could cause problems.
+ */
+ adr = (adr & ~0xffffUL) | 0x2;
+ adr += chip->start - 0x400000;
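+	/*
+	 * Hypothetical example: for a block at logical adr 0x12345 on a chip
+	 * mapped at 0xffc00000, the masking above gives 0x10002 (64KiB
+	 * aligned, bit 1 set) and the rebase yields 0xff810002, i.e. the
+	 * lock register 4MiB below the flash window.
+	 */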
+
+ /*
+ * This is easy because these are writes to registers and not writes
+ * to flash memory - that means that we don't have to check status
+ * and timeout.
+ */
+ spin_lock(chip->mutex);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
+ if (ret) {
+ spin_unlock(chip->mutex);
+ return ret;
+ }
+
+ chip->state = xxlt->state;
+ map_write(map, CMD(xxlt->val), adr);
+
+ /* Done and happy. */
+ chip->state = FL_READY;
+ put_chip(map, chip, adr);
+ spin_unlock(chip->mutex);
+ return 0;
+}
+
+
+static int fwh_lock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ int ret;
+
+ ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
+ (void *)&FWH_XXLOCK_ONEBLOCK_LOCK);
+
+ return ret;
+}
+
+
+static int fwh_unlock_varsize(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ int ret;
+
+ ret = cfi_varsize_frob(mtd, fwh_xxlock_oneblock, ofs, len,
+ (void *)&FWH_XXLOCK_ONEBLOCK_UNLOCK);
+
+ return ret;
+}
+
+static void fixup_use_fwh_lock(struct mtd_info *mtd, void *param)
+{
+ printk(KERN_NOTICE "using fwh lock/unlock method\n");
+ /* Setup for the chips with the fwh lock method */
+ mtd->lock = fwh_lock_varsize;
+ mtd->unlock = fwh_unlock_varsize;
+}
+#endif /* FWH_LOCK_H */
diff --git a/linux-2.4.x/drivers/mtd/chips/gen_probe.c b/linux-2.4.x/drivers/mtd/chips/gen_probe.c
index c6e815e..41bd59d 100644
--- a/linux-2.4.x/drivers/mtd/chips/gen_probe.c
+++ b/linux-2.4.x/drivers/mtd/chips/gen_probe.c
@@ -1,11 +1,13 @@
/*
* Routines common to all CFI-type probes.
- * (C) 2001, 2001 Red Hat, Inc.
+ * (C) 2001-2003 Red Hat, Inc.
* GPL'd
- * $Id: gen_probe.c,v 1.5 2001/10/02 15:05:12 dwmw2 Exp $
+ * $Id: gen_probe.c,v 1.24 2005/11/07 11:14:23 gleixner Exp $
*/
#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
@@ -24,7 +26,7 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
/* First probe the map to see if we have CFI stuff there. */
cfi = genprobe_ident_chips(map, cp);
-
+
if (!cfi)
return NULL;
@@ -34,12 +36,12 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
mtd = check_cmd_set(map, 1); /* First the primary cmdset */
if (!mtd)
mtd = check_cmd_set(map, 0); /* Then the secondary */
-
+
if (mtd)
return mtd;
- printk(KERN_WARNING"cfi_probe: No supported Vendor Command Set found\n");
-
+ printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n");
+
kfree(cfi->cfiq);
kfree(cfi);
map->fldrv_priv = NULL;
@@ -48,24 +50,24 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
EXPORT_SYMBOL(mtd_do_chip_probe);
-struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
+static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp)
{
- unsigned long base=0;
struct cfi_private cfi;
struct cfi_private *retcfi;
- struct flchip chip[MAX_CFI_CHIPS];
- int i;
+ unsigned long *chip_map;
+ int i, j, mapsize;
+ int max_chips;
memset(&cfi, 0, sizeof(cfi));
- /* Call the probetype-specific code with all permutations of
+ /* Call the probetype-specific code with all permutations of
interleave and device type, etc. */
if (!genprobe_new_chip(map, cp, &cfi)) {
/* The probe didn't like it */
- printk(KERN_WARNING "%s: Found no %s device at location zero\n",
+ printk(KERN_DEBUG "%s: Found no %s device at location zero\n",
cp->name, map->name);
return NULL;
- }
+ }
#if 0 /* Let the CFI probe routine do this sanity check. The Intel and AMD
probe routines won't ever return a broken CFI structure anyway,
@@ -77,43 +79,50 @@ struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe
return NULL;
}
#endif
- chip[0].start = 0;
- chip[0].state = FL_READY;
cfi.chipshift = cfi.cfiq->DevSize;
- switch(cfi.interleave) {
-#ifdef CFIDEV_INTERLEAVE_1
- case 1:
- break;
-#endif
-#ifdef CFIDEV_INTERLEAVE_2
- case 2:
+ if (cfi_interleave_is_1(&cfi)) {
+ ;
+ } else if (cfi_interleave_is_2(&cfi)) {
cfi.chipshift++;
- break;
-#endif
-#ifdef CFIDEV_INTERLEAVE_4
- case 4:
- cfi.chipshift+=2;
- break;
-#endif
- default:
+ } else if (cfi_interleave_is_4((&cfi))) {
+ cfi.chipshift += 2;
+ } else if (cfi_interleave_is_8(&cfi)) {
+ cfi.chipshift += 3;
+ } else {
BUG();
}
-
+
cfi.numchips = 1;
/*
+ * Allocate memory for bitmap of valid chips.
+ * Align bitmap storage size to full byte.
+ */
+ max_chips = map->size >> cfi.chipshift;
+ mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0);
+ chip_map = kmalloc(mapsize, GFP_KERNEL);
+ if (!chip_map) {
+ printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
+ kfree(cfi.cfiq);
+ return NULL;
+ }
+ memset (chip_map, 0, mapsize);
+
+ set_bit(0, chip_map); /* Mark first chip valid */
+
+ /*
* Now probe for other chips, checking sensibly for aliases while
	 * we're at it. The new_chip probe above should have left the first
* chip in read mode.
*/
- for (base = (1<<cfi.chipshift); base + (1<<cfi.chipshift) <= map->size;
- base += (1<<cfi.chipshift))
- cp->probe_chip(map, base, &chip[0], &cfi);
+ for (i = 1; i < max_chips; i++) {
+ cp->probe_chip(map, i << cfi.chipshift, chip_map, &cfi);
+ }
/*
- * Now allocate the space for the structures we need to return to
+ * Now allocate the space for the structures we need to return to
* our caller, and copy the appropriate data into them.
*/
@@ -122,122 +131,65 @@ struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe
if (!retcfi) {
printk(KERN_WARNING "%s: kmalloc failed for CFI private structure\n", map->name);
kfree(cfi.cfiq);
+ kfree(chip_map);
return NULL;
}
memcpy(retcfi, &cfi, sizeof(cfi));
- memcpy(&retcfi->chips[0], chip, sizeof(struct flchip) * cfi.numchips);
-
- /* Fix up the stuff that breaks when you move it */
- for (i=0; i< retcfi->numchips; i++) {
- init_waitqueue_head(&retcfi->chips[i].wq);
- spin_lock_init(&retcfi->chips[i]._spinlock);
- retcfi->chips[i].mutex = &retcfi->chips[i]._spinlock;
+ memset(&retcfi->chips[0], 0, sizeof(struct flchip) * cfi.numchips);
+
+ for (i = 0, j = 0; (j < cfi.numchips) && (i < max_chips); i++) {
+ if(test_bit(i, chip_map)) {
+ struct flchip *pchip = &retcfi->chips[j++];
+
+ pchip->start = (i << cfi.chipshift);
+ pchip->state = FL_READY;
+ init_waitqueue_head(&pchip->wq);
+ spin_lock_init(&pchip->_spinlock);
+ pchip->mutex = &pchip->_spinlock;
+ }
}
+ kfree(chip_map);
return retcfi;
}
-
+
static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp,
struct cfi_private *cfi)
{
- switch (map->buswidth) {
-#ifdef CFIDEV_BUSWIDTH_1
- case CFIDEV_BUSWIDTH_1:
- cfi->interleave = CFIDEV_INTERLEAVE_1;
-
- cfi->device_type = CFI_DEVICETYPE_X8;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-
- cfi->device_type = CFI_DEVICETYPE_X16;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
- break;
-#endif /* CFIDEV_BUSWITDH_1 */
-
-#ifdef CFIDEV_BUSWIDTH_2
- case CFIDEV_BUSWIDTH_2:
-#ifdef CFIDEV_INTERLEAVE_1
- cfi->interleave = CFIDEV_INTERLEAVE_1;
-
- cfi->device_type = CFI_DEVICETYPE_X16;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-#endif /* CFIDEV_INTERLEAVE_1 */
-#ifdef CFIDEV_INTERLEAVE_2
- cfi->interleave = CFIDEV_INTERLEAVE_2;
-
- cfi->device_type = CFI_DEVICETYPE_X8;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-
- cfi->device_type = CFI_DEVICETYPE_X16;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-#endif /* CFIDEV_INTERLEAVE_2 */
- break;
-#endif /* CFIDEV_BUSWIDTH_2 */
-
-#ifdef CFIDEV_BUSWIDTH_4
- case CFIDEV_BUSWIDTH_4:
-#if defined(CFIDEV_INTERLEAVE_1) && defined(SOMEONE_ACTUALLY_MAKES_THESE)
- cfi->interleave = CFIDEV_INTERLEAVE_1;
-
- cfi->device_type = CFI_DEVICETYPE_X32;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-#endif /* CFIDEV_INTERLEAVE_1 */
-#ifdef CFIDEV_INTERLEAVE_2
- cfi->interleave = CFIDEV_INTERLEAVE_2;
-
-#ifdef SOMEONE_ACTUALLY_MAKES_THESE
- cfi->device_type = CFI_DEVICETYPE_X32;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-#endif
- cfi->device_type = CFI_DEVICETYPE_X16;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-
- cfi->device_type = CFI_DEVICETYPE_X8;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-#endif /* CFIDEV_INTERLEAVE_2 */
-#ifdef CFIDEV_INTERLEAVE_4
- cfi->interleave = CFIDEV_INTERLEAVE_4;
-
-#ifdef SOMEONE_ACTUALLY_MAKES_THESE
- cfi->device_type = CFI_DEVICETYPE_X32;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-#endif
- cfi->device_type = CFI_DEVICETYPE_X16;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-
- cfi->device_type = CFI_DEVICETYPE_X8;
- if (cp->probe_chip(map, 0, NULL, cfi))
- return 1;
-#endif /* CFIDEV_INTERLEAVE_4 */
- break;
-#endif /* CFIDEV_BUSWIDTH_4 */
-
- default:
- printk(KERN_WARNING "genprobe_new_chip called with unsupported buswidth %d\n", map->buswidth);
- return 0;
+	int min_chips = (map_bankwidth(map)/4?:1); /* Devices are at most 4 bytes (x32) wide... */
+	int max_chips = map_bankwidth(map);	/* ...and at least 1 byte (x8) wide */
+ int nr_chips, type;
+
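+	/*
+	 * Illustration (assuming a 4-byte-wide bank and all interleaves
+	 * enabled in the config): the loop below tries interleave 4 with
+	 * x8/x16/x32 parts, then interleave 2 with x16/x32, then a single
+	 * x32 device.
+	 */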
+ for (nr_chips = max_chips; nr_chips >= min_chips; nr_chips >>= 1) {
+
+ if (!cfi_interleave_supported(nr_chips))
+ continue;
+
+ cfi->interleave = nr_chips;
+
+ /* Minimum device size. Don't look for one 8-bit device
+ in a 16-bit bus, etc. */
+ type = map_bankwidth(map) / nr_chips;
+
+ for (; type <= CFI_DEVICETYPE_X32; type<<=1) {
+ cfi->device_type = type;
+
+ if (cp->probe_chip(map, 0, NULL, cfi))
+ return 1;
+ }
}
return 0;
}
-
typedef struct mtd_info *cfi_cmdset_fn_t(struct map_info *, int);
extern cfi_cmdset_fn_t cfi_cmdset_0001;
extern cfi_cmdset_fn_t cfi_cmdset_0002;
+extern cfi_cmdset_fn_t cfi_cmdset_0020;
-static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
+static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
@@ -247,7 +199,7 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
cfi_cmdset_fn_t *probe_function;
sprintf(probename, "cfi_cmdset_%4.4X", type);
-
+
probe_function = inter_module_get_request(probename, probename);
if (probe_function) {
@@ -269,7 +221,7 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
__u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
-
+
if (type == P_ID_NONE || type == P_ID_RESERVED)
return NULL;
@@ -283,12 +235,17 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
#ifdef CONFIG_MTD_CFI_INTELEXT
case 0x0001:
case 0x0003:
+ case 0x0200:
return cfi_cmdset_0001(map, primary);
#endif
#ifdef CONFIG_MTD_CFI_AMDSTD
case 0x0002:
return cfi_cmdset_0002(map, primary);
#endif
+#ifdef CONFIG_MTD_CFI_STAA
+ case 0x0020:
+ return cfi_cmdset_0020(map, primary);
+#endif
}
return cfi_cmdset_unknown(map, primary);
diff --git a/linux-2.4.x/drivers/mtd/chips/jedec.c b/linux-2.4.x/drivers/mtd/chips/jedec.c
index b61b0ce..93efdf0 100644
--- a/linux-2.4.x/drivers/mtd/chips/jedec.c
+++ b/linux-2.4.x/drivers/mtd/chips/jedec.c
@@ -1,6 +1,6 @@
/* JEDEC Flash Interface.
- * This is an older type of interface for self programming flash. It is
+ * This is an older type of interface for self programming flash. It is
* commonly use in older AMD chips and is obsolete compared with CFI.
* It is called JEDEC because the JEDEC association distributes the ID codes
* for the chips.
@@ -11,10 +11,17 @@
* not going to guess how to send commands to them, plus I expect they will
* all speak CFI..
*
- * $Id: jedec.c,v 1.13 2002/02/08 15:57:21 rkaiser Exp $
+ * $Id: jedec.c,v 1.24 2005/11/07 11:14:23 gleixner Exp $
*/
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/mtd/jedec.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/compatmac.h>
static struct mtd_info *jedec_probe(struct map_info *);
static int jedec_probe8(struct map_info *map,unsigned long base,
@@ -33,20 +40,57 @@ static unsigned long my_bank_size;
/* Listing of parts and sizes. We need this table to learn the sector
size of the chip and the total length */
-static const struct JEDECTable JEDEC_table[] =
- {{0x013D,"AMD Am29F017D",2*1024*1024,64*1024,MTD_CAP_NORFLASH},
- {0x01AD,"AMD Am29F016",2*1024*1024,64*1024,MTD_CAP_NORFLASH},
- {0x01D5,"AMD Am29F080",1*1024*1024,64*1024,MTD_CAP_NORFLASH},
- {0x01A4,"AMD Am29F040",512*1024,64*1024,MTD_CAP_NORFLASH},
- {0x20E3,"AMD Am29W040B",512*1024,64*1024,MTD_CAP_NORFLASH},
- {0xC2AD,"Macronix MX29F016",2*1024*1024,64*1024,MTD_CAP_NORFLASH},
- {}};
+static const struct JEDECTable JEDEC_table[] = {
+ {
+ .jedec = 0x013D,
+ .name = "AMD Am29F017D",
+ .size = 2*1024*1024,
+ .sectorsize = 64*1024,
+ .capabilities = MTD_CAP_NORFLASH
+ },
+ {
+ .jedec = 0x01AD,
+ .name = "AMD Am29F016",
+ .size = 2*1024*1024,
+ .sectorsize = 64*1024,
+ .capabilities = MTD_CAP_NORFLASH
+ },
+ {
+ .jedec = 0x01D5,
+ .name = "AMD Am29F080",
+ .size = 1*1024*1024,
+ .sectorsize = 64*1024,
+ .capabilities = MTD_CAP_NORFLASH
+ },
+ {
+ .jedec = 0x01A4,
+ .name = "AMD Am29F040",
+ .size = 512*1024,
+ .sectorsize = 64*1024,
+ .capabilities = MTD_CAP_NORFLASH
+ },
+ {
+ .jedec = 0x20E3,
+ .name = "AMD Am29W040B",
+ .size = 512*1024,
+ .sectorsize = 64*1024,
+ .capabilities = MTD_CAP_NORFLASH
+ },
+ {
+ .jedec = 0xC2AD,
+ .name = "Macronix MX29F016",
+ .size = 2*1024*1024,
+ .sectorsize = 64*1024,
+ .capabilities = MTD_CAP_NORFLASH
+ },
+ { .jedec = 0x0 }
+};
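
/*
 * Sketch, not part of the patch: the .jedec field above packs the
 * manufacturer ID in the high byte and the device ID in the low byte,
 * matching how handle_jedecs() later stores (Mfg << 8) | Id and how
 * jedec_idtoinf() is called with the two halves.
 */
#include <stdio.h>

int main(void)
{
	unsigned short jedec = 0x013D;		/* "AMD Am29F017D" in JEDEC_table[] */
	unsigned char mfr = jedec >> 8;		/* 0x01: AMD */
	unsigned char dev = jedec & 0xFF;	/* 0x3D: Am29F017D */

	printf("mfr %#x, dev %#x\n", mfr, dev);
	return 0;
}
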
static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id);
static void jedec_sync(struct mtd_info *mtd) {};
-static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
+static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
-static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
+static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf);
static struct mtd_info *jedec_probe(struct map_info *map);
@@ -54,9 +98,9 @@ static struct mtd_info *jedec_probe(struct map_info *map);
static struct mtd_chip_driver jedec_chipdrv = {
- probe: jedec_probe,
- name: "jedec",
- module: THIS_MODULE
+ .probe = jedec_probe,
+ .name = "jedec",
+ .module = THIS_MODULE
};
/* Probe entry point */
@@ -78,27 +122,27 @@ static struct mtd_info *jedec_probe(struct map_info *map)
memset(MTD, 0, sizeof(struct mtd_info) + sizeof(struct jedec_private));
priv = (struct jedec_private *)&MTD[1];
-
+
my_bank_size = map->size;
if (map->size/my_bank_size > MAX_JEDEC_CHIPS)
{
printk("mtd: Increase MAX_JEDEC_CHIPS, too many banks.\n");
kfree(MTD);
- return 0;
+ return NULL;
}
-
+
for (Base = 0; Base < map->size; Base += my_bank_size)
{
// Perhaps zero could designate all tests?
if (map->buswidth == 0)
map->buswidth = 1;
-
+
if (map->buswidth == 1){
if (jedec_probe8(map,Base,priv) == 0) {
printk("did recognize jedec chip\n");
kfree(MTD);
- return 0;
+ return NULL;
}
}
if (map->buswidth == 2)
@@ -106,7 +150,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
if (map->buswidth == 4)
jedec_probe32(map,Base,priv);
}
-
+
// Get the biggest sector size
SectorSize = 0;
for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
@@ -116,7 +160,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
if (priv->chips[I].sectorsize > SectorSize)
SectorSize = priv->chips[I].sectorsize;
}
-
+
// Quickly ensure that the other sector sizes are factors of the largest
for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
{
@@ -124,40 +168,39 @@ static struct mtd_info *jedec_probe(struct map_info *map)
{
printk("mtd: Failed. Device has incompatible mixed sector sizes\n");
kfree(MTD);
- return 0;
- }
+ return NULL;
+ }
}
-
+
/* Generate a part name that includes the number of different chips and
other configuration information */
count = 1;
- strncpy(Part,map->name,sizeof(Part)-10);
- Part[sizeof(Part)-11] = 0;
+ strlcpy(Part,map->name,sizeof(Part)-10);
strcat(Part," ");
Uniq = 0;
for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
{
const struct JEDECTable *JEDEC;
-
+
if (priv->chips[I+1].jedec == priv->chips[I].jedec)
{
count++;
continue;
}
-
+
// Locate the chip in the jedec table
JEDEC = jedec_idtoinf(priv->chips[I].jedec >> 8,priv->chips[I].jedec);
if (JEDEC == 0)
{
printk("mtd: Internal Error, JEDEC not set\n");
kfree(MTD);
- return 0;
+ return NULL;
}
-
+
if (Uniq != 0)
strcat(Part,",");
Uniq++;
-
+
if (count != 1)
sprintf(Part+strlen(Part),"%x*[%s]",count,JEDEC->name);
else
@@ -165,7 +208,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
if (strlen(Part) > sizeof(Part)*2/3)
break;
count = 1;
- }
+ }
/* Determine if the chips are organized in a linear fashion, or if there
are empty banks. Note, the last bank does not count here, only the
@@ -179,7 +222,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
if (!priv->size) {
printk("priv->size is zero\n");
kfree(MTD);
- return 0;
+ return NULL;
}
if (priv->size/my_bank_size) {
if (priv->size/my_bank_size == 1) {
@@ -190,7 +233,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
{
if (priv->bank_fill[I] != my_bank_size)
priv->is_banked = 1;
-
+
/* This even could be eliminated, but new de-optimized read/write
functions have to be written */
printk("priv->bank_fill[%d] is %lx, priv->bank_fill[0] is %lx\n",I,priv->bank_fill[I],priv->bank_fill[0]);
@@ -198,8 +241,8 @@ static struct mtd_info *jedec_probe(struct map_info *map)
{
printk("mtd: Failed. Cannot handle unsymmetric banking\n");
kfree(MTD);
- return 0;
- }
+ return NULL;
+ }
}
}
}
@@ -207,10 +250,9 @@ static struct mtd_info *jedec_probe(struct map_info *map)
strcat(Part,", banked");
// printk("Part: '%s'\n",Part);
-
+
memset(MTD,0,sizeof(*MTD));
- // strncpy(MTD->name,Part,sizeof(MTD->name));
- // MTD->name[sizeof(MTD->name)-1] = 0;
+ // strlcpy(MTD->name,Part,sizeof(MTD->name));
MTD->name = map->name;
MTD->type = MTD_NORFLASH;
MTD->flags = MTD_CAP_NORFLASH;
@@ -229,7 +271,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
MTD->priv = map;
map->fldrv_priv = priv;
map->fldrv = &jedec_chipdrv;
- MOD_INC_USE_COUNT;
+ __module_get(THIS_MODULE);
return MTD;
}
@@ -249,7 +291,7 @@ static int checkparity(u_char C)
/* Take an array of JEDEC numbers that represent interleved flash chips
and process them. Check to make sure they are good JEDEC numbers, look
- them up and then add them to the chip list */
+ them up and then add them to the chip list */
static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
unsigned long base,struct jedec_private *priv)
{
@@ -264,16 +306,16 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
if (checkparity(Mfg[I]) == 0 || checkparity(Id[I]) == 0)
return 0;
}
-
+
// Finally, just make sure all the chip sizes are the same
JEDEC = jedec_idtoinf(Mfg[0],Id[0]);
-
+
if (JEDEC == 0)
{
printk("mtd: Found JEDEC flash chip, but do not have a table entry for %x:%x\n",Mfg[0],Mfg[1]);
return 0;
}
-
+
Size = JEDEC->size;
SectorSize = JEDEC->sectorsize;
for (I = 0; I != Count; I++)
@@ -289,7 +331,7 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
{
printk("mtd: Failed. Interleved flash does not have matching characteristics\n");
return 0;
- }
+ }
}
// Load the Chips
@@ -303,13 +345,13 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
{
printk("mtd: Device has too many chips. Increase MAX_JEDEC_CHIPS\n");
return 0;
- }
-
+ }
+
// Add them to the table
for (J = 0; J != Count; J++)
{
unsigned long Bank;
-
+
JEDEC = jedec_idtoinf(Mfg[J],Id[J]);
priv->chips[I].jedec = (Mfg[J] << 8) | Id[J];
priv->chips[I].size = JEDEC->size;
@@ -322,17 +364,17 @@ static int handle_jedecs(struct map_info *map,__u8 *Mfg,__u8 *Id,unsigned Count,
// log2 n :|
priv->chips[I].addrshift = 0;
for (Bank = Count; Bank != 1; Bank >>= 1, priv->chips[I].addrshift++);
-
+
// Determine how filled this bank is.
Bank = base & (~(my_bank_size-1));
- if (priv->bank_fill[Bank/my_bank_size] < base +
+ if (priv->bank_fill[Bank/my_bank_size] < base +
(JEDEC->size << priv->chips[I].addrshift) - Bank)
priv->bank_fill[Bank/my_bank_size] = base + (JEDEC->size << priv->chips[I].addrshift) - Bank;
I++;
}
priv->size += priv->chips[I-1].size*Count;
-
+
return priv->chips[I-1].size;
}
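
/*
 * Sketch, not part of the patch: the "log2 n" loop in handle_jedecs()
 * computes addrshift as log2 of the interleave count, i.e. how many extra
 * address bits the interleaving consumes.  Reproduced standalone:
 */
#include <stdio.h>

int main(void)
{
	unsigned long count;

	for (count = 1; count <= 4; count <<= 1) {
		unsigned long bank, addrshift = 0;

		for (bank = count; bank != 1; bank >>= 1, addrshift++)
			;
		printf("interleave %lu -> addrshift %lu\n", count, addrshift);
	}
	return 0;
}
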
@@ -344,15 +386,15 @@ static const struct JEDECTable *jedec_idtoinf(__u8 mfr,__u8 id)
for (I = 0; JEDEC_table[I].jedec != 0; I++)
if (JEDEC_table[I].jedec == Id)
return JEDEC_table + I;
- return 0;
+ return NULL;
}
// Look for flash using an 8 bit bus interface
static int jedec_probe8(struct map_info *map,unsigned long base,
struct jedec_private *priv)
-{
- #define flread(x) map->read8(map,base+x)
- #define flwrite(v,x) map->write8(map,v,base+x)
+{
+ #define flread(x) map_read8(map,base+x)
+ #define flwrite(v,x) map_write8(map,v,base+x)
const unsigned long AutoSel1 = 0xAA;
const unsigned long AutoSel2 = 0x55;
@@ -368,20 +410,20 @@ static int jedec_probe8(struct map_info *map,unsigned long base,
OldVal = flread(base);
for (I = 0; OldVal != flread(base) && I < 10000; I++)
OldVal = flread(base);
-
+
// Reset the chip
- flwrite(Reset,0x555);
-
+ flwrite(Reset,0x555);
+
// Send the sequence
flwrite(AutoSel1,0x555);
flwrite(AutoSel2,0x2AA);
flwrite(AutoSel3,0x555);
-
+
// Get the JEDEC numbers
Mfg[0] = flread(0);
Id[0] = flread(1);
// printk("Mfg is %x, Id is %x\n",Mfg[0],Id[0]);
-
+
Size = handle_jedecs(map,Mfg,Id,1,base,priv);
// printk("handle_jedecs Size is %x\n",(unsigned int)Size);
if (Size == 0)
@@ -389,13 +431,13 @@ static int jedec_probe8(struct map_info *map,unsigned long base,
flwrite(Reset,0x555);
return 0;
}
-
+
// Reset.
flwrite(Reset,0x555);
-
+
return 1;
-
+
#undef flread
#undef flwrite
}
@@ -411,8 +453,8 @@ static int jedec_probe16(struct map_info *map,unsigned long base,
static int jedec_probe32(struct map_info *map,unsigned long base,
struct jedec_private *priv)
{
- #define flread(x) map->read32(map,base+((x)<<2))
- #define flwrite(v,x) map->write32(map,v,base+((x)<<2))
+ #define flread(x) map_read32(map,base+((x)<<2))
+ #define flwrite(v,x) map_write32(map,v,base+((x)<<2))
const unsigned long AutoSel1 = 0xAAAAAAAA;
const unsigned long AutoSel2 = 0x55555555;
@@ -428,17 +470,17 @@ static int jedec_probe32(struct map_info *map,unsigned long base,
OldVal = flread(base);
for (I = 0; OldVal != flread(base) && I < 10000; I++)
OldVal = flread(base);
-
+
// Reset the chip
- flwrite(Reset,0x555);
-
+ flwrite(Reset,0x555);
+
// Send the sequence
flwrite(AutoSel1,0x555);
flwrite(AutoSel2,0x2AA);
flwrite(AutoSel3,0x555);
-
+
// Test #1, JEDEC numbers are readable from 0x??00/0x??01
- if (flread(0) != flread(0x100) ||
+ if (flread(0) != flread(0x100) ||
flread(1) != flread(0x101))
{
flwrite(Reset,0x555);
@@ -452,21 +494,21 @@ static int jedec_probe32(struct map_info *map,unsigned long base,
OldVal = flread(1);
for (I = 0; I != 4; I++)
Id[I] = (OldVal >> (I*8));
-
+
Size = handle_jedecs(map,Mfg,Id,4,base,priv);
if (Size == 0)
{
flwrite(Reset,0x555);
return 0;
}
-
+
/* Check if there is address wrap around within a single bank, if this
returns JEDEC numbers then we assume that it is wrap around. Notice
we call this routine with the JEDEC return still enabled, if two or
more flashes have a truncated address space the probe test will still
work */
- if (base + Size+0x555 < map->size &&
- base + Size+0x555 < (base & (~(my_bank_size-1))) + my_bank_size)
+ if (base + (Size<<2)+0x555 < map->size &&
+ base + (Size<<2)+0x555 < (base & (~(my_bank_size-1))) + my_bank_size)
{
if (flread(base+Size) != flread(base+Size + 0x100) ||
flread(base+Size + 1) != flread(base+Size + 0x101))
@@ -477,31 +519,31 @@ static int jedec_probe32(struct map_info *map,unsigned long base,
// Reset.
flwrite(0xF0F0F0F0,0x555);
-
+
return 1;
-
+
#undef flread
#undef flwrite
}
/* Linear read. */
-static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
+static int jedec_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
- struct map_info *map = (struct map_info *)mtd->priv;
-
- map->copy_from(map, buf, from, len);
+ struct map_info *map = mtd->priv;
+
+ map_copy_from(map, buf, from, len);
*retlen = len;
- return 0;
+ return 0;
}
/* Banked read. Take special care to jump past the holes in the bank
mapping. This version assumes symetry in the holes.. */
-static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
+static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
- struct map_info *map = (struct map_info *)mtd->priv;
- struct jedec_private *priv = (struct jedec_private *)map->fldrv_priv;
+ struct map_info *map = mtd->priv;
+ struct jedec_private *priv = map->fldrv_priv;
*retlen = 0;
while (len > 0)
@@ -513,17 +555,17 @@ static int jedec_read_banked(struct mtd_info *mtd, loff_t from, size_t len,
if (priv->bank_fill[0] - offset < len)
get = priv->bank_fill[0] - offset;
- bank /= priv->bank_fill[0];
- map->copy_from(map,buf + *retlen,bank*my_bank_size + offset,get);
-
+ bank /= priv->bank_fill[0];
+ map_copy_from(map,buf + *retlen,bank*my_bank_size + offset,get);
+
len -= get;
*retlen += get;
from += get;
- }
- return 0;
+ }
+ return 0;
}
-/* Pass the flags value that the flash return before it re-entered read
+/* Pass the flags value that the flash returned before it re-entered read
mode. */
static void jedec_flash_failed(unsigned char code)
{
@@ -537,23 +579,23 @@ static void jedec_flash_failed(unsigned char code)
printk("mtd: Programming didn't take\n");
}
-/* This uses the erasure function described in the AMD Flash Handbook,
+/* This uses the erasure function described in the AMD Flash Handbook,
it will work for flashes with a fixed sector size only. Flashes with
a selection of sector sizes (ie the AMD Am29F800B) will need a different
- routine. This routine tries to parallize erasing multiple chips/sectors
+ routine. This routine tries to parallelize erasing multiple chips/sectors
where possible */
static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
// Does IO to the currently selected chip
- #define flread(x) map->read8(map,chip->base+((x)<<chip->addrshift))
- #define flwrite(v,x) map->write8(map,v,chip->base+((x)<<chip->addrshift))
-
+ #define flread(x) map_read8(map,chip->base+((x)<<chip->addrshift))
+ #define flwrite(v,x) map_write8(map,v,chip->base+((x)<<chip->addrshift))
+
unsigned long Time = 0;
unsigned long NoTime = 0;
unsigned long start = instr->addr, len = instr->len;
unsigned int I;
- struct map_info *map = (struct map_info *)mtd->priv;
- struct jedec_private *priv = (struct jedec_private *)map->fldrv_priv;
+ struct map_info *map = mtd->priv;
+ struct jedec_private *priv = map->fldrv_priv;
// Verify the arguments..
if (start + len > mtd->size ||
@@ -561,7 +603,7 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
(len % mtd->erasesize) != 0 ||
(len/mtd->erasesize) == 0)
return -EINVAL;
-
+
jedec_flash_chip_scan(priv,start,len);
// Start the erase sequence on each chip
@@ -569,16 +611,16 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
unsigned long off;
struct jedec_flash_chip *chip = priv->chips + I;
-
+
if (chip->length == 0)
continue;
-
+
if (chip->start + chip->length > chip->size)
{
printk("DIE\n");
return -EIO;
- }
-
+ }
+
flwrite(0xF0,chip->start + 0x555);
flwrite(0xAA,chip->start + 0x555);
flwrite(0x55,chip->start + 0x2AA);
@@ -586,8 +628,8 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
flwrite(0xAA,chip->start + 0x555);
flwrite(0x55,chip->start + 0x2AA);
- /* Once we start selecting the erase sectors the delay between each
- command must not exceed 50us or it will immediately start erasing
+ /* Once we start selecting the erase sectors the delay between each
+ command must not exceed 50us or it will immediately start erasing
and ignore the other sectors */
for (off = 0; off < len; off += chip->sectorsize)
{
@@ -599,19 +641,19 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
printk("mtd: Ack! We timed out the erase timer!\n");
return -EIO;
- }
+ }
}
- }
+ }
/* We could split this into a timer routine and return early, performing
background erasure.. Maybe later if the need warrents */
/* Poll the flash for erasure completion, specs say this can take as long
- as 480 seconds to do all the sectors (for a 2 meg flash).
- Erasure time is dependant on chip age, temp and wear.. */
-
+ as 480 seconds to do all the sectors (for a 2 meg flash).
+ Erasure time is dependent on chip age, temp and wear.. */
+
/* This being a generic routine assumes a 32 bit bus. It does read32s
- and bundles interleved chips into the same grouping. This will work
+ and bundles interleaved chips into the same grouping. This will work
for all bus widths */
Time = 0;
NoTime = 0;
@@ -622,20 +664,20 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
unsigned todo[4] = {0,0,0,0};
unsigned todo_left = 0;
unsigned J;
-
+
if (chip->length == 0)
continue;
- /* Find all chips in this data line, realistically this is all
+ /* Find all chips in this data line, realistically this is all
or nothing up to the interleve count */
for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++)
{
- if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
+ if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
(chip->base & (~((1<<chip->addrshift)-1))))
{
todo_left++;
todo[priv->chips[J].base & ((1<<chip->addrshift)-1)] = 1;
- }
+ }
}
/* printk("todo: %x %x %x %x\n",(short)todo[0],(short)todo[1],
@@ -645,25 +687,25 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
{
__u32 Last[4];
unsigned long Count = 0;
-
+
/* During erase bit 7 is held low and bit 6 toggles, we watch this,
should it stop toggling or go high then the erase is completed,
or this is not really flash ;> */
switch (map->buswidth) {
case 1:
- Last[0] = map->read8(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[1] = map->read8(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[2] = map->read8(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[0] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[1] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[2] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
break;
case 2:
- Last[0] = map->read16(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[1] = map->read16(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[2] = map->read16(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[0] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[1] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[2] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
break;
case 3:
- Last[0] = map->read32(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[1] = map->read32(map,(chip->base >> chip->addrshift) + chip->start + off);
- Last[2] = map->read32(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[0] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[1] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[2] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
break;
}
Count = 3;
@@ -676,40 +718,40 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
__u8 Byte3 = (Last[(Count-3)%4] >> (J*8)) & 0xFF;
if (todo[J] == 0)
continue;
-
+
if ((Byte1 & (1 << 7)) == 0 && Byte1 != Byte2)
{
// printk("Check %x %x %x\n",(short)J,(short)Byte1,(short)Byte2);
continue;
}
-
+
if (Byte1 == Byte2)
{
jedec_flash_failed(Byte3);
return -EIO;
}
-
+
todo[J] = 0;
todo_left--;
}
-
+
/* if (NoTime == 0)
Time += HZ/10 - schedule_timeout(HZ/10);*/
NoTime = 0;
switch (map->buswidth) {
case 1:
- Last[Count % 4] = map->read8(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[Count % 4] = map_read8(map,(chip->base >> chip->addrshift) + chip->start + off);
break;
case 2:
- Last[Count % 4] = map->read16(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[Count % 4] = map_read16(map,(chip->base >> chip->addrshift) + chip->start + off);
break;
case 4:
- Last[Count % 4] = map->read32(map,(chip->base >> chip->addrshift) + chip->start + off);
+ Last[Count % 4] = map_read32(map,(chip->base >> chip->addrshift) + chip->start + off);
break;
}
Count++;
-
+
/* // Count time, max of 15s per sector (according to AMD)
if (Time > 15*len/mtd->erasesize*HZ)
{
@@ -717,67 +759,66 @@ static int flash_erase(struct mtd_info *mtd, struct erase_info *instr)
return -EIO;
} */
}
-
+
// Skip to the next chip if we used chip erase
if (chip->length == chip->size)
off = chip->size;
else
off += chip->sectorsize;
-
+
if (off >= chip->length)
break;
NoTime = 1;
}
-
+
for (J = 0; priv->chips[J].jedec != 0 && J < MAX_JEDEC_CHIPS; J++)
{
if ((priv->chips[J].base & (~((1<<chip->addrshift)-1))) ==
(chip->base & (~((1<<chip->addrshift)-1))))
priv->chips[J].length = 0;
- }
+ }
}
-
+
//printk("done\n");
instr->state = MTD_ERASE_DONE;
- if (instr->callback)
- instr->callback(instr);
+ mtd_erase_callback(instr);
return 0;
-
+
#undef flread
#undef flwrite
}
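
/*
 * Conceptual sketch, not kernel code: the polling loops above rely on the
 * AMD toggle-bit scheme the comments describe: while an erase/program is in
 * progress DQ6 (bit 6) toggles on every read, so the operation is over (or
 * has failed, see jedec_flash_failed()) once two consecutive status reads
 * return the same value.  Simulated here with a canned read sequence.
 */
#include <stdio.h>

static const unsigned char reads[] = { 0x00, 0x40, 0x00, 0x40, 0xFF, 0xFF };

int main(void)
{
	unsigned int i;

	for (i = 1; i < sizeof(reads); i++) {
		if (reads[i] == reads[i - 1]) {		/* DQ6 stopped toggling */
			printf("done after %u reads\n", i + 1);
			return 0;
		}
	}
	printf("still busy\n");
	return 1;
}
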
/* This is the simple flash writing function. It writes to every byte, in
sequence. It takes care of how to properly address the flash if
- the flash is interleved. It can only be used if all the chips in the
+ the flash is interleaved. It can only be used if all the chips in the
array are identical!*/
static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
size_t *retlen, const u_char *buf)
{
/* Does IO to the currently selected chip. It takes the bank addressing
- base (which is divisable by the chip size) adds the necesary lower bits
- of addrshift (interleve index) and then adds the control register index. */
- #define flread(x) map->read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
- #define flwrite(v,x) map->write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
-
- struct map_info *map = (struct map_info *)mtd->priv;
- struct jedec_private *priv = (struct jedec_private *)map->fldrv_priv;
+ base (which is divisible by the chip size) adds the necessary lower bits
+ of addrshift (interleave index) and then adds the control register index. */
+ #define flread(x) map_read8(map,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
+ #define flwrite(v,x) map_write8(map,v,base+(off&((1<<chip->addrshift)-1))+((x)<<chip->addrshift))
+
+ struct map_info *map = mtd->priv;
+ struct jedec_private *priv = map->fldrv_priv;
unsigned long base;
unsigned long off;
size_t save_len = len;
-
+
if (start + len > mtd->size)
return -EIO;
-
+
//printk("Here");
-
+
//printk("flash_write: start is %x, len is %x\n",start,(unsigned long)len);
while (len != 0)
{
struct jedec_flash_chip *chip = priv->chips;
unsigned long bank;
unsigned long boffset;
-
+
// Compute the base of the flash.
off = ((unsigned long)start) % (chip->size << chip->addrshift);
base = start - off;
@@ -787,14 +828,14 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
boffset = base & (priv->bank_fill[0]-1);
bank = (bank/priv->bank_fill[0])*my_bank_size;
base = bank + boffset;
-
+
// printk("Flasing %X %X %X\n",base,chip->size,len);
// printk("off is %x, compare with %x\n",off,chip->size << chip->addrshift);
-
+
// Loop over this page
for (; off != (chip->size << chip->addrshift) && len != 0; start++, len--, off++,buf++)
{
- unsigned char oldbyte = map->read8(map,base+off);
+ unsigned char oldbyte = map_read8(map,base+off);
unsigned char Last[4];
unsigned long Count = 0;
@@ -804,28 +845,28 @@ static int flash_write(struct mtd_info *mtd, loff_t start, size_t len,
}
if (((~oldbyte) & *buf) != 0)
printk("mtd: warn: Trying to set a 0 to a 1\n");
-
+
// Write
flwrite(0xAA,0x555);
flwrite(0x55,0x2AA);
flwrite(0xA0,0x555);
- map->write8(map,*buf,base + off);
- Last[0] = map->read8(map,base + off);
- Last[1] = map->read8(map,base + off);
- Last[2] = map->read8(map,base + off);
-
+ map_write8(map,*buf,base + off);
+ Last[0] = map_read8(map,base + off);
+ Last[1] = map_read8(map,base + off);
+ Last[2] = map_read8(map,base + off);
+
/* Wait for the flash to finish the operation. We store the last 4
status bytes that have been retrieved so we can determine why
- it failed. The toggle bits keep toggling when there is a
+ it failed. The toggle bits keep toggling when there is a
failure */
for (Count = 3; Last[(Count - 1) % 4] != Last[(Count - 2) % 4] &&
Count < 10000; Count++)
- Last[Count % 4] = map->read8(map,base + off);
+ Last[Count % 4] = map_read8(map,base + off);
if (Last[(Count - 1) % 4] != *buf)
{
jedec_flash_failed(Last[(Count - 3) % 4]);
return -EIO;
- }
+ }
}
}
*retlen = save_len;
@@ -844,24 +885,24 @@ static void jedec_flash_chip_scan(struct jedec_private *priv,unsigned long start
// Zero the records
for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
priv->chips[I].start = priv->chips[I].length = 0;
-
+
// Intersect the region with each chip
for (I = 0; priv->chips[I].jedec != 0 && I < MAX_JEDEC_CHIPS; I++)
{
struct jedec_flash_chip *chip = priv->chips + I;
unsigned long ByteStart;
unsigned long ChipEndByte = chip->offset + (chip->size << chip->addrshift);
-
+
// End is before this chip or the start is after it
if (start+len < chip->offset ||
ChipEndByte - (1 << chip->addrshift) < start)
continue;
-
+
if (start < chip->offset)
{
ByteStart = chip->offset;
chip->start = 0;
- }
+ }
else
{
chip->start = (start - chip->offset + (1 << chip->addrshift)-1) >> chip->addrshift;
diff --git a/linux-2.4.x/drivers/mtd/chips/jedec_probe.c b/linux-2.4.x/drivers/mtd/chips/jedec_probe.c
index 5058acf..ecc3c83 100644
--- a/linux-2.4.x/drivers/mtd/chips/jedec_probe.c
+++ b/linux-2.4.x/drivers/mtd/chips/jedec_probe.c
@@ -1,11 +1,16 @@
-/*
+/*
Common Flash Interface probe code.
(C) 2000 Red Hat. GPL'd.
- $Id: jedec_probe.c,v 1.3 2001/10/02 15:05:12 dwmw2 Exp $
+ $Id: jedec_probe.c,v 1.68 2006/03/29 08:42:49 dwmw2 Exp $
+ See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
+ for the standard this probe goes back to.
+
+ Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
*/
#include <linux/config.h>
#include <linux/module.h>
+#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
@@ -13,49 +18,231 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/gen_probe.h>
-
/* Manufacturers */
#define MANUFACTURER_AMD 0x0001
-#define MANUFACTURER_FUJITSU 0x0004
#define MANUFACTURER_ATMEL 0x001f
-#define MANUFACTURER_ST 0x0020
+#define MANUFACTURER_FUJITSU 0x0004
+#define MANUFACTURER_HYUNDAI 0x00AD
+#define MANUFACTURER_INTEL 0x0089
+#define MANUFACTURER_MACRONIX 0x00C2
+#define MANUFACTURER_NEC 0x0010
+#define MANUFACTURER_PMC 0x009D
+#define MANUFACTURER_SHARP 0x00b0
#define MANUFACTURER_SST 0x00BF
+#define MANUFACTURER_ST 0x0020
#define MANUFACTURER_TOSHIBA 0x0098
+#define MANUFACTURER_WINBOND 0x00da
+
/* AMD */
+#define AM29DL800BB 0x22C8
+#define AM29DL800BT 0x224A
+
#define AM29F800BB 0x2258
#define AM29F800BT 0x22D6
+#define AM29LV400BB 0x22BA
+#define AM29LV400BT 0x22B9
#define AM29LV800BB 0x225B
#define AM29LV800BT 0x22DA
#define AM29LV160DT 0x22C4
#define AM29LV160DB 0x2249
+#define AM29F017D 0x003D
+#define AM29F016D 0x00AD
+#define AM29F080 0x00D5
+#define AM29F040 0x00A4
+#define AM29LV040B 0x004F
+#define AM29F032B 0x0041
+#define AM29F002T 0x00B0
/* Atmel */
-#define AT49BV16X4 0x00c0
-#define AT49BV16X4T 0x00c2
+#define AT49BV512 0x0003
+#define AT29LV512 0x003d
+#define AT49BV16X 0x00C0
+#define AT49BV16XT 0x00C2
+#define AT49BV32X 0x00C8
+#define AT49BV32XT 0x00C9
/* Fujitsu */
+#define MBM29F040C 0x00A4
+#define MBM29LV650UE 0x22D7
+#define MBM29LV320TE 0x22F6
+#define MBM29LV320BE 0x22F9
#define MBM29LV160TE 0x22C4
#define MBM29LV160BE 0x2249
#define MBM29LV800BA 0x225B
+#define MBM29LV800TA 0x22DA
+#define MBM29LV400TC 0x22B9
+#define MBM29LV400BC 0x22BA
+
+/* Hyundai */
+#define HY29F002T 0x00B0
+
+/* Intel */
+#define I28F004B3T 0x00d4
+#define I28F004B3B 0x00d5
+#define I28F400B3T 0x8894
+#define I28F400B3B 0x8895
+#define I28F008S5 0x00a6
+#define I28F016S5 0x00a0
+#define I28F008SA 0x00a2
+#define I28F008B3T 0x00d2
+#define I28F008B3B 0x00d3
+#define I28F800B3T 0x8892
+#define I28F800B3B 0x8893
+#define I28F016S3 0x00aa
+#define I28F016B3T 0x00d0
+#define I28F016B3B 0x00d1
+#define I28F160B3T 0x8890
+#define I28F160B3B 0x8891
+#define I28F320B3T 0x8896
+#define I28F320B3B 0x8897
+#define I28F640B3T 0x8898
+#define I28F640B3B 0x8899
+#define I82802AB 0x00ad
+#define I82802AC 0x00ac
+
+/* Macronix */
+#define MX29LV040C 0x004F
+#define MX29LV160T 0x22C4
+#define MX29LV160B 0x2249
+#define MX29F016 0x00AD
+#define MX29F002T 0x00B0
+#define MX29F004T 0x0045
+#define MX29F004B 0x0046
+
+/* NEC */
+#define UPD29F064115 0x221C
+
+/* PMC */
+#define PM49FL002 0x006D
+#define PM49FL004 0x006E
+#define PM49FL008 0x006A
+
+/* Sharp */
+#define LH28F640BF 0x00b0
/* ST - www.st.com */
-#define M29W800T 0x00D7
+#define M29W800DT 0x00D7
+#define M29W800DB 0x005B
#define M29W160DT 0x22C4
#define M29W160DB 0x2249
+#define M29W040B 0x00E3
+#define M50FW040 0x002C
+#define M50FW080 0x002D
+#define M50FW016 0x002E
+#define M50LPW080 0x002F
/* SST */
+#define SST29EE020 0x0010
+#define SST29LE020 0x0012
+#define SST29EE512 0x005d
+#define SST29LE512 0x003d
#define SST39LF800 0x2781
#define SST39LF160 0x2782
+#define SST39VF1601 0x234b
+#define SST39LF512 0x00D4
+#define SST39LF010 0x00D5
+#define SST39LF020 0x00D6
+#define SST39LF040 0x00D7
+#define SST39SF010A 0x00B5
+#define SST39SF020A 0x00B6
+#define SST49LF004B 0x0060
+#define SST49LF008A 0x005a
+#define SST49LF030A 0x001C
+#define SST49LF040A 0x0051
+#define SST49LF080A 0x005B
/* Toshiba */
#define TC58FVT160 0x00C2
#define TC58FVB160 0x0043
+#define TC58FVT321 0x009A
+#define TC58FVB321 0x009C
+#define TC58FVT641 0x0093
+#define TC58FVB641 0x0095
+
+/* Winbond */
+#define W49V002A 0x00b0
+
+
+/*
+ * Unlock address sets for AMD command sets.
+ * Intel command sets use the MTD_UADDR_UNNECESSARY.
+ * Each identifier, except MTD_UADDR_UNNECESSARY, and
+ * MTD_UADDR_NO_SUPPORT must be defined below in unlock_addrs[].
+ * MTD_UADDR_NOT_SUPPORTED must be 0 so that structure
+ * initialization need not require initializing all of the
+ * unlock addresses for all bit widths.
+ */
+enum uaddr {
+ MTD_UADDR_NOT_SUPPORTED = 0, /* data width not supported */
+ MTD_UADDR_0x0555_0x02AA,
+ MTD_UADDR_0x0555_0x0AAA,
+ MTD_UADDR_0x5555_0x2AAA,
+ MTD_UADDR_0x0AAA_0x0555,
+ MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
+ MTD_UADDR_UNNECESSARY, /* Does not require any address */
+};
+
+
+struct unlock_addr {
+ u32 addr1;
+ u32 addr2;
+};
+
+
+/*
+ * I don't like the fact that the first entry in unlock_addrs[]
+ * exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore,
+ * should not be used. The problem is that structures with
+ * initializers have extra fields initialized to 0. It is _very_
+ * desirable to have the unlock address entries for unsupported
+ * data widths automatically initialized - that means that
+ * MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here
+ * must go unused.
+ */
+static const struct unlock_addr unlock_addrs[] = {
+ [MTD_UADDR_NOT_SUPPORTED] = {
+ .addr1 = 0xffff,
+ .addr2 = 0xffff
+ },
+
+ [MTD_UADDR_0x0555_0x02AA] = {
+ .addr1 = 0x0555,
+ .addr2 = 0x02aa
+ },
+
+ [MTD_UADDR_0x0555_0x0AAA] = {
+ .addr1 = 0x0555,
+ .addr2 = 0x0aaa
+ },
+
+ [MTD_UADDR_0x5555_0x2AAA] = {
+ .addr1 = 0x5555,
+ .addr2 = 0x2aaa
+ },
+
+ [MTD_UADDR_0x0AAA_0x0555] = {
+ .addr1 = 0x0AAA,
+ .addr2 = 0x0555
+ },
+
+ [MTD_UADDR_DONT_CARE] = {
+ .addr1 = 0x0000, /* Doesn't matter which address */
+ .addr2 = 0x0000 /* is used - must be last entry */
+ },
+
+ [MTD_UADDR_UNNECESSARY] = {
+ .addr1 = 0x0000,
+ .addr2 = 0x0000
+ }
+};
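
/*
 * Sketch of the intended lookup, with the tables mirrored locally rather
 * than using the kernel definitions above (an assumption about usage, not
 * part of the patch): the enum uaddr value recorded per bus width in
 * amd_flash_info.uaddr indexes unlock_addrs[] to give the two unlock-cycle
 * addresses for an AMD-style command set.
 */
#include <stdio.h>

enum uaddr { U_NOT_SUPPORTED = 0, U_0x0555_0x02AA, U_0x0AAA_0x0555 };

struct unlock_addr { unsigned int addr1, addr2; };

static const struct unlock_addr unlock_addrs[] = {
	[U_NOT_SUPPORTED] = { 0xffff, 0xffff },
	[U_0x0555_0x02AA] = { 0x0555, 0x02aa },
	[U_0x0AAA_0x0555] = { 0x0aaa, 0x0555 },
};

int main(void)
{
	enum uaddr x8 = U_0x0AAA_0x0555;	/* e.g. the x8 entry of AM29LV160DT */

	printf("unlock cycles at %#x / %#x\n",
	       unlock_addrs[x8].addr1, unlock_addrs[x8].addr2);
	return 0;
}
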
struct amd_flash_info {
@@ -63,371 +250,1895 @@ struct amd_flash_info {
const __u16 dev_id;
const char *name;
const int DevSize;
- const int InterfaceDesc;
const int NumEraseRegions;
- const ulong regions[4];
+ const int CmdSet;
+ const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */
+ const ulong regions[6];
};
#define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
-#define SIZE_1MiB 20
-#define SIZE_2MiB 21
-#define SIZE_4MiB 22
+#define SIZE_64KiB 16
+#define SIZE_128KiB 17
+#define SIZE_256KiB 18
+#define SIZE_512KiB 19
+#define SIZE_1MiB 20
+#define SIZE_2MiB 21
+#define SIZE_4MiB 22
+#define SIZE_8MiB 23
+
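
/*
 * Sketch, not part of the patch: ERASEINFO(size,blocks) packs one erase
 * region as (size << 8) | (blocks - 1), so the sector size sits above bit 8
 * and the low byte holds the block count minus one; the SIZE_* values are
 * log2 of the device size in bytes (SIZE_1MiB == 20 because 2^20 = 1 MiB).
 */
#include <stdio.h>

#define ERASEINFO(size,blocks) (size<<8)|(blocks-1)

int main(void)
{
	unsigned long region = (ERASEINFO(0x10000, 31)); /* first AM29LV160DT region */

	printf("erasesize 0x%lx, %lu blocks, device 2^%d bytes\n",
	       region >> 8, (region & 0xff) + 1, 21 /* SIZE_2MiB */);
	return 0;
}
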
+/*
+ * Please keep this list ordered by manufacturer!
+ * Fortunately, the list isn't searched often and so a
+ * slow, linear search isn't so bad.
+ */
static const struct amd_flash_info jedec_table[] = {
{
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV160DT,
- name: "AMD AM29LV160DT",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,31),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV160DB,
- name: "AMD AM29LV160DB",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,31)
- }
- }, {
- mfr_id: MANUFACTURER_TOSHIBA,
- dev_id: TC58FVT160,
- name: "Toshiba TC58FVT160",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,31),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_FUJITSU,
- dev_id: MBM29LV160TE,
- name: "Fujitsu MBM29LV160TE",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,31),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_TOSHIBA,
- dev_id: TC58FVB160,
- name: "Toshiba TC58FVB160",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,31)
- }
- }, {
- mfr_id: MANUFACTURER_FUJITSU,
- dev_id: MBM29LV160BE,
- name: "Fujitsu MBM29LV160BE",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,31)
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV800BB,
- name: "AMD AM29LV800BB",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,15),
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29F800BB,
- name: "AMD AM29F800BB",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,15),
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV800BT,
- name: "AMD AM29LV800BT",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,15),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29F800BT,
- name: "AMD AM29F800BT",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,15),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_AMD,
- dev_id: AM29LV800BB,
- name: "AMD AM29LV800BB",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,15),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_ST,
- dev_id: M29W800T,
- name: "ST M29W800T",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,15),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_ST,
- dev_id: M29W160DT,
- name: "ST M29W160DT",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x10000,31),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x04000,1)
- }
- }, {
- mfr_id: MANUFACTURER_ST,
- dev_id: M29W160DB,
- name: "ST M29W160DB",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,31)
- }
- }, {
- mfr_id: MANUFACTURER_ATMEL,
- dev_id: AT49BV16X4,
- name: "Atmel AT49BV16X4",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 3,
- regions: {ERASEINFO(0x02000,8),
- ERASEINFO(0x08000,2),
- ERASEINFO(0x10000,30)
- }
- }, {
- mfr_id: MANUFACTURER_ATMEL,
- dev_id: AT49BV16X4T,
- name: "Atmel AT49BV16X4T",
- DevSize: SIZE_2MiB,
- NumEraseRegions: 3,
- regions: {ERASEINFO(0x10000,30),
- ERASEINFO(0x08000,2),
- ERASEINFO(0x02000,8)
- }
- }, {
- mfr_id: MANUFACTURER_FUJITSU,
- dev_id: MBM29LV800BA,
- name: "Fujitsu MBM29LV800BA",
- DevSize: SIZE_1MiB,
- NumEraseRegions: 4,
- regions: {ERASEINFO(0x04000,1),
- ERASEINFO(0x02000,2),
- ERASEINFO(0x08000,1),
- ERASEINFO(0x10000,15),
- }
- }
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29F032B,
+ .name = "AMD AM29F032B",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,64)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV160DT,
+ .name = "AMD AM29LV160DT",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV160DB,
+ .name = "AMD AM29LV160DB",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV400BB,
+ .name = "AMD AM29LV400BB",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV400BT,
+ .name = "AMD AM29LV400BT",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,7),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV800BB,
+ .name = "AMD AM29LV800BB",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+/* add DL */
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29DL800BB,
+ .name = "AMD AM29DL800BB",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 6,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,4),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x10000,14)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29DL800BT,
+ .name = "AMD AM29DL800BT",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 6,
+ .regions = {
+ ERASEINFO(0x10000,14),
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,4),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29F800BB,
+ .name = "AMD AM29F800BB",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV800BT,
+ .name = "AMD AM29LV800BT",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29F800BT,
+ .name = "AMD AM29F800BT",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29F017D,
+ .name = "AMD AM29F017D",
+ .uaddr = {
+ [0] = MTD_UADDR_DONT_CARE /* x8 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29F016D,
+ .name = "AMD AM29F016D",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29F080,
+ .name = "AMD AM29F080",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29F040,
+ .name = "AMD AM29F040",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29LV040B,
+ .name = "AMD AM29LV040B",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_AMD,
+ .dev_id = AM29F002T,
+ .name = "AMD AM29F002T",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,3),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ATMEL,
+ .dev_id = AT49BV512,
+ .name = "Atmel AT49BV512",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_64KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ATMEL,
+ .dev_id = AT29LV512,
+ .name = "Atmel AT29LV512",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_64KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x80,256),
+ ERASEINFO(0x80,256)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ATMEL,
+ .dev_id = AT49BV16X,
+ .name = "Atmel AT49BV16X",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ATMEL,
+ .dev_id = AT49BV16XT,
+ .name = "Atmel AT49BV16XT",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ATMEL,
+ .dev_id = AT49BV32X,
+ .name = "Atmel AT49BV32X",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,63)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ATMEL,
+ .dev_id = AT49BV32XT,
+ .name = "Atmel AT49BV32XT",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000,63),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29F040C,
+ .name = "Fujitsu MBM29F040C",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV650UE,
+ .name = "Fujitsu MBM29LV650UE",
+ .uaddr = {
+ [0] = MTD_UADDR_DONT_CARE /* x16 */
+ },
+ .DevSize = SIZE_8MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,128)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV320TE,
+ .name = "Fujitsu MBM29LV320TE",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000,63),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV320BE,
+ .name = "Fujitsu MBM29LV320BE",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,63)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV160TE,
+ .name = "Fujitsu MBM29LV160TE",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV160BE,
+ .name = "Fujitsu MBM29LV160BE",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV800BA,
+ .name = "Fujitsu MBM29LV800BA",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV800TA,
+ .name = "Fujitsu MBM29LV800TA",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV400BC,
+ .name = "Fujitsu MBM29LV400BC",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_FUJITSU,
+ .dev_id = MBM29LV400TC,
+ .name = "Fujitsu MBM29LV400TC",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,7),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_HYUNDAI,
+ .dev_id = HY29F002T,
+ .name = "Hyundai HY29F002T",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,3),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F004B3B,
+ .name = "Intel 28F004B3B",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 7),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F004B3T,
+ .name = "Intel 28F004B3T",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000, 7),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F400B3B,
+ .name = "Intel 28F400B3B",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ [1] = MTD_UADDR_UNNECESSARY, /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 7),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F400B3T,
+ .name = "Intel 28F400B3T",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ [1] = MTD_UADDR_UNNECESSARY, /* x16 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000, 7),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F008B3B,
+ .name = "Intel 28F008B3B",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 15),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F008B3T,
+ .name = "Intel 28F008B3T",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000, 15),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F008S5,
+ .name = "Intel 28F008S5",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_INTEL_EXT,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F016S5,
+ .name = "Intel 28F016S5",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_INTEL_EXT,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F008SA,
+ .name = "Intel 28F008SA",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000, 16),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F800B3B,
+ .name = "Intel 28F800B3B",
+ .uaddr = {
+ [1] = MTD_UADDR_UNNECESSARY, /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 15),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F800B3T,
+ .name = "Intel 28F800B3T",
+ .uaddr = {
+ [1] = MTD_UADDR_UNNECESSARY, /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000, 15),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F016B3B,
+ .name = "Intel 28F016B3B",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 31),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F016S3,
+ .name = "Intel I28F016S3",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000, 32),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F016B3T,
+ .name = "Intel 28F016B3T",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000, 31),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F160B3B,
+ .name = "Intel 28F160B3B",
+ .uaddr = {
+ [1] = MTD_UADDR_UNNECESSARY, /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 31),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F160B3T,
+ .name = "Intel 28F160B3T",
+ .uaddr = {
+ [1] = MTD_UADDR_UNNECESSARY, /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000, 31),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F320B3B,
+ .name = "Intel 28F320B3B",
+ .uaddr = {
+ [1] = MTD_UADDR_UNNECESSARY, /* x16 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 63),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F320B3T,
+ .name = "Intel 28F320B3T",
+ .uaddr = {
+ [1] = MTD_UADDR_UNNECESSARY, /* x16 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000, 63),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F640B3B,
+ .name = "Intel 28F640B3B",
+ .uaddr = {
+ [1] = MTD_UADDR_UNNECESSARY, /* x16 */
+ },
+ .DevSize = SIZE_8MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000, 8),
+ ERASEINFO(0x10000, 127),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I28F640B3T,
+ .name = "Intel 28F640B3T",
+ .uaddr = {
+ [1] = MTD_UADDR_UNNECESSARY, /* x16 */
+ },
+ .DevSize = SIZE_8MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000, 127),
+ ERASEINFO(0x02000, 8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I82802AB,
+ .name = "Intel 82802AB",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_INTEL_EXT,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_INTEL,
+ .dev_id = I82802AC,
+ .name = "Intel 82802AC",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_INTEL_EXT,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_MACRONIX,
+ .dev_id = MX29LV040C,
+ .name = "Macronix MX29LV040C",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_MACRONIX,
+ .dev_id = MX29LV160T,
+ .name = "MXIC MX29LV160T",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_NEC,
+ .dev_id = UPD29F064115,
+ .name = "NEC uPD29F064115",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_8MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 3,
+ .regions = {
+ ERASEINFO(0x2000,8),
+ ERASEINFO(0x10000,126),
+ ERASEINFO(0x2000,8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_MACRONIX,
+ .dev_id = MX29LV160B,
+ .name = "MXIC MX29LV160B",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_MACRONIX,
+ .dev_id = MX29F016,
+ .name = "Macronix MX29F016",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_MACRONIX,
+ .dev_id = MX29F004T,
+ .name = "Macronix MX29F004T",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,7),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_MACRONIX,
+ .dev_id = MX29F004B,
+ .name = "Macronix MX29F004B",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,7),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_MACRONIX,
+ .dev_id = MX29F002T,
+ .name = "Macronix MX29F002T",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,3),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_PMC,
+ .dev_id = PM49FL002,
+ .name = "PMC Pm49FL002",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO( 0x01000, 64 )
+ }
+ }, {
+ .mfr_id = MANUFACTURER_PMC,
+ .dev_id = PM49FL004,
+ .name = "PMC Pm49FL004",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO( 0x01000, 128 )
+ }
+ }, {
+ .mfr_id = MANUFACTURER_PMC,
+ .dev_id = PM49FL008,
+ .name = "PMC Pm49FL008",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO( 0x01000, 256 )
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SHARP,
+ .dev_id = LH28F640BF,
+ .name = "LH28F640BF",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_INTEL_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x40000,16),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39LF512,
+ .name = "SST 39LF512",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_64KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,16),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39LF010,
+ .name = "SST 39LF010",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_128KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,32),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST29EE020,
+ .name = "SST 29EE020",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_SST_PAGE,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST29LE020,
+ .name = "SST 29LE020",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_SST_PAGE,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39LF020,
+ .name = "SST 39LF020",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39LF040,
+ .name = "SST 39LF040",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39SF010A,
+ .name = "SST 39SF010A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_128KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,32),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39SF020A,
+ .name = "SST 39SF020A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,64),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST49LF004B,
+ .name = "SST 49LF004B",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST49LF008A,
+ .name = "SST 49LF008A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,256),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST49LF030A,
+ .name = "SST 49LF030A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,96),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST49LF040A,
+ .name = "SST 49LF040A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
+ .dev_id = SST49LF080A,
+ .name = "SST 49LF080A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x01000,256),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .dev_id = SST39LF160,
+ .name = "SST 39LF160",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
+ [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .dev_id = SST39VF1601,
+ .name = "SST 39VF1601",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
+ [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+
+ }, {
+ .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .dev_id = M29W800DT,
+ .name = "ST M29W800DT",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
+ [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,15),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .dev_id = M29W800DB,
+ .name = "ST M29W800DB",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
+ [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,15)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .dev_id = M29W160DT,
+ .name = "ST M29W160DT",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
+ .dev_id = M29W160DB,
+ .name = "ST M29W160DB",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ST,
+ .dev_id = M29W040B,
+ .name = "ST M29W040B",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ST,
+ .dev_id = M50FW040,
+ .name = "ST M50FW040",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_512KiB,
+ .CmdSet = P_ID_INTEL_EXT,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,8),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ST,
+ .dev_id = M50FW080,
+ .name = "ST M50FW080",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_INTEL_EXT,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ST,
+ .dev_id = M50FW016,
+ .name = "ST M50FW016",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_INTEL_EXT,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,32),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_ST,
+ .dev_id = M50LPW080,
+ .name = "ST M50LPW080",
+ .uaddr = {
+ [0] = MTD_UADDR_UNNECESSARY, /* x8 */
+ },
+ .DevSize = SIZE_1MiB,
+ .CmdSet = P_ID_INTEL_EXT,
+ .NumEraseRegions= 1,
+ .regions = {
+ ERASEINFO(0x10000,16),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_TOSHIBA,
+ .dev_id = TC58FVT160,
+ .name = "Toshiba TC58FVT160",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000,31),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x04000,1)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_TOSHIBA,
+ .dev_id = TC58FVB160,
+ .name = "Toshiba TC58FVB160",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_2MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x04000,1),
+ ERASEINFO(0x02000,2),
+ ERASEINFO(0x08000,1),
+ ERASEINFO(0x10000,31)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_TOSHIBA,
+ .dev_id = TC58FVB321,
+ .name = "Toshiba TC58FVB321",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,63)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_TOSHIBA,
+ .dev_id = TC58FVT321,
+ .name = "Toshiba TC58FVT321",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
+ },
+ .DevSize = SIZE_4MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000,63),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_TOSHIBA,
+ .dev_id = TC58FVB641,
+ .name = "Toshiba TC58FVB641",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_8MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x02000,8),
+ ERASEINFO(0x10000,127)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_TOSHIBA,
+ .dev_id = TC58FVT641,
+ .name = "Toshiba TC58FVT641",
+ .uaddr = {
+ [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
+ [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
+ },
+ .DevSize = SIZE_8MiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 2,
+ .regions = {
+ ERASEINFO(0x10000,127),
+ ERASEINFO(0x02000,8)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_WINBOND,
+ .dev_id = W49V002A,
+ .name = "Winbond W49V002A",
+ .uaddr = {
+ [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
+ },
+ .DevSize = SIZE_256KiB,
+ .CmdSet = P_ID_AMD_STD,
+ .NumEraseRegions= 4,
+ .regions = {
+ ERASEINFO(0x10000, 3),
+ ERASEINFO(0x08000, 1),
+ ERASEINFO(0x02000, 2),
+ ERASEINFO(0x04000, 1),
+ }
+ }
};
static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
static int jedec_probe_chip(struct map_info *map, __u32 base,
- struct flchip *chips, struct cfi_private *cfi);
+ unsigned long *chip_map, struct cfi_private *cfi);
+
+static struct mtd_info *jedec_probe(struct map_info *map);
+
+static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
+ struct cfi_private *cfi)
+{
+ map_word result;
+ unsigned long mask;
+ u32 ofs = cfi_build_cmd_addr(0, cfi_interleave(cfi), cfi->device_type);
+ mask = (1 << (cfi->device_type * 8)) -1;
+ result = map_read(map, base + ofs);
+ return result.x[0] & mask;
+}
+
+static inline u32 jedec_read_id(struct map_info *map, __u32 base,
+ struct cfi_private *cfi)
+{
+ map_word result;
+ unsigned long mask;
+ u32 ofs = cfi_build_cmd_addr(1, cfi_interleave(cfi), cfi->device_type);
+ mask = (1 << (cfi->device_type * 8)) -1;
+ result = map_read(map, base + ofs);
+ return result.x[0] & mask;
+}
+
+static inline void jedec_reset(u32 base, struct map_info *map,
+ struct cfi_private *cfi)
+{
+ /* Reset */
+
+ /* After checking the datasheets for SST, MACRONIX and ATMEL
+ * (and, incidentally, the JEDEC spec - 3.5.3.3), the reset
+ * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
+ * 0x2aaa, 0xF0 at 0x5555. This will not affect the AMD chips,
+ * as they ignore the writes and don't care what address
+ * the 0xF0 is written to */
+ if(cfi->addr_unlock1) {
+ DEBUG( MTD_DEBUG_LEVEL3,
+ "reset unlock called %x %x \n",
+ cfi->addr_unlock1,cfi->addr_unlock2);
+ cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ }
+
+ cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ /* Some misdesigned Intel chips do not respond to 0xF0 as a reset,
+ * so ensure we're in read mode. Send both the Intel and the AMD command
+ * for this. Intel uses 0xFF for read mode, AMD uses 0xFF as a NOP, so
+ * this should be safe.
+ */
+ cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
+ /* FIXME - should have reset delay before continuing */
+}
+
+
+static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type)
+{
+ int uaddr_idx;
+ __u8 uaddr = MTD_UADDR_NOT_SUPPORTED;
+
+ switch ( device_type ) {
+ case CFI_DEVICETYPE_X8: uaddr_idx = 0; break;
+ case CFI_DEVICETYPE_X16: uaddr_idx = 1; break;
+ case CFI_DEVICETYPE_X32: uaddr_idx = 2; break;
+ default:
+ printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n",
+ __func__, device_type);
+ goto uaddr_done;
+ }
+
+ uaddr = finfo->uaddr[uaddr_idx];
+
+ if (uaddr != MTD_UADDR_NOT_SUPPORTED ) {
+ /* ASSERT("The unlock addresses for non-8-bit mode
+ are bollocks. We don't really need an array."); */
+ uaddr = finfo->uaddr[0];
+ }
+
+ uaddr_done:
+ return uaddr;
+}
-struct mtd_info *jedec_probe(struct map_info *map);
-#define jedec_read_mfr(map, base, osf) cfi_read(map, base)
-#define jedec_read_id(map, base, osf) cfi_read(map, (base)+(osf))
static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
{
int i,num_erase_regions;
+ __u8 uaddr;
printk("Found: %s\n",jedec_table[index].name);
num_erase_regions = jedec_table[index].NumEraseRegions;
-
+
p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
if (!p_cfi->cfiq) {
//xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
return 0;
}
- memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
-
- p_cfi->cfiq->P_ID = P_ID_AMD_STD;
+ memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
+
+ p_cfi->cfiq->P_ID = jedec_table[index].CmdSet;
p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions;
p_cfi->cfiq->DevSize = jedec_table[index].DevSize;
+ p_cfi->cfi_mode = CFI_MODE_JEDEC;
for (i=0; i<num_erase_regions; i++){
p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
- }
+ }
+ p_cfi->cmdset_priv = NULL;
+
+ /* This may be redundant for some cases, but it doesn't hurt */
+ p_cfi->mfr = jedec_table[index].mfr_id;
+ p_cfi->id = jedec_table[index].dev_id;
+
+ uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type);
+ if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
+ kfree( p_cfi->cfiq );
+ return 0;
+ }
+
+ p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1;
+ p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2;
+
return 1; /* ok */
}
+
+/*
+ * There is a BIG problem properly ID'ing the JEDEC device and guaranteeing
+ * the mapped address, unlock addresses, and proper chip ID. This function
+ * attempts to minimize errors. It is doubtful that this probe will ever
+ * be perfect - consequently there should be some module parameters that
+ * could be manually specified to force the chip info.
+ */
+static inline int jedec_match( __u32 base,
+ struct map_info *map,
+ struct cfi_private *cfi,
+ const struct amd_flash_info *finfo )
+{
+ int rc = 0; /* failure until all tests pass */
+ u32 mfr, id;
+ __u8 uaddr;
+
+ /*
+ * The IDs must match. For X16 and X32 devices operating in
+ * a lower width (X8 or X16), the device IDs are usually just
+ * the lower byte(s) of the full-width device ID. If
+ * a part is found that doesn't fit this assumption (the device ID for
+ * the smaller-width mode is completely unrelated to the full-width mode)
+ * then the jedec_table[] will have to be augmented with the IDs
+ * for different widths.
+ */
+ switch (cfi->device_type) {
+ case CFI_DEVICETYPE_X8:
+ mfr = (__u8)finfo->mfr_id;
+ id = (__u8)finfo->dev_id;
+
+ /* bjd: it seems that if we do this, we can end up
+ * detecting 16-bit flashes as an 8-bit device, even though
+ * they aren't.
+ */
+ if (finfo->dev_id > 0xff) {
+ DEBUG( MTD_DEBUG_LEVEL3, "%s(): ID is not 8bit\n",
+ __func__);
+ goto match_done;
+ }
+ break;
+ case CFI_DEVICETYPE_X16:
+ mfr = (__u16)finfo->mfr_id;
+ id = (__u16)finfo->dev_id;
+ break;
+ case CFI_DEVICETYPE_X32:
+ mfr = (__u16)finfo->mfr_id;
+ id = (__u32)finfo->dev_id;
+ break;
+ default:
+ printk(KERN_WARNING
+ "MTD %s(): Unsupported device type %d\n",
+ __func__, cfi->device_type);
+ goto match_done;
+ }
+ if ( cfi->mfr != mfr || cfi->id != id ) {
+ goto match_done;
+ }
+
+ /* the part size must fit in the memory window */
+ DEBUG( MTD_DEBUG_LEVEL3,
+ "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
+ __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) );
+ if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) {
+ DEBUG( MTD_DEBUG_LEVEL3,
+ "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
+ __func__, finfo->mfr_id, finfo->dev_id,
+ 1 << finfo->DevSize );
+ goto match_done;
+ }
+
+ uaddr = finfo_uaddr(finfo, cfi->device_type);
+ if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
+ goto match_done;
+ }
+
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
+ __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
+ if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
+ && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 ||
+ unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) {
+ DEBUG( MTD_DEBUG_LEVEL3,
+ "MTD %s(): 0x%.4x 0x%.4x did not match\n",
+ __func__,
+ unlock_addrs[uaddr].addr1,
+ unlock_addrs[uaddr].addr2);
+ goto match_done;
+ }
+
+ /*
+ * Make sure the IDs disappear when the device is taken out of
+ * ID mode. The only time this check should fail when it ought to
+ * succeed is when the IDs happen to be written as data at the same
+ * addresses. For this rare and unfortunate case the chip
+ * cannot be probed correctly.
+ * FIXME - write a driver that takes all of the chip info as
+ * module parameters, doesn't probe but forces a load.
+ */
+ DEBUG( MTD_DEBUG_LEVEL3,
+ "MTD %s(): check ID's disappear when not in ID mode\n",
+ __func__ );
+ jedec_reset( base, map, cfi );
+ mfr = jedec_read_mfr( map, base, cfi );
+ id = jedec_read_id( map, base, cfi );
+ if ( mfr == cfi->mfr && id == cfi->id ) {
+ DEBUG( MTD_DEBUG_LEVEL3,
+ "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
+ "You might need to manually specify JEDEC parameters.\n",
+ __func__, cfi->mfr, cfi->id );
+ goto match_done;
+ }
+
+ /* all tests passed - mark as success */
+ rc = 1;
+
+ /*
+ * Put the device back in ID mode - only need to do this if we
+ * were truly frobbing a real device.
+ */
+ DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
+ if(cfi->addr_unlock1) {
+ cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ }
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ /* FIXME - should have a delay before continuing */
+
+ match_done:
+ return rc;
+}
+
+
static int jedec_probe_chip(struct map_info *map, __u32 base,
- struct flchip *chips, struct cfi_private *cfi)
+ unsigned long *chip_map, struct cfi_private *cfi)
{
int i;
- int osf = cfi->interleave * cfi->device_type;
- int retried = 0;
- int busmask;
-
- switch(map->buswidth) {
- case 1: busmask = 0xff; break;
- case 2: busmask = 0xffff; break;
- case 4: busmask = 0xffffffff; break;
- default: printk("Unknown buswidth in jedec_probe_chip\n"); return(0);
- }
+ enum uaddr uaddr_idx = MTD_UADDR_NOT_SUPPORTED;
+ u32 probe_offset1, probe_offset2;
+ retry:
if (!cfi->numchips) {
- switch (cfi->device_type) {
- case CFI_DEVICETYPE_X8:
- cfi->addr_unlock1 = 0x555;
- cfi->addr_unlock2 = 0x2aa;
- break;
- case CFI_DEVICETYPE_X16:
- cfi->addr_unlock1 = 0xaaa;
- if (map->buswidth == cfi->interleave) {
- /* X16 chip(s) in X8 mode */
- cfi->addr_unlock2 = 0x555;
- } else {
- cfi->addr_unlock2 = 0x554;
- }
- break;
- case CFI_DEVICETYPE_X32:
- cfi->addr_unlock1 = 0x1555;
- cfi->addr_unlock2 = 0xaaa;
- break;
- default:
- printk(KERN_NOTICE "Eep. Unknown jedec_probe device type %d\n", cfi->device_type);
+ uaddr_idx++;
+
+ if (MTD_UADDR_UNNECESSARY == uaddr_idx)
+ return 0;
+
+ cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1;
+ cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2;
+ }
+
+ /* Make certain we aren't probing past the end of map */
+ if (base >= map->size) {
+ printk(KERN_NOTICE
+ "Probe at base(0x%08x) past the end of the map(0x%08lx)\n",
+ base, map->size -1);
return 0;
- }
+
+ }
+ /* Ensure the unlock addresses we try stay inside the map */
+ probe_offset1 = cfi_build_cmd_addr(
+ cfi->addr_unlock1,
+ cfi_interleave(cfi),
+ cfi->device_type);
+ probe_offset2 = cfi_build_cmd_addr(
+ cfi->addr_unlock2,
+ cfi_interleave(cfi),
+ cfi->device_type);
+ if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
+ ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
+ {
+ goto retry;
}
- retry:
/* Reset */
- cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ jedec_reset(base, map, cfi);
/* Autoselect Mode */
- cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
- cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, CFI_DEVICETYPE_X8, NULL);
+ if(cfi->addr_unlock1) {
+ cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
+ }
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
+ /* FIXME - should have a delay before continuing */
if (!cfi->numchips) {
- /* This is the first time we're called. Set up the CFI
+ /* This is the first time we're called. Set up the CFI
stuff accordingly and return */
-
- cfi->mfr = jedec_read_mfr(map, base, osf);
- cfi->id = jedec_read_id(map, base, osf);
-
- for (i=0; i<sizeof(jedec_table)/sizeof(jedec_table[0]); i++) {
- if (cfi->mfr == (jedec_table[i].mfr_id&busmask) &&
- cfi->id == (jedec_table[i].dev_id&busmask))
- return cfi_jedec_setup(cfi, i);
- }
- if (!retried++) {
- /* Deal with whichever strange chips these were */
- cfi->addr_unlock1 |= cfi->addr_unlock1 << 8;
- cfi->addr_unlock2 |= cfi->addr_unlock2 << 8;
- goto retry;
+
+ cfi->mfr = jedec_read_mfr(map, base, cfi);
+ cfi->id = jedec_read_id(map, base, cfi);
+ DEBUG(MTD_DEBUG_LEVEL3,
+ "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
+ cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
+ for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
+ if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
+ DEBUG( MTD_DEBUG_LEVEL3,
+ "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
+ __func__, cfi->mfr, cfi->id,
+ cfi->addr_unlock1, cfi->addr_unlock2 );
+ if (!cfi_jedec_setup(cfi, i))
+ return 0;
+ goto ok_out;
+ }
+ }
+ goto retry;
+ } else {
+ __u16 mfr;
+ __u16 id;
+
+ /* Make sure it is a chip of the same manufacturer and id */
+ mfr = jedec_read_mfr(map, base, cfi);
+ id = jedec_read_id(map, base, cfi);
+
+ if ((mfr != cfi->mfr) || (id != cfi->id)) {
+ printk(KERN_DEBUG "%s: Found different chip or no chip at all (mfr 0x%x, id 0x%x) at 0x%x\n",
+ map->name, mfr, id, base);
+ jedec_reset(base, map, cfi);
+ return 0;
}
- return 0;
}
-
- /* Check each previous chip to see if it's an alias */
- for (i=0; i<cfi->numchips; i++) {
- /* This chip should be in read mode if it's one
- we've already touched. */
- if (jedec_read_mfr(map, base, osf) == cfi->mfr &&
- jedec_read_id(map, base, osf) == cfi->id) {
+
+ /* Check each previous chip location to see if it's an alias */
+ for (i=0; i < (base >> cfi->chipshift); i++) {
+ unsigned long start;
+ if(!test_bit(i, chip_map)) {
+ continue; /* Skip location; no valid chip at this address */
+ }
+ start = i << cfi->chipshift;
+ if (jedec_read_mfr(map, start, cfi) == cfi->mfr &&
+ jedec_read_id(map, start, cfi) == cfi->id) {
/* Eep. This chip also looks like it's in autoselect mode.
Is it an alias for the new one? */
-
- cfi_send_gen_cmd(0xF0, 0, chips[i].start, map, cfi, cfi->device_type, NULL);
+ jedec_reset(start, map, cfi);
+
/* If the device IDs go away, it's an alias */
- if (jedec_read_mfr(map, base, osf) != cfi->mfr ||
- jedec_read_id(map, base, osf) != cfi->id) {
+ if (jedec_read_mfr(map, base, cfi) != cfi->mfr ||
+ jedec_read_id(map, base, cfi) != cfi->id) {
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
- map->name, base, chips[i].start);
+ map->name, base, start);
return 0;
}
-
+
/* Yes, it's actually got the device IDs as data. Most
* unfortunate. Stick the new chip in read mode
* too and if it's the same, assume it's an alias. */
/* FIXME: Use other modes to do a proper check */
- cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
- if (jedec_read_mfr(map, base, osf) == cfi->mfr &&
- jedec_read_id(map, base, osf) == cfi->id) {
+ jedec_reset(base, map, cfi);
+ if (jedec_read_mfr(map, base, cfi) == cfi->mfr &&
+ jedec_read_id(map, base, cfi) == cfi->id) {
printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
- map->name, base, chips[i].start);
+ map->name, base, start);
return 0;
}
}
}
-
+
/* OK, if we got to here, then none of the previous chips appear to
be aliases for the current one. */
- if (cfi->numchips == MAX_CFI_CHIPS) {
- printk(KERN_WARNING"%s: Too many flash chips detected. Increase MAX_CFI_CHIPS from %d.\n", map->name, MAX_CFI_CHIPS);
- /* Doesn't matter about resetting it to Read Mode - we're not going to talk to it anyway */
- return -1;
- }
- chips[cfi->numchips].start = base;
- chips[cfi->numchips].state = FL_READY;
+ set_bit((base >> cfi->chipshift), chip_map); /* Update chip map */
cfi->numchips++;
-
+
+ok_out:
/* Put it back into Read Mode */
- cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
+ jedec_reset(base, map, cfi);
+
+ printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
+ map->name, cfi_interleave(cfi), cfi->device_type*8, base,
+ map->bankwidth*8);
- printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit mode\n",
- map->name, cfi->interleave, cfi->device_type*8, base,
- map->buswidth*8);
-
return 1;
}
static struct chip_probe jedec_chip_probe = {
- name: "JEDEC",
- probe_chip: jedec_probe_chip
+ .name = "JEDEC",
+ .probe_chip = jedec_probe_chip
};
-struct mtd_info *jedec_probe(struct map_info *map)
+static struct mtd_info *jedec_probe(struct map_info *map)
{
/*
* Just use the generic probe stuff to call our CFI-specific
@@ -437,12 +2148,12 @@ struct mtd_info *jedec_probe(struct map_info *map)
}
static struct mtd_chip_driver jedec_chipdrv = {
- probe: jedec_probe,
- name: "jedec_probe",
- module: THIS_MODULE
+ .probe = jedec_probe,
+ .name = "jedec_probe",
+ .module = THIS_MODULE
};
-int __init jedec_probe_init(void)
+static int __init jedec_probe_init(void)
{
register_mtd_chip_driver(&jedec_chipdrv);
return 0;
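
Each entry in the jedec_table[] above pairs a DevSize (stored as a log2 byte count, which is what the "1 << finfo->DevSize" fit check in jedec_match() relies on) with a list of ERASEINFO(block_size, count) regions, and for a well-formed entry the regions sum to the device size. The standalone sketch below is an illustration only, not part of the patch; the struct and the assumed log2 value for SIZE_4MiB are editorial. It checks the arithmetic for a boot-block layout like the Intel 28F320B3B entry: 8 x 8KiB parameter blocks plus 63 x 64KiB main blocks = 4MiB.

/*
 * Sketch only, not part of the patch: check that an ERASEINFO(size, count)
 * style region list adds up to the device size.  The layout mirrors the
 * Intel 28F320B3B entry: 8 x 8KiB parameter blocks + 63 x 64KiB main blocks.
 * SIZE_4MiB is assumed to be the log2 value 22, consistent with the
 * "1 << finfo->DevSize" fit check in jedec_match().
 */
#include <stdio.h>

struct erase_region {
	unsigned long block_size;	/* size of one erase block, in bytes */
	unsigned int num_blocks;	/* number of blocks of that size */
};

int main(void)
{
	static const struct erase_region regions[] = {
		{ 0x02000, 8 },		/* ERASEINFO(0x02000, 8)  */
		{ 0x10000, 63 },	/* ERASEINFO(0x10000, 63) */
	};
	const unsigned int dev_size_log2 = 22;	/* assumed SIZE_4MiB */
	unsigned long total = 0;
	unsigned int i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		total += regions[i].block_size * regions[i].num_blocks;

	printf("regions total 0x%lx bytes, DevSize says 0x%lx\n",
	       total, 1UL << dev_size_log2);
	return total == (1UL << dev_size_log2) ? 0 : 1;
}

Running it prints 0x400000 for both values, i.e. the two 28F320B3B regions cover exactly the advertised 4MiB.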
diff --git a/linux-2.4.x/drivers/mtd/chips/map_absent.c b/linux-2.4.x/drivers/mtd/chips/map_absent.c
index 9478d0e..a611de9 100644
--- a/linux-2.4.x/drivers/mtd/chips/map_absent.c
+++ b/linux-2.4.x/drivers/mtd/chips/map_absent.c
@@ -1,11 +1,11 @@
/*
* Common code to handle absent "placeholder" devices
* Copyright 2001 Resilience Corporation <ebrower@resilience.com>
- * $Id: map_absent.c,v 1.2 2001/10/02 15:05:12 dwmw2 Exp $
+ * $Id: map_absent.c,v 1.6 2005/11/07 11:14:23 gleixner Exp $
*
* This map driver is used to allocate "placeholder" MTD
- * devices on systems that have socketed/removable media.
- * Use of this driver as a fallback preserves the expected
+ * devices on systems that have socketed/removable media.
+ * Use of this driver as a fallback preserves the expected
* registration of MTD device nodes regardless of probe outcome.
* A usage example is as follows:
*
@@ -23,9 +23,10 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
-
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-
+#include <linux/mtd/compatmac.h>
static int map_absent_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int map_absent_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -36,10 +37,10 @@ static void map_absent_destroy (struct mtd_info *);
static struct mtd_chip_driver map_absent_chipdrv = {
- probe: map_absent_probe,
- destroy: map_absent_destroy,
- name: "map_absent",
- module: THIS_MODULE
+ .probe = map_absent_probe,
+ .destroy = map_absent_destroy,
+ .name = "map_absent",
+ .module = THIS_MODULE
};
static struct mtd_info *map_absent_probe(struct map_info *map)
@@ -65,7 +66,7 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
mtd->flags = 0;
mtd->erasesize = PAGE_SIZE;
- MOD_INC_USE_COUNT;
+ __module_get(THIS_MODULE);
return mtd;
}
@@ -79,7 +80,7 @@ static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t
static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
*retlen = 0;
- return -ENODEV;
+ return -ENODEV;
}
static int map_absent_erase(struct mtd_info *mtd, struct erase_info *instr)
@@ -97,7 +98,7 @@ static void map_absent_destroy(struct mtd_info *mtd)
/* nop */
}
-int __init map_absent_init(void)
+static int __init map_absent_init(void)
{
register_mtd_chip_driver(&map_absent_chipdrv);
return 0;
diff --git a/linux-2.4.x/drivers/mtd/chips/map_ram.c b/linux-2.4.x/drivers/mtd/chips/map_ram.c
index 287a255..bd2e876 100644
--- a/linux-2.4.x/drivers/mtd/chips/map_ram.c
+++ b/linux-2.4.x/drivers/mtd/chips/map_ram.c
@@ -1,7 +1,7 @@
/*
* Common code to handle map devices which are simple RAM
* (C) 2000 Red Hat. GPL'd.
- * $Id: map_ram.c,v 1.14 2001/10/02 15:05:12 dwmw2 Exp $
+ * $Id: map_ram.c,v 1.22 2005/01/05 18:05:12 dwmw2 Exp $
*/
#include <linux/module.h>
@@ -11,8 +11,10 @@
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
-
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
+#include <linux/mtd/compatmac.h>
static int mapram_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
@@ -23,9 +25,9 @@ static struct mtd_info *map_ram_probe(struct map_info *map);
static struct mtd_chip_driver mapram_chipdrv = {
- probe: map_ram_probe,
- name: "map_ram",
- module: THIS_MODULE
+ .probe = map_ram_probe,
+ .name = "map_ram",
+ .module = THIS_MODULE
};
static struct mtd_info *map_ram_probe(struct map_info *map)
@@ -34,21 +36,21 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
/* Check the first byte is RAM */
#if 0
- map->write8(map, 0x55, 0);
- if (map->read8(map, 0) != 0x55)
+ map_write8(map, 0x55, 0);
+ if (map_read8(map, 0) != 0x55)
return NULL;
- map->write8(map, 0xAA, 0);
- if (map->read8(map, 0) != 0xAA)
+ map_write8(map, 0xAA, 0);
+ if (map_read8(map, 0) != 0xAA)
return NULL;
/* Check the last byte is RAM */
- map->write8(map, 0x55, map->size-1);
- if (map->read8(map, map->size-1) != 0x55)
+ map_write8(map, 0x55, map->size-1);
+ if (map_read8(map, map->size-1) != 0x55)
return NULL;
- map->write8(map, 0xAA, map->size-1);
- if (map->read8(map, map->size-1) != 0xAA)
+ map_write8(map, 0xAA, map->size-1);
+ if (map_read8(map, map->size-1) != 0xAA)
return NULL;
#endif
/* OK. It seems to be RAM. */
@@ -74,25 +76,25 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
while(mtd->size & (mtd->erasesize - 1))
mtd->erasesize >>= 1;
- MOD_INC_USE_COUNT;
+ __module_get(THIS_MODULE);
return mtd;
}
static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
- struct map_info *map = (struct map_info *)mtd->priv;
+ struct map_info *map = mtd->priv;
- map->copy_from(map, buf, from, len);
+ map_copy_from(map, buf, from, len);
*retlen = len;
return 0;
}
static int mapram_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
- struct map_info *map = (struct map_info *)mtd->priv;
+ struct map_info *map = mtd->priv;
- map->copy_to(map, to, buf, len);
+ map_copy_to(map, to, buf, len);
*retlen = len;
return 0;
}
@@ -101,14 +103,18 @@ static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr)
{
/* Yeah, it's inefficient. Who cares? It's faster than a _real_
flash erase. */
- struct map_info *map = (struct map_info *)mtd->priv;
+ struct map_info *map = mtd->priv;
+ map_word allff;
unsigned long i;
- for (i=0; i<instr->len; i++)
- map->write8(map, 0xFF, instr->addr + i);
+ allff = map_word_ff(map);
+
+ for (i=0; i<instr->len; i += map_bankwidth(map))
+ map_write(map, allff, instr->addr + i);
+
+ instr->state = MTD_ERASE_DONE;
- if (instr->callback)
- instr->callback(instr);
+ mtd_erase_callback(instr);
return 0;
}
@@ -118,7 +124,7 @@ static void mapram_nop(struct mtd_info *mtd)
/* Nothing to see here */
}
-int __init map_ram_init(void)
+static int __init map_ram_init(void)
{
register_mtd_chip_driver(&mapram_chipdrv);
return 0;
diff --git a/linux-2.4.x/drivers/mtd/chips/map_rom.c b/linux-2.4.x/drivers/mtd/chips/map_rom.c
index d077bac..624c12c 100644
--- a/linux-2.4.x/drivers/mtd/chips/map_rom.c
+++ b/linux-2.4.x/drivers/mtd/chips/map_rom.c
@@ -1,10 +1,9 @@
/*
* Common code to handle map devices which are simple ROM
* (C) 2000 Red Hat. GPL'd.
- * $Id: map_rom.c,v 1.17 2001/10/02 15:05:12 dwmw2 Exp $
+ * $Id: map_rom.c,v 1.23 2005/01/05 18:05:12 dwmw2 Exp $
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -12,21 +11,23 @@
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
-
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
+#include <linux/mtd/compatmac.h>
static int maprom_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static void maprom_nop (struct mtd_info *);
-struct mtd_info *map_rom_probe(struct map_info *map);
+static struct mtd_info *map_rom_probe(struct map_info *map);
static struct mtd_chip_driver maprom_chipdrv = {
- probe: map_rom_probe,
- name: "map_rom",
- module: THIS_MODULE
+ .probe = map_rom_probe,
+ .name = "map_rom",
+ .module = THIS_MODULE
};
-struct mtd_info *map_rom_probe(struct map_info *map)
+static struct mtd_info *map_rom_probe(struct map_info *map)
{
struct mtd_info *mtd;
@@ -49,16 +50,16 @@ struct mtd_info *map_rom_probe(struct map_info *map)
while(mtd->size & (mtd->erasesize - 1))
mtd->erasesize >>= 1;
- MOD_INC_USE_COUNT;
+ __module_get(THIS_MODULE);
return mtd;
}
static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
- struct map_info *map = (struct map_info *)mtd->priv;
+ struct map_info *map = mtd->priv;
- map->copy_from(map, buf, from, len);
+ map_copy_from(map, buf, from, len);
*retlen = len;
return 0;
}
@@ -74,7 +75,7 @@ static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *re
return -EIO;
}
-int __init map_rom_init(void)
+static int __init map_rom_init(void)
{
register_mtd_chip_driver(&maprom_chipdrv);
return 0;
diff --git a/linux-2.4.x/drivers/mtd/chips/sharp.c b/linux-2.4.x/drivers/mtd/chips/sharp.c
index 95f0641..f777fb1 100644
--- a/linux-2.4.x/drivers/mtd/chips/sharp.c
+++ b/linux-2.4.x/drivers/mtd/chips/sharp.c
@@ -4,7 +4,7 @@
* Copyright 2000,2001 David A. Schleef <ds@schleef.org>
* 2000,2001 Lineo, Inc.
*
- * $Id: sharp.c,v 1.7 2002/02/13 15:49:07 dwmw2 Exp $
+ * $Id: sharp.c,v 1.18 2006/03/29 08:27:08 dwmw2 Exp $
*
* Devices supported:
* LH28F016SCT Symmetrical block flash memory, 2Mx8
@@ -22,14 +22,16 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
#define CMD_RESET 0xffffffff
#define CMD_READ_ID 0x90909090
@@ -62,7 +64,7 @@
#undef AUTOUNLOCK /* automatically unlocks blocks before erasing */
-struct mtd_info *sharp_probe(struct map_info *);
+static struct mtd_info *sharp_probe(struct map_info *);
static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd);
@@ -94,18 +96,17 @@ struct sharp_info{
struct flchip chips[1];
};
-struct mtd_info *sharp_probe(struct map_info *map);
static void sharp_destroy(struct mtd_info *mtd);
static struct mtd_chip_driver sharp_chipdrv = {
- probe: sharp_probe,
- destroy: sharp_destroy,
- name: "sharp",
- module: THIS_MODULE
+ .probe = sharp_probe,
+ .destroy = sharp_destroy,
+ .name = "sharp",
+ .module = THIS_MODULE
};
-struct mtd_info *sharp_probe(struct map_info *map)
+static struct mtd_info *sharp_probe(struct map_info *map)
{
struct mtd_info *mtd = NULL;
struct sharp_info *sharp = NULL;
@@ -116,8 +117,10 @@ struct mtd_info *sharp_probe(struct map_info *map)
return NULL;
sharp = kmalloc(sizeof(*sharp), GFP_KERNEL);
- if(!sharp)
+ if(!sharp) {
+ kfree(mtd);
return NULL;
+ }
memset(mtd, 0, sizeof(*mtd));
@@ -152,26 +155,32 @@ struct mtd_info *sharp_probe(struct map_info *map)
map->fldrv = &sharp_chipdrv;
map->fldrv_priv = sharp;
- MOD_INC_USE_COUNT;
+ __module_get(THIS_MODULE);
return mtd;
}
+static inline void sharp_send_cmd(struct map_info *map, unsigned long cmd, unsigned long adr)
+{
+ map_word map_cmd;
+ map_cmd.x[0] = cmd;
+ map_write(map, map_cmd, adr);
+}
+
static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
{
- unsigned long tmp;
+ map_word tmp, read0, read4;
unsigned long base = 0;
- u32 read0, read4;
int width = 4;
- tmp = map->read32(map, base+0);
+ tmp = map_read(map, base+0);
- map->write32(map, CMD_READ_ID, base+0);
+ sharp_send_cmd(map, CMD_READ_ID, base+0);
- read0=map->read32(map, base+0);
- read4=map->read32(map, base+4);
- if(read0 == 0x89898989){
+ read0 = map_read(map, base+0);
+ read4 = map_read(map, base+4);
+ if(read0.x[0] == 0x89898989){
printk("Looks like sharp flash\n");
- switch(read4){
+ switch(read4.x[0]){
case 0xaaaaaaaa:
case 0xa0a0a0a0:
/* aa - LH28F016SCT-L95 2Mx8, 32 64k blocks*/
@@ -193,16 +202,16 @@ static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
return width;
#endif
default:
- printk("Sort-of looks like sharp flash, 0x%08x 0x%08x\n",
- read0,read4);
+ printk("Sort-of looks like sharp flash, 0x%08lx 0x%08lx\n",
+ read0.x[0], read4.x[0]);
}
- }else if((map->read32(map, base+0) == CMD_READ_ID)){
+ }else if((map_read(map, base+0).x[0] == CMD_READ_ID)){
/* RAM, probably */
printk("Looks like RAM\n");
- map->write32(map, tmp, base+0);
+ map_write(map, tmp, base+0);
}else{
- printk("Doesn't look like sharp flash, 0x%08x 0x%08x\n",
- read0,read4);
+ printk("Doesn't look like sharp flash, 0x%08lx 0x%08lx\n",
+ read0.x[0], read4.x[0]);
}
return 0;
@@ -211,7 +220,8 @@ static int sharp_probe_map(struct map_info *map,struct mtd_info *mtd)
/* This function returns with the chip->mutex lock held. */
static int sharp_wait(struct map_info *map, struct flchip *chip)
{
- __u16 status;
+ int i;
+ map_word status;
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
int adr = 0;
@@ -221,16 +231,14 @@ retry:
switch(chip->state){
case FL_READY:
- map->write32(map,CMD_READ_STATUS,adr);
+ sharp_send_cmd(map, CMD_READ_STATUS, adr);
chip->state = FL_STATUS;
case FL_STATUS:
- status = map->read32(map,adr);
-//printk("status=%08x\n",status);
-
- udelay(100);
- if((status & SR_READY)!=SR_READY){
-//printk(".status=%08x\n",status);
- udelay(100);
+ for(i=0;i<100;i++){
+ status = map_read(map, adr);
+ if((status.x[0] & SR_READY)==SR_READY)
+ break;
+ udelay(1);
}
break;
default:
@@ -252,7 +260,7 @@ retry:
goto retry;
}
- map->write32(map,CMD_RESET, adr);
+ sharp_send_cmd(map, CMD_RESET, adr);
chip->state = FL_READY;
@@ -293,7 +301,7 @@ static int sharp_read(struct mtd_info *mtd, loff_t from, size_t len,
if(ret<0)
break;
- map->copy_from(map,buf,ofs,thislen);
+ map_copy_from(map,buf,ofs,thislen);
sharp_release(&sharp->chips[chipnum]);
@@ -349,37 +357,39 @@ static int sharp_write_oneword(struct map_info *map, struct flchip *chip,
int timeo;
int try;
int i;
- int status = 0;
+ map_word data, status;
+ status.x[0] = 0;
ret = sharp_wait(map,chip);
for(try=0;try<10;try++){
- map->write32(map,CMD_BYTE_WRITE,adr);
+ sharp_send_cmd(map, CMD_BYTE_WRITE, adr);
/* cpu_to_le32 -> hack to fix the writel be->le conversion */
- map->write32(map,cpu_to_le32(datum),adr);
+ data.x[0] = cpu_to_le32(datum);
+ map_write(map, data, adr);
chip->state = FL_WRITING;
timeo = jiffies + (HZ/2);
- map->write32(map,CMD_READ_STATUS,adr);
+ sharp_send_cmd(map, CMD_READ_STATUS, adr);
for(i=0;i<100;i++){
- status = map->read32(map,adr);
- if((status & SR_READY)==SR_READY)
+ status = map_read(map, adr);
+ if((status.x[0] & SR_READY) == SR_READY)
break;
}
if(i==100){
printk("sharp: timed out writing\n");
}
- if(!(status&SR_ERRORS))
+ if(!(status.x[0] & SR_ERRORS))
break;
- printk("sharp: error writing byte at addr=%08lx status=%08x\n",adr,status);
+ printk("sharp: error writing byte at addr=%08lx status=%08lx\n", adr, status.x[0]);
- map->write32(map,CMD_CLEAR_STATUS,adr);
+ sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
}
- map->write32(map,CMD_RESET,adr);
+ sharp_send_cmd(map, CMD_RESET, adr);
chip->state = FL_READY;
wake_up(&chip->wq);
@@ -421,8 +431,8 @@ static int sharp_erase(struct mtd_info *mtd, struct erase_info *instr)
}
}
- if(instr->callback)
- instr->callback(instr);
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
return 0;
}
@@ -431,19 +441,19 @@ static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip,
unsigned long adr)
{
int ret;
- int timeo;
- int status;
+ unsigned long timeo;
+ map_word status;
DECLARE_WAITQUEUE(wait, current);
- map->write32(map,CMD_READ_STATUS,adr);
- status = map->read32(map,adr);
+ sharp_send_cmd(map, CMD_READ_STATUS, adr);
+ status = map_read(map, adr);
timeo = jiffies + HZ;
while(time_before(jiffies, timeo)){
- map->write32(map,CMD_READ_STATUS,adr);
- status = map->read32(map,adr);
- if((status & SR_READY)==SR_READY){
+ sharp_send_cmd(map, CMD_READ_STATUS, adr);
+ status = map_read(map, adr);
+ if((status.x[0] & SR_READY)==SR_READY){
ret = 0;
goto out;
}
@@ -457,12 +467,12 @@ static int sharp_do_wait_for_ready(struct map_info *map, struct flchip *chip,
remove_wait_queue(&chip->wq, &wait);
//spin_lock_bh(chip->mutex);
-
+
if (signal_pending(current)){
ret = -EINTR;
goto out;
}
-
+
}
ret = -ETIME;
out:
@@ -474,7 +484,7 @@ static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
{
int ret;
//int timeo;
- int status;
+ map_word status;
//int i;
//printk("sharp_erase_oneblock()\n");
@@ -484,26 +494,26 @@ static int sharp_erase_oneblock(struct map_info *map, struct flchip *chip,
sharp_unlock_oneblock(map,chip,adr);
#endif
- map->write32(map,CMD_BLOCK_ERASE_1,adr);
- map->write32(map,CMD_BLOCK_ERASE_2,adr);
+ sharp_send_cmd(map, CMD_BLOCK_ERASE_1, adr);
+ sharp_send_cmd(map, CMD_BLOCK_ERASE_2, adr);
chip->state = FL_ERASING;
ret = sharp_do_wait_for_ready(map,chip,adr);
if(ret<0)return ret;
- map->write32(map,CMD_READ_STATUS,adr);
- status = map->read32(map,adr);
+ sharp_send_cmd(map, CMD_READ_STATUS, adr);
+ status = map_read(map, adr);
- if(!(status&SR_ERRORS)){
- map->write32(map,CMD_RESET,adr);
+ if(!(status.x[0] & SR_ERRORS)){
+ sharp_send_cmd(map, CMD_RESET, adr);
chip->state = FL_READY;
//spin_unlock_bh(chip->mutex);
return 0;
}
- printk("sharp: error erasing block at addr=%08lx status=%08x\n",adr,status);
- map->write32(map,CMD_CLEAR_STATUS,adr);
+ printk("sharp: error erasing block at addr=%08lx status=%08lx\n", adr, status.x[0]);
+ sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
//spin_unlock_bh(chip->mutex);
@@ -515,20 +525,20 @@ static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
unsigned long adr)
{
int i;
- int status;
+ map_word status;
- map->write32(map,CMD_CLEAR_BLOCK_LOCKS_1,adr);
- map->write32(map,CMD_CLEAR_BLOCK_LOCKS_2,adr);
+ sharp_send_cmd(map, CMD_CLEAR_BLOCK_LOCKS_1, adr);
+ sharp_send_cmd(map, CMD_CLEAR_BLOCK_LOCKS_2, adr);
udelay(100);
- status = map->read32(map,adr);
- printk("status=%08x\n",status);
+ status = map_read(map, adr);
+ printk("status=%08lx\n", status.x[0]);
for(i=0;i<1000;i++){
- //map->write32(map,CMD_READ_STATUS,adr);
- status = map->read32(map,adr);
- if((status & SR_READY)==SR_READY)
+ //sharp_send_cmd(map, CMD_READ_STATUS, adr);
+ status = map_read(map, adr);
+ if((status.x[0] & SR_READY) == SR_READY)
break;
udelay(100);
}
@@ -536,14 +546,14 @@ static void sharp_unlock_oneblock(struct map_info *map, struct flchip *chip,
printk("sharp: timed out unlocking block\n");
}
- if(!(status&SR_ERRORS)){
- map->write32(map,CMD_RESET,adr);
+ if(!(status.x[0] & SR_ERRORS)){
+ sharp_send_cmd(map, CMD_RESET, adr);
chip->state = FL_READY;
return;
}
- printk("sharp: error unlocking block at addr=%08lx status=%08x\n",adr,status);
- map->write32(map,CMD_CLEAR_STATUS,adr);
+ printk("sharp: error unlocking block at addr=%08lx status=%08lx\n", adr, status.x[0]);
+ sharp_send_cmd(map, CMD_CLEAR_STATUS, adr);
}
#endif
@@ -561,7 +571,7 @@ static int sharp_suspend(struct mtd_info *mtd)
static void sharp_resume(struct mtd_info *mtd)
{
printk("sharp_resume()\n");
-
+
}
static void sharp_destroy(struct mtd_info *mtd)
@@ -570,7 +580,7 @@ static void sharp_destroy(struct mtd_info *mtd)
}
-int __init sharp_probe_init(void)
+static int __init sharp_probe_init(void)
{
printk("MTD Sharp chip driver <ds@lineo.com>\n");
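
The sharp.c hunks above replace the old fixed udelay() pauses and direct map->read32() accesses with bounded polling of the status word through map_read(). The following standalone sketch shows only the polling idiom; read_status(), SR_READY and the poll budget are illustrative stand-ins, not the driver's real map accessors or status bits.

/*
 * Sketch only, not driver code: the bounded status-poll idiom used by the
 * reworked sharp_wait()/sharp_write_oneword() above.
 */
#include <stdio.h>

#define SR_READY 0x80		/* assumed "device ready" status bit */

static unsigned long read_status(void)
{
	static int polls;
	/* pretend the device reports ready after a few polls */
	return (++polls >= 3) ? SR_READY : 0;
}

static int wait_ready(int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		if ((read_status() & SR_READY) == SR_READY)
			return 0;	/* ready */
		/* the driver does udelay(1) here between polls */
	}
	return -1;			/* give up: report a timeout */
}

int main(void)
{
	printf("wait_ready: %s\n", wait_ready(100) ? "timed out" : "ready");
	return 0;
}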
diff --git a/linux-2.4.x/drivers/mtd/cmdlinepart.c b/linux-2.4.x/drivers/mtd/cmdlinepart.c
new file mode 100644
index 0000000..08a3fce
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/cmdlinepart.c
@@ -0,0 +1,372 @@
+/*
+ * $Id: cmdlinepart.c,v 1.20 2006/03/29 08:35:18 dwmw2 Exp $
+ *
+ * Read flash partition table from command line
+ *
+ * Copyright 2002 SYSGO Real-Time Solutions GmbH
+ *
+ * The format for the command line is as follows:
+ *
+ * mtdparts=<mtddef>[;<mtddef>]
+ * <mtddef> := <mtd-id>:<partdef>[,<partdef>]
+ * <partdef> := <size>[@<offset>][<name>][ro]
+ * <mtd-id> := unique name used in mapping driver/device (mtd->name)
+ * <size> := standard linux memsize OR "-" to denote all remaining space
+ * <name> := '(' NAME ')'
+ *
+ * Examples:
+ *
+ * 1 NOR Flash with a single writable partition:
+ * edb7312-nor:-
+ *
+ * 1 NOR Flash with 2 partitions, 1 NAND Flash with one partition:
+ * edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/bootmem.h>
+
+/* error message prefix */
+#define ERRP "mtd: "
+
+/* debug macro */
+#if 0
+#define dbg(x) do { printk("DEBUG-CMDLINE-PART: "); printk x; } while(0)
+#else
+#define dbg(x)
+#endif
+
+
+/* special size referring to all the remaining space in a partition */
+#define SIZE_REMAINING UINT_MAX
+#define OFFSET_CONTINUOUS UINT_MAX
+
+struct cmdline_mtd_partition {
+ struct cmdline_mtd_partition *next;
+ char *mtd_id;
+ int num_parts;
+ struct mtd_partition *parts;
+};
+
+/* mtdpart_setup() parses into here */
+static struct cmdline_mtd_partition *partitions;
+
+/* the command line passed to mtdpart_setup() */
+static char *cmdline;
+static int cmdline_parsed = 0;
+
+/*
+ * Parse one partition definition for an MTD. Since there can be many
+ * comma-separated partition definitions, this function calls itself
+ * recursively until no more partition definitions are found. Nice side
+ * effect: the memory to keep the mtd_partition structs and the names
+ * is allocated upon the last definition being found. At that point the
+ * syntax has been verified to be OK.
+ */
+static struct mtd_partition * newpart(char *s,
+ char **retptr,
+ int *num_parts,
+ int this_part,
+ unsigned char **extra_mem_ptr,
+ int extra_mem_size)
+{
+ struct mtd_partition *parts;
+ unsigned long size;
+ unsigned long offset = OFFSET_CONTINUOUS;
+ char *name;
+ int name_len;
+ unsigned char *extra_mem;
+ char delim;
+ unsigned int mask_flags;
+
+ /* fetch the partition size */
+ if (*s == '-')
+ { /* assign all remaining space to this partition */
+ size = SIZE_REMAINING;
+ s++;
+ }
+ else
+ {
+ size = memparse(s, &s);
+ if (size < PAGE_SIZE)
+ {
+ printk(KERN_ERR ERRP "partition size too small (%lx)\n", size);
+ return NULL;
+ }
+ }
+
+ /* fetch partition name and flags */
+ mask_flags = 0; /* this is going to be a regular partition */
+ delim = 0;
+ /* check for offset */
+ if (*s == '@')
+ {
+ s++;
+ offset = memparse(s, &s);
+ }
+ /* now look for name */
+ if (*s == '(')
+ {
+ delim = ')';
+ }
+
+ if (delim)
+ {
+ char *p;
+
+ name = ++s;
+ if ((p = strchr(name, delim)) == 0)
+ {
+ printk(KERN_ERR ERRP "no closing %c found in partition name\n", delim);
+ return NULL;
+ }
+ name_len = p - name;
+ s = p + 1;
+ }
+ else
+ {
+ name = NULL;
+ name_len = 13; /* Partition_000 */
+ }
+
+ /* record name length for memory allocation later */
+ extra_mem_size += name_len + 1;
+
+ /* test for options */
+ if (strncmp(s, "ro", 2) == 0)
+ {
+ mask_flags |= MTD_WRITEABLE;
+ s += 2;
+ }
+
+ /* test if more partitions are following */
+ if (*s == ',')
+ {
+ if (size == SIZE_REMAINING)
+ {
+ printk(KERN_ERR ERRP "no partitions allowed after a fill-up partition\n");
+ return NULL;
+ }
+ /* more partitions follow, parse them */
+ if ((parts = newpart(s + 1, &s, num_parts,
+ this_part + 1, &extra_mem, extra_mem_size)) == 0)
+ return NULL;
+ }
+ else
+ { /* this is the last partition: allocate space for all */
+ int alloc_size;
+
+ *num_parts = this_part + 1;
+ alloc_size = *num_parts * sizeof(struct mtd_partition) +
+ extra_mem_size;
+ parts = kmalloc(alloc_size, GFP_KERNEL);
+ if (!parts)
+ {
+ printk(KERN_ERR ERRP "out of memory\n");
+ return NULL;
+ }
+ memset(parts, 0, alloc_size);
+ extra_mem = (unsigned char *)(parts + *num_parts);
+ }
+ /* enter this partition (the offset will be calculated later if it is still OFFSET_CONTINUOUS at this point) */
+ parts[this_part].size = size;
+ parts[this_part].offset = offset;
+ parts[this_part].mask_flags = mask_flags;
+ if (name)
+ {
+ strlcpy(extra_mem, name, name_len + 1);
+ }
+ else
+ {
+ sprintf(extra_mem, "Partition_%03d", this_part);
+ }
+ parts[this_part].name = extra_mem;
+ extra_mem += name_len + 1;
+
+ dbg(("partition %d: name <%s>, offset %x, size %x, mask flags %x\n",
+ this_part,
+ parts[this_part].name,
+ parts[this_part].offset,
+ parts[this_part].size,
+ parts[this_part].mask_flags));
+
+ /* return (updated) pointer to extra_mem memory */
+ if (extra_mem_ptr)
+ *extra_mem_ptr = extra_mem;
+
+ /* return (updated) pointer command line string */
+ *retptr = s;
+
+ /* return partition table */
+ return parts;
+}
+
+/*
+ * Parse the command line.
+ */
+static int mtdpart_setup_real(char *s)
+{
+ cmdline_parsed = 1;
+
+ for( ; s != NULL; )
+ {
+ struct cmdline_mtd_partition *this_mtd;
+ struct mtd_partition *parts;
+ int mtd_id_len;
+ int num_parts;
+ char *p, *mtd_id;
+
+ mtd_id = s;
+ /* fetch <mtd-id> */
+ if (!(p = strchr(s, ':')))
+ {
+ printk(KERN_ERR ERRP "no mtd-id\n");
+ return 0;
+ }
+ mtd_id_len = p - mtd_id;
+
+ dbg(("parsing <%s>\n", p+1));
+
+ /*
+ * parse one mtd. have it reserve memory for the
+ * struct cmdline_mtd_partition and the mtd-id string.
+ */
+ parts = newpart(p + 1, /* cmdline */
+ &s, /* out: updated cmdline ptr */
+ &num_parts, /* out: number of parts */
+ 0, /* first partition */
+ (unsigned char**)&this_mtd, /* out: extra mem */
+ mtd_id_len + 1 + sizeof(*this_mtd) +
+ sizeof(void*)-1 /*alignment*/);
+ if(!parts)
+ {
+ /*
+ * An error occurred. We're either:
+ * a) out of memory, or
+ * b) in the middle of the partition spec.
+ * Either way, this mtd is hosed and we're
+ * unlikely to succeed in parsing any more.
+ */
+ return 0;
+ }
+
+ /* align this_mtd */
+ this_mtd = (struct cmdline_mtd_partition *)
+ ALIGN((unsigned long)this_mtd, sizeof(void*));
+ /* enter results */
+ this_mtd->parts = parts;
+ this_mtd->num_parts = num_parts;
+ this_mtd->mtd_id = (char*)(this_mtd + 1);
+ strlcpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);
+
+ /* link into chain */
+ this_mtd->next = partitions;
+ partitions = this_mtd;
+
+ dbg(("mtdid=<%s> num_parts=<%d>\n",
+ this_mtd->mtd_id, this_mtd->num_parts));
+
+
+ /* EOS - we're done */
+ if (*s == 0)
+ break;
+
+ /* does another spec follow? */
+ if (*s != ';')
+ {
+ printk(KERN_ERR ERRP "bad character after partition (%c)\n", *s);
+ return 0;
+ }
+ s++;
+ }
+ return 1;
+}
+
+/*
+ * Main function to be called from the MTD mapping driver/device to
+ * obtain the partitioning information. At this point the command line
+ * arguments will actually be parsed and turned into struct mtd_partition
+ * information. It returns partitions for the requested mtd device, or
+ * the first one in the chain if a NULL mtd_id is passed in.
+ */
+static int parse_cmdline_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ unsigned long origin)
+{
+ unsigned long offset;
+ int i;
+ struct cmdline_mtd_partition *part;
+ char *mtd_id = master->name;
+
+ if(!cmdline)
+ return -EINVAL;
+
+ /* parse command line */
+ if (!cmdline_parsed)
+ mtdpart_setup_real(cmdline);
+
+ for(part = partitions; part; part = part->next)
+ {
+ if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
+ {
+ for(i = 0, offset = 0; i < part->num_parts; i++)
+ {
+ if (part->parts[i].offset == OFFSET_CONTINUOUS)
+ part->parts[i].offset = offset;
+ else
+ offset = part->parts[i].offset;
+ if (part->parts[i].size == SIZE_REMAINING)
+ part->parts[i].size = master->size - offset;
+ if (offset + part->parts[i].size > master->size)
+ {
+ printk(KERN_WARNING ERRP
+ "%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
+ part->parts[i].size = master->size - offset;
+ part->num_parts = i;
+ }
+ offset += part->parts[i].size;
+ }
+ *pparts = part->parts;
+ return part->num_parts;
+ }
+ }
+ return -EINVAL;
+}
+
+
+/*
+ * This is the handler for our kernel parameter, called from
+ * main.c::checksetup(). Note that we cannot yet kmalloc() anything,
+ * so we only save the command line for later processing.
+ *
+ * This function needs to be visible for bootloaders.
+ */
+int mtdpart_setup(char *s)
+{
+ cmdline = s;
+ return 1;
+}
+
+__setup("mtdparts=", mtdpart_setup);
+
+static struct mtd_part_parser cmdline_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_cmdline_partitions,
+ .name = "cmdlinepart",
+};
+
+static int __init cmdline_parser_init(void)
+{
+ return register_mtd_parser(&cmdline_parser);
+}
+
+module_init(cmdline_parser_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
+MODULE_DESCRIPTION("Command line configuration of MTD partitions");
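
To make the mtdparts= grammar documented at the top of cmdlinepart.c concrete, here is a standalone sketch (userspace C, not the kernel parser) that decodes one <partdef> such as "256k(ARMboot)ro" into a size, optional offset, name and read-only flag. It uses a strtoul()-based helper in place of the kernel's memparse(), and the "-" remaining-space form and multi-partition chaining are deliberately left out.

/*
 * Sketch only: decode a single <partdef> of the form
 * <size>[@<offset>][(<name>)][ro], as described in the cmdlinepart.c
 * header comment above.  parse_size() is a simplified stand-in for
 * the kernel's memparse().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long parse_size(const char *s, const char **end)
{
	char *e;
	unsigned long val = strtoul(s, &e, 0);

	switch (*e) {
	case 'g': case 'G': val <<= 10;		/* fall through */
	case 'm': case 'M': val <<= 10;		/* fall through */
	case 'k': case 'K': val <<= 10; e++; break;
	}
	*end = e;
	return val;
}

int main(void)
{
	const char *part = "256k(ARMboot)ro";
	const char *s = part;
	unsigned long size, offset = 0;
	char name[64] = "";
	int readonly = 0;

	size = parse_size(s, &s);
	if (*s == '@')				/* optional @<offset> */
		offset = parse_size(s + 1, &s);
	if (*s == '(') {			/* optional (<name>) */
		const char *close = strchr(s, ')');
		if (close) {
			size_t len = (size_t)(close - s - 1);
			if (len >= sizeof(name))
				len = sizeof(name) - 1;
			memcpy(name, s + 1, len);
			name[len] = '\0';
			s = close + 1;
		}
	}
	if (strncmp(s, "ro", 2) == 0)		/* optional ro flag */
		readonly = 1;

	printf("\"%s\" -> size=0x%lx offset=0x%lx name=\"%s\"%s\n",
	       part, size, offset, name, readonly ? " (read-only)" : "");
	return 0;
}

Running it prints size=0x40000 offset=0x0 name="ARMboot" (read-only), which matches the first partition of the second example in the header comment.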
diff --git a/linux-2.4.x/drivers/mtd/devices/Config.in b/linux-2.4.x/drivers/mtd/devices/Config.in
index 92316b4..717e67a 100644
--- a/linux-2.4.x/drivers/mtd/devices/Config.in
+++ b/linux-2.4.x/drivers/mtd/devices/Config.in
@@ -1,6 +1,6 @@
-# drivers/mtd/maps/Config.in
+# drivers/mtd/devices/Config.in
-# $Id: Config.in,v 1.5 2001/09/23 15:33:10 dwmw2 Exp $
+# $Id: Config.in,v 1.15 2004/10/01 21:47:13 gleixner Exp $
mainmenu_option next_comment
@@ -10,12 +10,12 @@ if [ "$CONFIG_MTD_PMC551" = "y" -o "$CONFIG_MTD_PMC551" = "m" ]; then
bool ' PMC551 256M DRAM Bugfix' CONFIG_MTD_PMC551_BUGFIX
bool ' PMC551 Debugging' CONFIG_MTD_PMC551_DEBUG
fi
-if [ "$CONFIG_DECSTATION" = "y" ]; then
- dep_tristate ' DEC MS02-NV NVRAM module support' CONFIG_MTD_MS02NV $CONFIG_MTD $CONFIG_DECSTATION
+if [ "$CONFIG_MACH__DECSTATION" = "y" ]; then
+ dep_tristate ' DEC MS02-NV NVRAM module support' CONFIG_MTD_MS02NV $CONFIG_MTD
fi
dep_tristate ' Uncached system RAM' CONFIG_MTD_SLRAM $CONFIG_MTD
if [ "$CONFIG_SA1100_LART" = "y" ]; then
- dep_tristate ' 28F160xx flash driver for LART' CONFIG_MTD_LART $CONFIG_MTD
+ dep_tristate ' 28F160xx flash driver for LART' CONFIG_MTD_LART $CONFIG_MTD
fi
dep_tristate ' Test driver using RAM' CONFIG_MTD_MTDRAM $CONFIG_MTD
if [ "$CONFIG_MTD_MTDRAM" = "y" -o "$CONFIG_MTD_MTDRAM" = "m" ]; then
@@ -28,19 +28,29 @@ fi
dep_tristate ' MTD emulation using block device' CONFIG_MTD_BLKMTD $CONFIG_MTD
comment 'Disk-On-Chip Device Drivers'
- dep_tristate ' M-Systems Disk-On-Chip 1000' CONFIG_MTD_DOC1000 $CONFIG_MTD
- dep_tristate ' M-Systems Disk-On-Chip 2000 and Millennium' CONFIG_MTD_DOC2000 $CONFIG_MTD
- dep_tristate ' M-Systems Disk-On-Chip Millennium-only alternative driver (see help)' CONFIG_MTD_DOC2001 $CONFIG_MTD
- if [ "$CONFIG_MTD_DOC2001" = "y" -o "$CONFIG_MTD_DOC2000" = "y" ]; then
+ dep_tristate ' M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)' CONFIG_MTD_DOC2000 $CONFIG_MTD
+ dep_tristate ' M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)' CONFIG_MTD_DOC2001 $CONFIG_MTD
+ dep_tristate ' M-Systems Disk-On-Chip Millennium Plus driver (see help)' CONFIG_MTD_DOC2001PLUS $CONFIG_MTD
+ if [ "$CONFIG_MTD_DOC2001PLUS" = "y" -o "$CONFIG_MTD_DOC2001" = "y" -o "$CONFIG_MTD_DOC2000" = "y" ]; then
define_bool CONFIG_MTD_DOCPROBE y
else
- if [ "$CONFIG_MTD_DOC2001" = "m" -o "$CONFIG_MTD_DOC2000" = "m" ]; then
+ if [ "$CONFIG_MTD_DOC2001PLUS" = "m" -o "$CONFIG_MTD_DOC2001" = "m" -o "$CONFIG_MTD_DOC2000" = "m" ]; then
define_bool CONFIG_MTD_DOCPROBE m
else
define_bool CONFIG_MTD_DOCPROBE n
fi
fi
+ if [ "$CONFIG_MTD_DOCPROBE" = "y" ]; then
+ define_bool CONFIG_MTD_DOCECC y
+ else
+ if [ "$CONFIG_MTD_DOCPROBE" = "m" ]; then
+ define_bool CONFIG_MTD_DOCECC m
+ else
+ define_bool CONFIG_MTD_DOCECC n
+ fi
+ fi
+
if [ "$CONFIG_MTD_DOCPROBE" = "y" -o "$CONFIG_MTD_DOCPROBE" = "m" ]; then
bool ' Advanced detection options for DiskOnChip' CONFIG_MTD_DOCPROBE_ADVANCED
if [ "$CONFIG_MTD_DOCPROBE_ADVANCED" = "n" ]; then
diff --git a/linux-2.4.x/drivers/mtd/devices/Makefile b/linux-2.4.x/drivers/mtd/devices/Makefile
index f86ddf2..9deee09 100644
--- a/linux-2.4.x/drivers/mtd/devices/Makefile
+++ b/linux-2.4.x/drivers/mtd/devices/Makefile
@@ -1,26 +1,23 @@
#
-# linux/drivers/devices/Makefile
+# linux/drivers/mtd/devices/Makefile.24
+# Makefile for obsolete kernels
#
-# $Id: Makefile,v 1.4 2001/06/26 21:10:05 spse Exp $
+# $Id: Makefile.24,v 1.2 2004/08/09 18:46:04 dmarlin Exp $
O_TARGET := devlink.o
+export-objs := docecc.o
-# *** BIG UGLY NOTE ***
-#
-# The removal of get_module_symbol() and replacement with
-# inter_module_register() et al has introduced a link order dependency
-# here where previously there was none. We now have to ensure that
-# doc200[01].o are linked before docprobe.o
-
-obj-$(CONFIG_MTD_DOC1000) += doc1000.o
-obj-$(CONFIG_MTD_DOC2000) += doc2000.o
-obj-$(CONFIG_MTD_DOC2001) += doc2001.o
-obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o docecc.o
-obj-$(CONFIG_MTD_SLRAM) += slram.o
-obj-$(CONFIG_MTD_PMC551) += pmc551.o
-obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o
-obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
-obj-$(CONFIG_MTD_LART) += lart.o
-obj-$(CONFIG_MTD_BLKMTD) += blkmtd.o
+obj-$(CONFIG_MTD_DOC2000) += doc2000.o
+obj-$(CONFIG_MTD_DOC2001) += doc2001.o
+obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
+obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o
+obj-$(CONFIG_MTD_DOCECC) += docecc.o
+obj-$(CONFIG_MTD_SLRAM) += slram.o
+obj-$(CONFIG_MTD_PHRAM) += phram.o
+obj-$(CONFIG_MTD_PMC551) += pmc551.o
+obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o
+obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
+obj-$(CONFIG_MTD_LART) += lart.o
+obj-$(CONFIG_MTD_BLKMTD) += blkmtd-24.o
include $(TOPDIR)/Rules.make
diff --git a/linux-2.4.x/drivers/mtd/devices/Makefile.common b/linux-2.4.x/drivers/mtd/devices/Makefile.common
new file mode 100644
index 0000000..c77294e
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/devices/Makefile.common
@@ -0,0 +1,26 @@
+#
+# linux/drivers/devices/Makefile
+#
+# $Id: Makefile.common,v 1.8 2005/05/13 09:45:48 joern Exp $
+
+# *** BIG UGLY NOTE ***
+#
+# The removal of get_module_symbol() and replacement with
+# inter_module_register() et al has introduced a link order dependency
+# here where previously there was none. We now have to ensure that
+# doc200[01].o are linked before docprobe.o
+
+obj-$(CONFIG_MTD_DOC2000) += doc2000.o
+obj-$(CONFIG_MTD_DOC2001) += doc2001.o
+obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
+obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o
+obj-$(CONFIG_MTD_DOCECC) += docecc.o
+obj-$(CONFIG_MTD_SLRAM) += slram.o
+obj-$(CONFIG_MTD_PHRAM) += phram.o
+obj-$(CONFIG_MTD_PMC551) += pmc551.o
+obj-$(CONFIG_MTD_MS02NV) += ms02-nv.o
+obj-$(CONFIG_MTD_MTDRAM) += mtdram.o
+obj-$(CONFIG_MTD_LART) += lart.o
+obj-$(CONFIG_MTD_BLKMTD) += blkmtd.o
+obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
+obj-$(CONFIG_RAMTD) += ramtd.o
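
As a small illustration of the kbuild idiom used above (config values are hypothetical): with CONFIG_MTD_DOC2000=y and CONFIG_MTD_DOCPROBE=m the list expands to

    obj-y += doc2000.o
    obj-m += docprobe.o

so keeping doc2000.o/doc2001.o ahead of docprobe.o in the file preserves the link order the BIG UGLY NOTE above asks for when both end up built in.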
diff --git a/linux-2.4.x/drivers/mtd/devices/blkmtd-24.c b/linux-2.4.x/drivers/mtd/devices/blkmtd-24.c
new file mode 100644
index 0000000..1672fc5
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/devices/blkmtd-24.c
@@ -0,0 +1,1056 @@
+/*
+ * $Id: blkmtd-24.c,v 1.24 2005/11/07 11:14:24 gleixner Exp $
+ *
+ * blkmtd.c - use a block device as a fake MTD
+ *
+ * Author: Simon Evans <spse@secret.org.uk>
+ *
+ * Copyright (C) 2001,2002 Simon Evans
+ *
+ * Licence: GPL
+ *
+ * How it works:
+ * The driver uses raw I/O to read/write the device and the page
+ * cache to cache access. Writes update the page cache with the
+ * new data, mark it dirty and add the page into a kiobuf.
+ * When the kiobuf becomes full, or the next entry is to an earlier
+ * block than the last one in the kiobuf, it is flushed to disk. This keeps
+ * writes ordered and gives a small and simple outgoing
+ * write cache.
+ *
+ * It can be loaded Read-Only to prevent erases and writes to the
+ * medium.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/iobuf.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/list.h>
+#include <linux/mtd/mtd.h>
+
+#ifdef CONFIG_MTD_DEBUG
+#ifdef CONFIG_PROC_FS
+# include <linux/proc_fs.h>
+# define BLKMTD_PROC_DEBUG
+ static struct proc_dir_entry *blkmtd_proc;
+#endif
+#endif
+
+
+#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
+#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "blkmtd: " format "\n" , ## arg)
+#define crit(format, arg...) printk(KERN_CRIT "blkmtd: " format "\n" , ## arg)
+
+
+/* Default erase size in KiB, always make it a multiple of PAGE_SIZE */
+#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10) /* 128KiB */
+#define VERSION "1.10"
+
+/* Info for the block device */
+struct blkmtd_dev {
+ struct list_head list;
+ struct block_device *binding;
+ struct mtd_info mtd_info;
+ struct kiobuf *rd_buf, *wr_buf;
+ long iobuf_locks;
+ struct semaphore wrbuf_mutex;
+};
+
+
+/* Static info about the MTD, used in cleanup_module */
+static LIST_HEAD(blkmtd_device_list);
+
+
+static void blkmtd_sync(struct mtd_info *mtd);
+
+#define MAX_DEVICES 4
+
+/* Module parameters passed by insmod/modprobe */
+char *device[MAX_DEVICES]; /* the block device to use */
+int erasesz[MAX_DEVICES]; /* optional default erase size */
+int ro[MAX_DEVICES]; /* optional read only flag */
+int sync;
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
+MODULE_DESCRIPTION("Emulate an MTD using a block device");
+MODULE_PARM(device, "1-4s");
+MODULE_PARM_DESC(device, "block device to use");
+MODULE_PARM(erasesz, "1-4i");
+MODULE_PARM_DESC(erasesz, "optional erase size to use in KiB. eg 4=4KiB.");
+MODULE_PARM(ro, "1-4i");
+MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
+MODULE_PARM(sync, "i");
+MODULE_PARM_DESC(sync, "1=Synchronous writes");
+
+
+/**
+ * read_pages - read in pages via the page cache
+ * @dev: device to read from
+ * @pagenrs: list of page numbers wanted
+ * @pagelst: storage for struct page * pointers
+ * @pages: count of pages wanted
+ *
+ * Read pages, getting them from the page cache if available,
+ * otherwise reading them in from disk. pagelst must be preallocated
+ * to hold the page count.
+ */
+static int read_pages(struct blkmtd_dev *dev, int pagenrs[], struct page **pagelst, int pages)
+{
+ kdev_t kdev;
+ struct page *page;
+ int cnt = 0;
+ struct kiobuf *iobuf;
+ int err = 0;
+
+ if(!dev) {
+ err("read_pages: PANIC dev == NULL");
+ return -EIO;
+ }
+ kdev = to_kdev_t(dev->binding->bd_dev);
+
+ DEBUG(2, "read_pages: reading %d pages\n", pages);
+ if(test_and_set_bit(0, &dev->iobuf_locks)) {
+ err = alloc_kiovec(1, &iobuf);
+ if (err) {
+ crit("cant allocate kiobuf");
+ return -ENOMEM;
+ }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
+ iobuf->blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long), GFP_KERNEL);
+ if(iobuf->blocks == NULL) {
+ crit("cant allocate iobuf blocks");
+ free_kiovec(1, &iobuf);
+ return -ENOMEM;
+ }
+#endif
+ } else {
+ iobuf = dev->rd_buf;
+ }
+
+ iobuf->nr_pages = 0;
+ iobuf->length = 0;
+ iobuf->offset = 0;
+ iobuf->locked = 1;
+
+ for(cnt = 0; cnt < pages; cnt++) {
+ page = grab_cache_page(dev->binding->bd_inode->i_mapping, pagenrs[cnt]);
+ pagelst[cnt] = page;
+ if(!Page_Uptodate(page)) {
+ iobuf->blocks[iobuf->nr_pages] = pagenrs[cnt];
+ iobuf->maplist[iobuf->nr_pages++] = page;
+ }
+ }
+
+ if(iobuf->nr_pages) {
+ iobuf->length = iobuf->nr_pages << PAGE_SHIFT;
+ err = brw_kiovec(READ, 1, &iobuf, kdev, iobuf->blocks, PAGE_SIZE);
+ DEBUG(3, "blkmtd: read_pages: finished, err = %d\n", err);
+ if(err < 0) {
+ while(pages--) {
+ ClearPageUptodate(pagelst[pages]);
+ unlock_page(pagelst[pages]);
+ page_cache_release(pagelst[pages]);
+ }
+ } else {
+ while(iobuf->nr_pages--) {
+ SetPageUptodate(iobuf->maplist[iobuf->nr_pages]);
+ }
+ err = 0;
+ }
+ }
+
+
+ if(iobuf != dev->rd_buf) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
+ kfree(iobuf->blocks);
+#endif
+ free_kiovec(1, &iobuf);
+ } else {
+ clear_bit(0, &dev->iobuf_locks);
+ }
+ DEBUG(2, "read_pages: done, err = %d\n", err);
+ return err;
+}
+
+
+/**
+ * commit_pages - commit pages in the writeout kiobuf to disk
+ * @dev: device to write to
+ *
+ * If the current dev has pages in the dev->wr_buf kiobuf,
+ * they are written to disk using brw_kiovec()
+ */
+static int commit_pages(struct blkmtd_dev *dev)
+{
+ struct kiobuf *iobuf = dev->wr_buf;
+ kdev_t kdev = to_kdev_t(dev->binding->bd_dev);
+ int err = 0;
+
+ iobuf->length = iobuf->nr_pages << PAGE_SHIFT;
+ iobuf->locked = 1;
+ if(iobuf->length) {
+ int i;
+ DEBUG(2, "blkmtd: commit_pages: nrpages = %d\n", iobuf->nr_pages);
+ /* Check all the pages are dirty and lock them */
+ for(i = 0; i < iobuf->nr_pages; i++) {
+ struct page *page = iobuf->maplist[i];
+ BUG_ON(!PageDirty(page));
+ lock_page(page);
+ }
+ err = brw_kiovec(WRITE, 1, &iobuf, kdev, iobuf->blocks, PAGE_SIZE);
+ DEBUG(3, "commit_write: committed %d pages err = %d\n", iobuf->nr_pages, err);
+ while(iobuf->nr_pages) {
+ struct page *page = iobuf->maplist[--iobuf->nr_pages];
+ ClearPageDirty(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ page_cache_release(page);
+ }
+ }
+
+ DEBUG(2, "blkmtd: sync: end, err = %d\n", err);
+ iobuf->offset = 0;
+ iobuf->nr_pages = 0;
+ iobuf->length = 0;
+ return err;
+}
+
+
+/**
+ * write_pages - write block of data to device via the page cache
+ * @dev: device to write to
+ * @buf: data source or NULL if erase (output is set to 0xff)
+ * @to: offset into output device
+ * @len: amount to data to write
+ * @retlen: amount of data written
+ *
+ * Grab pages from the page cache and fill them with the source data.
+ * Non page aligned start and end result in a readin of the page and
+ * part of the page being modified. Pages are added to the wr_buf kiobuf
+ * until this becomes full or the next page written to has a lower pagenr
+ * then the current max pagenr in the kiobuf.
+ */
+static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
+ size_t len, int *retlen)
+{
+ int pagenr, offset;
+ size_t start_len = 0, end_len;
+ int pagecnt = 0;
+ struct kiobuf *iobuf = dev->wr_buf;
+ int err = 0;
+ struct page *pagelst[2];
+ int pagenrs[2];
+ int readpages = 0;
+ int ignorepage = -1;
+
+ pagenr = to >> PAGE_SHIFT;
+ offset = to & ~PAGE_MASK;
+
+ DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
+ buf, (long)to, len, pagenr, offset);
+
+ *retlen = 0;
+ /* see if we have to do a partial write at the start */
+ if(offset) {
+ start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
+ len -= start_len;
+ }
+
+ /* calculate the length of the other two regions */
+ end_len = len & ~PAGE_MASK;
+ len -= end_len;
+
+ if(start_len) {
+ pagenrs[0] = pagenr;
+ readpages++;
+ pagecnt++;
+ }
+ if(len)
+ pagecnt += len >> PAGE_SHIFT;
+ if(end_len) {
+ pagenrs[readpages] = pagenr + pagecnt;
+ readpages++;
+ pagecnt++;
+ }
+
+ DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
+ start_len, len, end_len, pagecnt);
+
+ down(&dev->wrbuf_mutex);
+
+ if(iobuf->nr_pages && ((pagenr <= iobuf->blocks[iobuf->nr_pages-1])
+ || (iobuf->nr_pages + pagecnt) >= KIO_STATIC_PAGES)) {
+
+ if((pagenr == iobuf->blocks[iobuf->nr_pages-1])
+ && ((iobuf->nr_pages + pagecnt) < KIO_STATIC_PAGES)) {
+ iobuf->nr_pages--;
+ ignorepage = pagenr;
+ } else {
+ DEBUG(3, "blkmtd: doing writeout pagenr = %d max_pagenr = %ld pagecnt = %d idx = %d\n",
+ pagenr, iobuf->blocks[iobuf->nr_pages-1],
+ pagecnt, iobuf->nr_pages);
+ commit_pages(dev);
+ }
+ }
+
+ if(readpages) {
+ err = read_pages(dev, pagenrs, pagelst, readpages);
+ if(err < 0)
+ goto readin_err;
+ }
+
+ if(start_len) {
+ /* do partial start region */
+ struct page *page;
+
+ DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
+ pagenr, start_len, offset);
+ page = pagelst[0];
+ BUG_ON(!buf);
+ if(PageDirty(page) && pagenr != ignorepage) {
+ err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d ignorepage = %d\n",
+ to, start_len, len, end_len, pagenr, ignorepage);
+ BUG();
+ }
+ memcpy(page_address(page)+offset, buf, start_len);
+ SetPageDirty(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ buf += start_len;
+ *retlen = start_len;
+ err = 0;
+ iobuf->blocks[iobuf->nr_pages] = pagenr++;
+ iobuf->maplist[iobuf->nr_pages] = page;
+ iobuf->nr_pages++;
+ }
+
+ /* Now do the main loop to a page aligned, n page sized output */
+ if(len) {
+ int pagesc = len >> PAGE_SHIFT;
+ DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
+ pagenr, pagesc);
+ while(pagesc) {
+ struct page *page;
+
+ /* see if page is in the page cache */
+ DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
+ page = grab_cache_page(dev->binding->bd_inode->i_mapping, pagenr);
+			if(!page) {
+				warn("write: cant grab cache page %d", pagenr);
+				err = -ENOMEM;
+				goto write_err;
+			}
+			if(PageDirty(page) && pagenr != ignorepage) {
+				BUG();
+			}
+ if(!buf) {
+ memset(page_address(page), 0xff, PAGE_SIZE);
+ } else {
+ memcpy(page_address(page), buf, PAGE_SIZE);
+ buf += PAGE_SIZE;
+ }
+ iobuf->blocks[iobuf->nr_pages] = pagenr++;
+ iobuf->maplist[iobuf->nr_pages] = page;
+ iobuf->nr_pages++;
+ SetPageDirty(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ pagesc--;
+ *retlen += PAGE_SIZE;
+ }
+ }
+
+ if(end_len) {
+ /* do the third region */
+ struct page *page;
+ DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
+ pagenr, end_len);
+ page = pagelst[readpages-1];
+ BUG_ON(!buf);
+ if(PageDirty(page) && pagenr != ignorepage) {
+ err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d ignorepage = %d\n",
+ to, start_len, len, end_len, pagenr, ignorepage);
+ BUG();
+ }
+ memcpy(page_address(page), buf, end_len);
+ SetPageDirty(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ DEBUG(3, "blkmtd: write: writing out partial end\n");
+ *retlen += end_len;
+ err = 0;
+ iobuf->blocks[iobuf->nr_pages] = pagenr;
+ iobuf->maplist[iobuf->nr_pages] = page;
+ iobuf->nr_pages++;
+ }
+
+ DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
+
+ if(sync) {
+write_err:
+ commit_pages(dev);
+ }
+
+readin_err:
+ up(&dev->wrbuf_mutex);
+ return err;
+}
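/*
 * Worked example of the split above (illustrative numbers, PAGE_SIZE
 * assumed to be 4096): a write with to = 0x1800, len = 0x3000 gives
 *   pagenr = 1, offset = 0x800
 *   start_len = PAGE_SIZE - 0x800 = 0x800   (partial first page, read in)
 *   end_len   = 0x2800 & ~PAGE_MASK = 0x800 (partial last page, read in)
 *   len       = 0x2000                      (two whole pages, no read needed)
 * so pagecnt = 4 and readpages = 2: only the two partial pages are fetched
 * with read_pages() before being modified and queued in the wr_buf kiobuf.
 */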
+
+
+/* erase a specified part of the device */
+static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct blkmtd_dev *dev = mtd->priv;
+ struct mtd_erase_region_info *einfo = mtd->eraseregions;
+ int numregions = mtd->numeraseregions;
+ size_t from;
+ u_long len;
+ int err = -EIO;
+ size_t retlen;
+
+ /* check readonly */
+ if(!dev->wr_buf) {
+ err("error: mtd%d trying to erase readonly device %s",
+ mtd->index, mtd->name);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_callback;
+ }
+
+ instr->state = MTD_ERASING;
+ from = instr->addr;
+ len = instr->len;
+
+ /* check erase region has valid start and length */
+ DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%zx len = 0x%lx\n",
+ bdevname(dev->binding->bd_dev), from, len);
+ while(numregions) {
+ DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
+ einfo->offset, einfo->erasesize, einfo->numblocks);
+ if(from >= einfo->offset
+ && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) {
+ if(len == einfo->erasesize
+ && ( (from - einfo->offset) % einfo->erasesize == 0))
+ break;
+ }
+ numregions--;
+ einfo++;
+ }
+
+ if(!numregions) {
+ /* Not a valid erase block */
+ err("erase: invalid erase request 0x%lX @ 0x%08zX", len, from);
+ instr->state = MTD_ERASE_FAILED;
+ err = -EIO;
+ }
+
+ if(instr->state != MTD_ERASE_FAILED) {
+ /* do the erase */
+ DEBUG(3, "Doing erase from = %zd len = %ld\n", from, len);
+ err = write_pages(dev, NULL, from, len, &retlen);
+ if(err < 0) {
+ err("erase failed err = %d", err);
+ instr->state = MTD_ERASE_FAILED;
+ } else {
+ instr->state = MTD_ERASE_DONE;
+ err = 0;
+ }
+ }
+
+ DEBUG(3, "blkmtd: erase: checking callback\n");
+ erase_callback:
+ mtd_erase_callback(instr);
+ DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
+ return err;
+}
+
+
+/* read a range of the data via the page cache */
+static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct blkmtd_dev *dev = mtd->priv;
+ int err = 0;
+ int offset;
+ int pagenr, pages;
+ struct page **pagelst;
+ int *pagenrs;
+ int i;
+
+ *retlen = 0;
+
+ DEBUG(2, "blkmtd: read: dev = `%s' from = %lld len = %zd buf = %p\n",
+ bdevname(dev->binding->bd_dev), from, len, buf);
+
+ pagenr = from >> PAGE_SHIFT;
+ offset = from - (pagenr << PAGE_SHIFT);
+
+ pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
+ DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n",
+ pagenr, offset, pages);
+
+ pagelst = kmalloc(sizeof(struct page *) * pages, GFP_KERNEL);
+ if(!pagelst)
+ return -ENOMEM;
+ pagenrs = kmalloc(sizeof(int) * pages, GFP_KERNEL);
+ if(!pagenrs) {
+ kfree(pagelst);
+ return -ENOMEM;
+ }
+ for(i = 0; i < pages; i++)
+ pagenrs[i] = pagenr+i;
+
+ err = read_pages(dev, pagenrs, pagelst, pages);
+ if(err)
+ goto readerr;
+
+ pagenr = 0;
+ while(pages) {
+ struct page *page;
+ int cpylen;
+
+ DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
+ page = pagelst[pagenr];
+
+ cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
+ if(offset+cpylen > PAGE_SIZE)
+ cpylen = PAGE_SIZE-offset;
+
+ memcpy(buf + *retlen, page_address(page) + offset, cpylen);
+ offset = 0;
+ len -= cpylen;
+ *retlen += cpylen;
+ pagenr++;
+ pages--;
+ unlock_page(page);
+ if(!PageDirty(page))
+ page_cache_release(page);
+ }
+
+ readerr:
+ kfree(pagelst);
+ kfree(pagenrs);
+ DEBUG(2, "blkmtd: end read: retlen = %zd, err = %d\n", *retlen, err);
+ return err;
+}
+
+
+/* write data to the underlying device */
+static int blkmtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct blkmtd_dev *dev = mtd->priv;
+ int err;
+
+ *retlen = 0;
+ if(!len)
+ return 0;
+
+ DEBUG(2, "blkmtd: write: dev = `%s' to = %lld len = %zd buf = %p\n",
+ bdevname(dev->binding->bd_dev), to, len, buf);
+
+ /* handle readonly and out of range numbers */
+
+ if(!dev->wr_buf) {
+ err("error: trying to write to a readonly device %s", mtd->name);
+ return -EROFS;
+ }
+
+ if(to >= mtd->size) {
+ return -ENOSPC;
+ }
+
+ if(to + len > mtd->size) {
+ len = (mtd->size - to);
+ }
+
+ err = write_pages(dev, buf, to, len, retlen);
+ if(err < 0)
+ *retlen = 0;
+ else
+ err = 0;
+ DEBUG(2, "blkmtd: write: end, err = %d\n", err);
+ return err;
+}
+
+
+/* sync the device - wait until the write queue is empty */
+static void blkmtd_sync(struct mtd_info *mtd)
+{
+ struct blkmtd_dev *dev = mtd->priv;
+ struct kiobuf *iobuf = dev->wr_buf;
+
+ DEBUG(2, "blkmtd: sync: called\n");
+ if(iobuf == NULL)
+ return;
+
+ DEBUG(3, "blkmtd: kiovec: length = %d nr_pages = %d\n",
+ iobuf->length, iobuf->nr_pages);
+ down(&dev->wrbuf_mutex);
+ if(iobuf->nr_pages)
+ commit_pages(dev);
+ up(&dev->wrbuf_mutex);
+}
+
+
+#ifdef BLKMTD_PROC_DEBUG
+/* procfs stuff */
+static int blkmtd_proc_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len;
+ struct list_head *temp1, *temp2;
+
+ MOD_INC_USE_COUNT;
+
+ /* Count the size of the page lists */
+
+ len = sprintf(page, "dev\twr_idx\tmax_idx\tnrpages\tclean\tdirty\tlocked\tlru\n");
+ list_for_each_safe(temp1, temp2, &blkmtd_device_list) {
+ struct blkmtd_dev *dev = list_entry(temp1, struct blkmtd_dev,
+ list);
+ struct list_head *temp;
+ struct page *pagei;
+
+ int clean = 0, dirty = 0, locked = 0, lru = 0;
+ /* Count the size of the page lists */
+ list_for_each(temp, &dev->binding->bd_inode->i_mapping->clean_pages) {
+ pagei = list_entry(temp, struct page, list);
+ clean++;
+ if(PageLocked(pagei))
+ locked++;
+ if(PageDirty(pagei))
+ dirty++;
+ if(PageLRU(pagei))
+ lru++;
+ }
+ list_for_each(temp, &dev->binding->bd_inode->i_mapping->dirty_pages) {
+ pagei = list_entry(temp, struct page, list);
+ if(PageLocked(pagei))
+ locked++;
+ if(PageDirty(pagei))
+ dirty++;
+ if(PageLRU(pagei))
+ lru++;
+ }
+ list_for_each(temp, &dev->binding->bd_inode->i_mapping->locked_pages) {
+ pagei = list_entry(temp, struct page, list);
+ if(PageLocked(pagei))
+ locked++;
+ if(PageDirty(pagei))
+ dirty++;
+ if(PageLRU(pagei))
+ lru++;
+ }
+
+ len += sprintf(page+len, "mtd%d:\t%ld\t%d\t%ld\t%d\t%d\t%d\t%d\n",
+ dev->mtd_info.index,
+ (dev->wr_buf && dev->wr_buf->nr_pages) ?
+ dev->wr_buf->blocks[dev->wr_buf->nr_pages-1] : 0,
+ (dev->wr_buf) ? dev->wr_buf->nr_pages : 0,
+ dev->binding->bd_inode->i_mapping->nrpages,
+ clean, dirty, locked, lru);
+ }
+
+ if(len <= count)
+ *eof = 1;
+
+ MOD_DEC_USE_COUNT;
+ return len;
+}
+#endif
+
+
+static void free_device(struct blkmtd_dev *dev)
+{
+ DEBUG(2, "blkmtd: free_device() dev = %p\n", dev);
+ if(dev) {
+ del_mtd_device(&dev->mtd_info);
+ info("mtd%d: [%s] removed", dev->mtd_info.index,
+ dev->mtd_info.name + strlen("blkmtd: "));
+ if(dev->mtd_info.eraseregions)
+ kfree(dev->mtd_info.eraseregions);
+ if(dev->mtd_info.name)
+ kfree(dev->mtd_info.name);
+
+ if(dev->rd_buf) {
+ dev->rd_buf->locked = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
+ if(dev->rd_buf->blocks)
+ kfree(dev->rd_buf->blocks);
+#endif
+ free_kiovec(1, &dev->rd_buf);
+ }
+ if(dev->wr_buf) {
+ dev->wr_buf->locked = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
+ if(dev->wr_buf->blocks)
+				kfree(dev->wr_buf->blocks);
+#endif
+ free_kiovec(1, &dev->wr_buf);
+ }
+
+ if(dev->binding) {
+ kdev_t kdev = to_kdev_t(dev->binding->bd_dev);
+ invalidate_inode_pages(dev->binding->bd_inode);
+ set_blocksize(kdev, 1 << 10);
+ blkdev_put(dev->binding, BDEV_RAW);
+ }
+ kfree(dev);
+ }
+}
+
+
+/* For a given size and initial erase size, calculate the number
+ * and size of each erase region. Goes round the loop twice:
+ * once to find out how many regions are needed, then allocates space
+ * for them and goes round the loop again to fill it in.
+ */
+static struct mtd_erase_region_info *calc_erase_regions(
+ size_t erase_size, size_t total_size, int *regions)
+{
+ struct mtd_erase_region_info *info = NULL;
+
+ DEBUG(2, "calc_erase_regions, es = %zd size = %zd regions = %d\n",
+ erase_size, total_size, *regions);
+ /* Make any user specified erasesize be a power of 2
+ and at least PAGE_SIZE */
+ if(erase_size) {
+ int es = erase_size;
+ erase_size = 1;
+ while(es != 1) {
+ es >>= 1;
+ erase_size <<= 1;
+ }
+ if(erase_size < PAGE_SIZE)
+ erase_size = PAGE_SIZE;
+ } else {
+ erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
+ }
+
+ *regions = 0;
+
+ do {
+ int tot_size = total_size;
+ int er_size = erase_size;
+ int count = 0, offset = 0, regcnt = 0;
+
+ while(tot_size) {
+ count = tot_size / er_size;
+ if(count) {
+ tot_size = tot_size % er_size;
+ if(info) {
+ DEBUG(2, "adding to erase info off=%d er=%d cnt=%d\n",
+ offset, er_size, count);
+ (info+regcnt)->offset = offset;
+ (info+regcnt)->erasesize = er_size;
+ (info+regcnt)->numblocks = count;
+ (*regions)++;
+ }
+ regcnt++;
+ offset += (count * er_size);
+ }
+ while(er_size > tot_size)
+ er_size >>= 1;
+ }
+ if(info == NULL) {
+ info = kmalloc(regcnt * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
+ if(!info)
+ break;
+ }
+ } while(!(*regions));
+ DEBUG(2, "calc_erase_regions done, es = %zd size = %zd regions = %d\n",
+ erase_size, total_size, *regions);
+ return info;
+}
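/*
 * Worked example (illustrative): erase_size = 64KiB, total_size = 200KiB.
 * The first pass (info == NULL) just counts: 3 x 64KiB blocks leave 8KiB,
 * the erase size is halved 64->32->16->8KiB, one 8KiB block fits, so
 * regcnt = 2 and a two-entry array is allocated.  The second pass fills it:
 *   region 0: offset 0x00000, erasesize 64KiB, numblocks 3
 *   region 1: offset 0x30000, erasesize  8KiB, numblocks 1
 */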
+
+
+extern kdev_t name_to_kdev_t(char *line) __init;
+
+
+static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size)
+{
+ int maj, min;
+ kdev_t kdev;
+ int mode;
+ struct blkmtd_dev *dev;
+
+#ifdef MODULE
+ struct file *file = NULL;
+ struct inode *inode;
+#endif
+
+ if(!devname)
+ return NULL;
+
+ /* Get a handle on the device */
+ mode = (readonly) ? O_RDONLY : O_RDWR;
+
+#ifdef MODULE
+
+ file = filp_open(devname, mode, 0);
+ if(IS_ERR(file)) {
+ err("error: cant open device %s", devname);
+ DEBUG(2, "blkmtd: filp_open returned %ld\n", PTR_ERR(file));
+ return NULL;
+ }
+
+	/* determine if this is a block device and
+ * if so get its major and minor numbers
+ */
+ inode = file->f_dentry->d_inode;
+ if(!S_ISBLK(inode->i_mode)) {
+ err("%s not a block device", devname);
+ filp_close(file, NULL);
+ return NULL;
+ }
+ kdev = inode->i_rdev;
+ filp_close(file, NULL);
+#else
+ kdev = name_to_kdev_t(devname);
+#endif /* MODULE */
+
+ if(!kdev) {
+ err("bad block device: `%s'", devname);
+ return NULL;
+ }
+
+ maj = MAJOR(kdev);
+ min = MINOR(kdev);
+ DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n",
+ maj, min);
+
+ if(maj == MTD_BLOCK_MAJOR) {
+ err("attempting to use an MTD device as a block device");
+ return NULL;
+ }
+
+ DEBUG(1, "blkmtd: devname = %s\n", bdevname(kdev));
+
+ dev = kmalloc(sizeof(struct blkmtd_dev), GFP_KERNEL);
+ if(dev == NULL)
+ return NULL;
+
+ memset(dev, 0, sizeof(struct blkmtd_dev));
+ if(alloc_kiovec(1, &dev->rd_buf)) {
+ err("cant allocate read iobuf");
+ goto devinit_err;
+ }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
+ dev->rd_buf->blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long), GFP_KERNEL);
+ if(dev->rd_buf->blocks == NULL) {
+ crit("cant allocate rd_buf blocks");
+ goto devinit_err;
+ }
+#endif
+
+ if(!readonly) {
+ if(alloc_kiovec(1, &dev->wr_buf)) {
+ err("cant allocate kiobuf - readonly enabled");
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
+ } else {
+ dev->wr_buf->blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long), GFP_KERNEL);
+ if(dev->wr_buf->blocks == NULL) {
+ crit("cant allocate wr_buf blocks - readonly enabled");
+				free_kiovec(1, &dev->wr_buf); dev->wr_buf = NULL;	/* fall back to read-only */
+ }
+#endif
+ }
+ if(dev->wr_buf)
+ init_MUTEX(&dev->wrbuf_mutex);
+ }
+
+ /* get the block device */
+ dev->binding = bdget(kdev_t_to_nr(MKDEV(maj, min)));
+ if(blkdev_get(dev->binding, mode, 0, BDEV_RAW))
+ goto devinit_err;
+
+ if(set_blocksize(kdev, PAGE_SIZE)) {
+ err("cant set block size to PAGE_SIZE on %s", bdevname(kdev));
+ goto devinit_err;
+ }
+
+ dev->mtd_info.size = dev->binding->bd_inode->i_size & PAGE_MASK;
+
+ /* Setup the MTD structure */
+ /* make the name contain the block device in */
+ dev->mtd_info.name = kmalloc(sizeof("blkmtd: ") + strlen(devname), GFP_KERNEL);
+ if(dev->mtd_info.name == NULL)
+ goto devinit_err;
+
+ sprintf(dev->mtd_info.name, "blkmtd: %s", devname);
+ dev->mtd_info.eraseregions = calc_erase_regions(erase_size, dev->mtd_info.size,
+ &dev->mtd_info.numeraseregions);
+ if(dev->mtd_info.eraseregions == NULL)
+ goto devinit_err;
+
+ dev->mtd_info.erasesize = dev->mtd_info.eraseregions->erasesize;
+ DEBUG(1, "blkmtd: init: found %d erase regions\n",
+ dev->mtd_info.numeraseregions);
+
+ if(readonly) {
+ dev->mtd_info.type = MTD_ROM;
+ dev->mtd_info.flags = MTD_CAP_ROM;
+ } else {
+ dev->mtd_info.type = MTD_RAM;
+ dev->mtd_info.flags = MTD_CAP_RAM;
+ }
+ dev->mtd_info.erase = blkmtd_erase;
+ dev->mtd_info.read = blkmtd_read;
+ dev->mtd_info.write = blkmtd_write;
+ dev->mtd_info.sync = blkmtd_sync;
+ dev->mtd_info.point = 0;
+ dev->mtd_info.unpoint = 0;
+ dev->mtd_info.priv = dev;
+ dev->mtd_info.owner = THIS_MODULE;
+
+ list_add(&dev->list, &blkmtd_device_list);
+ if (add_mtd_device(&dev->mtd_info)) {
+ /* Device didnt get added, so free the entry */
+ list_del(&dev->list);
+ free_device(dev);
+ return NULL;
+ } else {
+ info("mtd%d: [%s] erase_size = %dKiB %s",
+ dev->mtd_info.index, dev->mtd_info.name + strlen("blkmtd: "),
+ dev->mtd_info.erasesize >> 10,
+ (dev->wr_buf) ? "" : "(read-only)");
+ }
+
+ return dev;
+
+ devinit_err:
+ free_device(dev);
+ return NULL;
+}
+
+
+/* Cleanup and exit - sync and remove the MTD devices */
+static void __devexit cleanup_blkmtd(void)
+{
+ struct list_head *temp1, *temp2;
+#ifdef BLKMTD_PROC_DEBUG
+ if(blkmtd_proc) {
+ remove_proc_entry("blkmtd_debug", NULL);
+ }
+#endif
+
+ /* Remove the MTD devices */
+ list_for_each_safe(temp1, temp2, &blkmtd_device_list) {
+ struct blkmtd_dev *dev = list_entry(temp1, struct blkmtd_dev,
+ list);
+ blkmtd_sync(&dev->mtd_info);
+ free_device(dev);
+ }
+}
+
+#ifndef MODULE
+
+/* Handle kernel boot params */
+
+
+static int __init param_blkmtd_device(char *str)
+{
+ int i;
+
+ for(i = 0; i < MAX_DEVICES; i++) {
+ device[i] = str;
+ DEBUG(2, "blkmtd: device setup: %d = %s\n", i, device[i]);
+ strsep(&str, ",");
+ }
+ return 1;
+}
+
+
+static int __init param_blkmtd_erasesz(char *str)
+{
+ int i;
+ for(i = 0; i < MAX_DEVICES; i++) {
+ char *val = strsep(&str, ",");
+ if(val)
+ erasesz[i] = simple_strtoul(val, NULL, 0);
+ DEBUG(2, "blkmtd: erasesz setup: %d = %d\n", i, erasesz[i]);
+ }
+
+ return 1;
+}
+
+
+static int __init param_blkmtd_ro(char *str)
+{
+ int i;
+ for(i = 0; i < MAX_DEVICES; i++) {
+ char *val = strsep(&str, ",");
+ if(val)
+ ro[i] = simple_strtoul(val, NULL, 0);
+ DEBUG(2, "blkmtd: ro setup: %d = %d\n", i, ro[i]);
+ }
+
+ return 1;
+}
+
+
+static int __init param_blkmtd_sync(char *str)
+{
+ if(str[0] == '1')
+ sync = 1;
+ return 1;
+}
+
+__setup("blkmtd_device=", param_blkmtd_device);
+__setup("blkmtd_erasesz=", param_blkmtd_erasesz);
+__setup("blkmtd_ro=", param_blkmtd_ro);
+__setup("blkmtd_sync=", param_blkmtd_sync);
+
+#endif
+
+
+/* Startup */
+static int __init init_blkmtd(void)
+{
+ int i;
+
+ /* Check args - device[0] is the bare minimum*/
+ if(!device[0]) {
+ err("error: missing `device' name\n");
+ return -EINVAL;
+ }
+
+ for(i = 0; i < MAX_DEVICES; i++)
+ add_device(device[i], ro[i], erasesz[i] << 10);
+
+ if(list_empty(&blkmtd_device_list))
+ goto init_err;
+
+ info("version " VERSION);
+
+#ifdef BLKMTD_PROC_DEBUG
+ /* create proc entry */
+ DEBUG(2, "Creating /proc/blkmtd_debug\n");
+ blkmtd_proc = create_proc_read_entry("blkmtd_debug", 0444,
+ NULL, blkmtd_proc_read, NULL);
+ if(blkmtd_proc == NULL) {
+ err("Cant create /proc/blkmtd_debug");
+ } else {
+ blkmtd_proc->owner = THIS_MODULE;
+ }
+#endif
+
+ if(!list_empty(&blkmtd_device_list))
+ /* Everything is ok if we got here */
+ return 0;
+
+ init_err:
+ return -EINVAL;
+}
+
+module_init(init_blkmtd);
+module_exit(cleanup_blkmtd);
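
A usage sketch for the driver above (device paths and sizes are examples only). Built as a module, the array parameters take up to four comma-separated values:

    insmod blkmtd.o device=/dev/loop0,/dev/sda3 erasesz=64,128 ro=0,1 sync=1

which binds /dev/loop0 read-write with a 64KiB erase size and /dev/sda3 read-only with 128KiB, using synchronous writes. Built into the kernel, the equivalent boot parameters are those registered with __setup() above:

    blkmtd_device=/dev/loop0,/dev/sda3 blkmtd_erasesz=64,128 blkmtd_ro=0,1 blkmtd_sync=1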
diff --git a/linux-2.4.x/drivers/mtd/devices/blkmtd.c b/linux-2.4.x/drivers/mtd/devices/blkmtd.c
index 31e7174..34a0b38 100644
--- a/linux-2.4.x/drivers/mtd/devices/blkmtd.c
+++ b/linux-2.4.x/drivers/mtd/devices/blkmtd.c
@@ -1,979 +1,737 @@
-/*
- * $Id: blkmtd.c,v 1.7 2001/11/10 17:06:30 spse Exp $
+/*
+ * $Id: blkmtd.c,v 1.31 2006/03/29 08:44:02 dwmw2 Exp $
*
* blkmtd.c - use a block device as a fake MTD
*
* Author: Simon Evans <spse@secret.org.uk>
*
- * Copyright (C) 2001 Simon Evans
- *
+ * Copyright (C) 2001,2002 Simon Evans
+ *
* Licence: GPL
*
* How it works:
- * The driver uses raw/io to read/write the device and the page
- * cache to cache access. Writes update the page cache with the
- * new data but make a copy of the new page(s) and then a kernel
- * thread writes pages out to the device in the background. This
- * ensures that writes are order even if a page is updated twice.
- * Also, since pages in the page cache are never marked as dirty,
- * we dont have to worry about writepage() being called on some
- * random page which may not be in the write order.
- *
- * Erases are handled like writes, so the callback is called after
- * the page cache has been updated. Sync()ing will wait until it is
- * all done.
+ * The driver uses raw/io to read/write the device and the page
+ * cache to cache access. Writes update the page cache with the
+ * new data, mark it dirty and add the page to a BIO which
+ * is then written out.
*
- * It can be loaded Read-Only to prevent erases and writes to the
- * medium.
+ * It can be loaded Read-Only to prevent erases and writes to the
+ * medium.
*
- * Todo:
- * Make the write queue size dynamic so this it is not too big on
- * small memory systems and too small on large memory systems.
- *
- * Page cache usage may still be a bit wrong. Check we are doing
- * everything properly.
- *
- * Somehow allow writes to dirty the page cache so we dont use too
- * much memory making copies of outgoing pages. Need to handle case
- * where page x is written to, then page y, then page x again before
- * any of them have been committed to disk.
- *
- * Reading should read multiple pages at once rather than using
- * readpage() for each one. This is easy and will be fixed asap.
*/
-
#include <linux/config.h>
#include <linux/module.h>
-
#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
#include <linux/pagemap.h>
-#include <linux/iobuf.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-#include <linux/mtd/compatmac.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/mount.h>
#include <linux/mtd/mtd.h>
+#include <linux/mutex.h>
-#ifdef CONFIG_MTD_DEBUG
-#ifdef CONFIG_PROC_FS
-# include <linux/proc_fs.h>
-# define BLKMTD_PROC_DEBUG
- static struct proc_dir_entry *blkmtd_proc;
-#endif
-#endif
+#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
+#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "blkmtd: " format "\n" , ## arg)
+#define crit(format, arg...) printk(KERN_CRIT "blkmtd: " format "\n" , ## arg)
/* Default erase size in K, always make it a multiple of PAGE_SIZE */
-#define CONFIG_MTD_BLKDEV_ERASESIZE 128
-#define VERSION "1.7"
-extern int *blk_size[];
-extern int *blksize_size[];
+#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10) /* 128KiB */
+#define VERSION "$Revision: 1.31 $"
/* Info for the block device */
-typedef struct mtd_raw_dev_data_s {
- struct block_device *binding;
- int sector_size, sector_bits;
- int partial_last_page; // 0 if device ends on page boundary, else page no of last page
- int last_page_sectors; // Number of sectors in last page if partial_last_page != 0
- size_t totalsize;
- int readonly;
- struct address_space as;
- struct mtd_info mtd_info;
-} mtd_raw_dev_data_t;
-
-/* Info for each queue item in the write queue */
-typedef struct mtdblkdev_write_queue_s {
- mtd_raw_dev_data_t *rawdevice;
- struct page **pages;
- int pagenr;
- int pagecnt;
- int iserase;
-} mtdblkdev_write_queue_t;
-
-
-/* Our erase page - always remains locked. */
-static struct page *erase_page;
+struct blkmtd_dev {
+ struct list_head list;
+ struct block_device *blkdev;
+ struct mtd_info mtd_info;
+ struct mutex wrbuf_mutex;
+};
+
/* Static info about the MTD, used in cleanup_module */
-static mtd_raw_dev_data_t *mtd_rawdevice;
-
-/* Write queue fixed size */
-#define WRITE_QUEUE_SZ 512
-
-/* Storage for the write queue */
-static mtdblkdev_write_queue_t *write_queue;
-static int write_queue_sz = WRITE_QUEUE_SZ;
-static int volatile write_queue_head;
-static int volatile write_queue_tail;
-static int volatile write_queue_cnt;
-static spinlock_t mbd_writeq_lock = SPIN_LOCK_UNLOCKED;
-
-/* Tell the write thread to finish */
-static volatile int write_task_finish;
-
-/* ipc with the write thread */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,3,0)
-static DECLARE_MUTEX_LOCKED(thread_sem);
-static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
-static DECLARE_WAIT_QUEUE_HEAD(mtbd_sync_wq);
-#else
-static struct semaphore thread_sem = MUTEX_LOCKED;
-DECLARE_WAIT_QUEUE_HEAD(thr_wq);
-DECLARE_WAIT_QUEUE_HEAD(mtbd_sync_wq);
-#endif
+static LIST_HEAD(blkmtd_device_list);
+static void blkmtd_sync(struct mtd_info *mtd);
+
+#define MAX_DEVICES 4
+
/* Module parameters passed by insmod/modprobe */
-char *device; /* the block device to use */
-int erasesz; /* optional default erase size */
-int ro; /* optional read only flag */
-int bs; /* optionally force the block size (avoid using) */
-int count; /* optionally force the block count (avoid using) */
-int wqs; /* optionally set the write queue size */
+static char *device[MAX_DEVICES]; /* the block device to use */
+static int erasesz[MAX_DEVICES]; /* optional default erase size */
+static int ro[MAX_DEVICES]; /* optional read only flag */
+static int sync;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
-MODULE_PARM(device, "s");
+module_param_array(device, charp, NULL, 0);
MODULE_PARM_DESC(device, "block device to use");
-MODULE_PARM(erasesz, "i");
-MODULE_PARM_DESC(erasesz, "optional erase size to use in KB. eg 4=4K.");
-MODULE_PARM(ro, "i");
+module_param_array(erasesz, int, NULL, 0);
+MODULE_PARM_DESC(erasesz, "optional erase size to use in KiB. eg 4=4KiB.");
+module_param_array(ro, bool, NULL, 0);
MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
-MODULE_PARM(bs, "i");
-MODULE_PARM_DESC(bs, "force the block size in bytes");
-MODULE_PARM(count, "i");
-MODULE_PARM_DESC(count, "force the block count");
-MODULE_PARM(wqs, "i");
-#endif
+module_param(sync, bool, 0);
+MODULE_PARM_DESC(sync, "1=Synchronous writes");
+
+/* completion handler for BIO reads */
+static int bi_read_complete(struct bio *bio, unsigned int bytes_done, int error)
+{
+ if (bio->bi_size)
+ return 1;
+
+ complete((struct completion*)bio->bi_private);
+ return 0;
+}
-/* Page cache stuff */
-/* writepage() - should never be called - catch it anyway */
-static int blkmtd_writepage(struct page *page)
+/* completion handler for BIO writes */
+static int bi_write_complete(struct bio *bio, unsigned int bytes_done, int error)
{
- printk("blkmtd: writepage called!!!\n");
- return -EIO;
+ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+
+ if (bio->bi_size)
+ return 1;
+
+ if(!uptodate)
+ err("bi_write_complete: not uptodate\n");
+
+ do {
+ struct page *page = bvec->bv_page;
+ DEBUG(3, "Cleaning up page %ld\n", page->index);
+ if (--bvec >= bio->bi_io_vec)
+ prefetchw(&bvec->bv_page->flags);
+
+ if (uptodate) {
+ SetPageUptodate(page);
+ } else {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ }
+ clear_page_dirty(page);
+ unlock_page(page);
+ page_cache_release(page);
+ } while (bvec >= bio->bi_io_vec);
+
+ complete((struct completion*)bio->bi_private);
+ return 0;
}
-/* readpage() - reads one page from the block device */
-static int blkmtd_readpage(mtd_raw_dev_data_t *rawdevice, struct page *page)
-{
- int err;
- int sectornr, sectors, i;
- struct kiobuf *iobuf;
- kdev_t dev;
- unsigned long *blocks;
-
- if(!rawdevice) {
- printk("blkmtd: readpage: PANIC file->private_data == NULL\n");
- return -EIO;
- }
- dev = to_kdev_t(rawdevice->binding->bd_dev);
-
- DEBUG(2, "blkmtd: readpage called, dev = `%s' page = %p index = %ld\n",
- bdevname(dev), page, page->index);
-
- if(Page_Uptodate(page)) {
- DEBUG(2, "blkmtd: readpage page %ld is already upto date\n", page->index);
- UnlockPage(page);
- return 0;
- }
-
- ClearPageUptodate(page);
- ClearPageError(page);
-
- /* see if page is in the outgoing write queue */
- spin_lock(&mbd_writeq_lock);
- if(write_queue_cnt) {
- int i = write_queue_tail;
- while(i != write_queue_head) {
- mtdblkdev_write_queue_t *item = &write_queue[i];
- if(page->index >= item->pagenr && page->index < item->pagenr+item->pagecnt) {
- /* yes it is */
- int index = page->index - item->pagenr;
-
- DEBUG(2, "blkmtd: readpage: found page %ld in outgoing write queue\n",
- page->index);
- if(item->iserase) {
- memset(page_address(page), 0xff, PAGE_SIZE);
- } else {
- memcpy(page_address(page), page_address(item->pages[index]), PAGE_SIZE);
+/* read one page from the block device */
+static int blkmtd_readpage(struct blkmtd_dev *dev, struct page *page)
+{
+ struct bio *bio;
+ struct completion event;
+ int err = -ENOMEM;
+
+ if(PageUptodate(page)) {
+ DEBUG(2, "blkmtd: readpage page %ld is already upto date\n", page->index);
+ unlock_page(page);
+ return 0;
}
- SetPageUptodate(page);
- flush_dcache_page(page);
- UnlockPage(page);
- spin_unlock(&mbd_writeq_lock);
- return 0;
- }
- i++;
- i %= write_queue_sz;
- }
- }
- spin_unlock(&mbd_writeq_lock);
-
-
- DEBUG(3, "blkmtd: readpage: getting kiovec\n");
- err = alloc_kiovec(1, &iobuf);
- if (err) {
- printk("blkmtd: cant allocate kiobuf\n");
- SetPageError(page);
- return err;
- }
-
- /* Pre 2.4.4 doesn't have space for the block list in the kiobuf */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
- blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long));
- if(blocks == NULL) {
- printk("blkmtd: cant allocate iobuf blocks\n");
- free_kiovec(1, &iobuf);
- SetPageError(page);
- return -ENOMEM;
- }
-#else
- blocks = iobuf->blocks;
-#endif
- iobuf->offset = 0;
- iobuf->nr_pages = 1;
- iobuf->length = PAGE_SIZE;
- iobuf->locked = 1;
- iobuf->maplist[0] = page;
- sectornr = page->index << (PAGE_SHIFT - rawdevice->sector_bits);
- sectors = 1 << (PAGE_SHIFT - rawdevice->sector_bits);
- if(rawdevice->partial_last_page && page->index == rawdevice->partial_last_page) {
- DEBUG(3, "blkmtd: handling partial last page\n");
- sectors = rawdevice->last_page_sectors;
- }
- DEBUG(3, "blkmtd: readpage: sectornr = %d sectors = %d\n", sectornr, sectors);
- for(i = 0; i < sectors; i++) {
- blocks[i] = sectornr++;
- }
- /* If only a partial page read in, clear the rest of the page */
- if(rawdevice->partial_last_page && page->index == rawdevice->partial_last_page) {
- int offset = rawdevice->last_page_sectors << rawdevice->sector_bits;
- int count = PAGE_SIZE-offset;
- DEBUG(3, "blkmtd: clear last partial page: offset = %d count = %d\n", offset, count);
- memset(page_address(page)+offset, 0, count);
- sectors = rawdevice->last_page_sectors;
- }
-
-
- DEBUG(3, "bklmtd: readpage: starting brw_kiovec\n");
- err = brw_kiovec(READ, 1, &iobuf, dev, blocks, rawdevice->sector_size);
- DEBUG(3, "blkmtd: readpage: finished, err = %d\n", err);
- iobuf->locked = 0;
- free_kiovec(1, &iobuf);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
- kfree(blocks);
-#endif
+ ClearPageUptodate(page);
+ ClearPageError(page);
+
+ bio = bio_alloc(GFP_KERNEL, 1);
+ if(bio) {
+ init_completion(&event);
+ bio->bi_bdev = dev->blkdev;
+ bio->bi_sector = page->index << (PAGE_SHIFT-9);
+ bio->bi_private = &event;
+ bio->bi_end_io = bi_read_complete;
+ if(bio_add_page(bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
+ submit_bio(READ_SYNC, bio);
+ wait_for_completion(&event);
+ err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
+ bio_put(bio);
+ }
+ }
- if(err != PAGE_SIZE) {
- printk("blkmtd: readpage: error reading page %ld\n", page->index);
- memset(page_address(page), 0, PAGE_SIZE);
- SetPageError(page);
- err = -EIO;
- } else {
- DEBUG(3, "blkmtd: readpage: setting page upto date\n");
- SetPageUptodate(page);
- err = 0;
- }
- flush_dcache_page(page);
- UnlockPage(page);
- DEBUG(2, "blkmtd: readpage: finished, err = %d\n", err);
- return 0;
+ if(err)
+ SetPageError(page);
+ else
+ SetPageUptodate(page);
+ flush_dcache_page(page);
+ unlock_page(page);
+ return err;
}
-
-static struct address_space_operations blkmtd_aops = {
- writepage: blkmtd_writepage,
- readpage: NULL,
-};
-
-/* This is the kernel thread that empties the write queue to disk */
-static int write_queue_task(void *data)
+/* write out the current BIO and wait for it to finish */
+static int blkmtd_write_out(struct bio *bio)
{
- int err;
- struct task_struct *tsk = current;
- struct kiobuf *iobuf;
- unsigned long *blocks;
-
- DECLARE_WAITQUEUE(wait, tsk);
- DEBUG(1, "blkmtd: writetask: starting (pid = %d)\n", tsk->pid);
- daemonize();
- strcpy(tsk->comm, "blkmtdd");
- tsk->tty = NULL;
- spin_lock_irq(&tsk->sigmask_lock);
- sigfillset(&tsk->blocked);
- recalc_sigpending(tsk);
- spin_unlock_irq(&tsk->sigmask_lock);
- exit_sighand(tsk);
-
- if(alloc_kiovec(1, &iobuf)) {
- printk("blkmtd: write_queue_task cant allocate kiobuf\n");
- return 0;
- }
-
- /* Pre 2.4.4 doesn't have space for the block list in the kiobuf */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
- blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long));
- if(blocks == NULL) {
- printk("blkmtd: write_queue_task cant allocate iobuf blocks\n");
- free_kiovec(1, &iobuf);
- return 0;
- }
-#else
- blocks = iobuf->blocks;
-#endif
+ struct completion event;
+ int err;
- DEBUG(2, "blkmtd: writetask: entering main loop\n");
- add_wait_queue(&thr_wq, &wait);
-
- while(1) {
- spin_lock(&mbd_writeq_lock);
-
- if(!write_queue_cnt) {
- /* If nothing in the queue, wake up anyone wanting to know when there
- is space in the queue then sleep for 2*HZ */
- spin_unlock(&mbd_writeq_lock);
- DEBUG(4, "blkmtd: writetask: queue empty\n");
- if(waitqueue_active(&mtbd_sync_wq))
- wake_up(&mtbd_sync_wq);
- interruptible_sleep_on_timeout(&thr_wq, 2*HZ);
- DEBUG(4, "blkmtd: writetask: woken up\n");
- if(write_task_finish)
- break;
- } else {
- /* we have stuff to write */
- mtdblkdev_write_queue_t *item = &write_queue[write_queue_tail];
- struct page **pages = item->pages;
-
- int i;
- int sectornr = item->pagenr << (PAGE_SHIFT - item->rawdevice->sector_bits);
- int sectorcnt = item->pagecnt << (PAGE_SHIFT - item->rawdevice->sector_bits);
- int max_sectors = KIO_MAX_SECTORS >> (item->rawdevice->sector_bits - 9);
- kdev_t dev = to_kdev_t(item->rawdevice->binding->bd_dev);
-
- /* If we are writing to the last page on the device and it doesn't end
- * on a page boundary, subtract the number of sectors that dont exist.
- */
- if(item->rawdevice->partial_last_page &&
- (item->pagenr + item->pagecnt -1) == item->rawdevice->partial_last_page) {
- sectorcnt -= (1 << (PAGE_SHIFT - item->rawdevice->sector_bits));
- sectorcnt += item->rawdevice->last_page_sectors;
- }
-
- DEBUG(3, "blkmtd: writetask: got %d queue items\n", write_queue_cnt);
- set_current_state(TASK_RUNNING);
- spin_unlock(&mbd_writeq_lock);
-
- DEBUG(2, "blkmtd: writetask: writing pagenr = %d pagecnt = %d sectornr = %d sectorcnt = %d\n",
- item->pagenr, item->pagecnt, sectornr, sectorcnt);
-
- iobuf->offset = 0;
- iobuf->locked = 1;
-
- /* Loop through all the pages to be written in the queue item, remembering
- we can only write KIO_MAX_SECTORS at a time */
-
- while(sectorcnt) {
- int cursectors = (sectorcnt < max_sectors) ? sectorcnt : max_sectors;
- int cpagecnt = (cursectors << item->rawdevice->sector_bits) + PAGE_SIZE-1;
- cpagecnt >>= PAGE_SHIFT;
-
- for(i = 0; i < cpagecnt; i++) {
- if(item->iserase) {
- iobuf->maplist[i] = erase_page;
- } else {
- iobuf->maplist[i] = *(pages++);
- }
- }
-
- for(i = 0; i < cursectors; i++) {
- blocks[i] = sectornr++;
- }
-
- iobuf->nr_pages = cpagecnt;
- iobuf->length = cursectors << item->rawdevice->sector_bits;
- DEBUG(3, "blkmtd: write_task: about to kiovec\n");
- err = brw_kiovec(WRITE, 1, &iobuf, dev, blocks, item->rawdevice->sector_size);
- DEBUG(3, "bklmtd: write_task: done, err = %d\n", err);
- if(err != (cursectors << item->rawdevice->sector_bits)) {
- /* if an error occured - set this to exit the loop */
- sectorcnt = 0;
- } else {
- sectorcnt -= cursectors;
+ if(!bio->bi_vcnt) {
+ bio_put(bio);
+ return 0;
}
- }
-
- /* free up the pages used in the write and list of pages used in the write
- queue item */
- iobuf->locked = 0;
- spin_lock(&mbd_writeq_lock);
- write_queue_cnt--;
- write_queue_tail++;
- write_queue_tail %= write_queue_sz;
- if(!item->iserase) {
- for(i = 0 ; i < item->pagecnt; i++) {
- UnlockPage(item->pages[i]);
- __free_pages(item->pages[i], 0);
+
+ init_completion(&event);
+ bio->bi_private = &event;
+ bio->bi_end_io = bi_write_complete;
+ submit_bio(WRITE_SYNC, bio);
+ wait_for_completion(&event);
+ DEBUG(3, "submit_bio completed, bi_vcnt = %d\n", bio->bi_vcnt);
+ err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
+ bio_put(bio);
+ return err;
+}
+
+
+/**
+ * blkmtd_add_page - add a page to the current BIO
+ * @bio: bio to add to (NULL to alloc initial bio)
+ * @blkdev: block device
+ * @page: page to add
+ * @pagecnt: pages left to add
+ *
+ * Adds a page to the current bio, allocating it if necessary. If it cannot be
+ * added, the current bio is written out and a new one is allocated. Returns
+ * the new bio to add or NULL on error
+ */
+static struct bio *blkmtd_add_page(struct bio *bio, struct block_device *blkdev,
+ struct page *page, int pagecnt)
+{
+
+ retry:
+ if(!bio) {
+ bio = bio_alloc(GFP_KERNEL, pagecnt);
+ if(!bio)
+ return NULL;
+ bio->bi_sector = page->index << (PAGE_SHIFT-9);
+ bio->bi_bdev = blkdev;
}
- kfree(item->pages);
- }
- item->pages = NULL;
- spin_unlock(&mbd_writeq_lock);
- /* Tell others there is some space in the write queue */
- if(waitqueue_active(&mtbd_sync_wq))
- wake_up(&mtbd_sync_wq);
- }
- }
- remove_wait_queue(&thr_wq, &wait);
- DEBUG(1, "blkmtd: writetask: exiting\n");
- free_kiovec(1, &iobuf);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
- kfree(blocks);
-#endif
- /* Tell people we have exitd */
- up(&thread_sem);
- return 0;
+ if(bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
+ blkmtd_write_out(bio);
+ bio = NULL;
+ goto retry;
+ }
+ return bio;
}
-/* Add a range of pages into the outgoing write queue, making copies of them */
-static int queue_page_write(mtd_raw_dev_data_t *rawdevice, struct page **pages,
- int pagenr, int pagecnt, int iserase)
+/**
+ * write_pages - write block of data to device via the page cache
+ * @dev: device to write to
+ * @buf: data source or NULL if erase (output is set to 0xff)
+ * @to: offset into output device
+ * @len: amount to data to write
+ * @retlen: amount of data written
+ *
+ * Grab pages from the page cache and fill them with the source data.
+ * A non-page-aligned start or end results in the page being read in and
+ * only part of it modified. Pages are added to the bio and then written
+ * out.
+ */
+static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
+ size_t len, size_t *retlen)
{
- struct page *outpage;
- struct page **new_pages = NULL;
- mtdblkdev_write_queue_t *item;
- int i;
- DECLARE_WAITQUEUE(wait, current);
- DEBUG(2, "blkmtd: queue_page_write: adding pagenr = %d pagecnt = %d\n", pagenr, pagecnt);
-
- if(!pagecnt)
- return 0;
-
- if(pages == NULL && !iserase)
- return -EINVAL;
-
- /* create a array for the list of pages */
- if(!iserase) {
- new_pages = kmalloc(pagecnt * sizeof(struct page *), GFP_KERNEL);
- if(new_pages == NULL)
- return -ENOMEM;
-
- /* make copies of the pages in the page cache */
- for(i = 0; i < pagecnt; i++) {
- outpage = alloc_pages(GFP_KERNEL, 0);
- if(!outpage) {
- while(i--) {
- UnlockPage(new_pages[i]);
- __free_pages(new_pages[i], 0);
+ int pagenr, offset;
+ size_t start_len = 0, end_len;
+ int pagecnt = 0;
+ int err = 0;
+ struct bio *bio = NULL;
+ size_t thislen = 0;
+
+ pagenr = to >> PAGE_SHIFT;
+ offset = to & ~PAGE_MASK;
+
+ DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %zd pagenr = %d offset = %d\n",
+ buf, (long)to, len, pagenr, offset);
+
+ /* see if we have to do a partial write at the start */
+ if(offset) {
+ start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
+ len -= start_len;
}
- kfree(new_pages);
- return -ENOMEM;
- }
- lock_page(outpage);
- memcpy(page_address(outpage), page_address(pages[i]), PAGE_SIZE);
- new_pages[i] = outpage;
- }
- }
-
- /* wait until there is some space in the write queue */
- test_lock:
- spin_lock(&mbd_writeq_lock);
- if(write_queue_cnt == write_queue_sz) {
- spin_unlock(&mbd_writeq_lock);
- DEBUG(3, "blkmtd: queue_page: Queue full\n");
- current->state = TASK_UNINTERRUPTIBLE;
- add_wait_queue(&mtbd_sync_wq, &wait);
- wake_up_interruptible(&thr_wq);
- schedule();
- current->state = TASK_RUNNING;
- remove_wait_queue(&mtbd_sync_wq, &wait);
- DEBUG(3, "blkmtd: queue_page_write: Queue has %d items in it\n", write_queue_cnt);
- goto test_lock;
- }
-
- DEBUG(3, "blkmtd: queue_page_write: qhead: %d qtail: %d qcnt: %d\n",
- write_queue_head, write_queue_tail, write_queue_cnt);
-
- /* fix up the queue item */
- item = &write_queue[write_queue_head];
- item->pages = new_pages;
- item->pagenr = pagenr;
- item->pagecnt = pagecnt;
- item->rawdevice = rawdevice;
- item->iserase = iserase;
-
- write_queue_head++;
- write_queue_head %= write_queue_sz;
- write_queue_cnt++;
- DEBUG(3, "blkmtd: queue_page_write: qhead: %d qtail: %d qcnt: %d\n",
- write_queue_head, write_queue_tail, write_queue_cnt);
- spin_unlock(&mbd_writeq_lock);
- DEBUG(2, "blkmtd: queue_page_write: finished\n");
- return 0;
+
+ /* calculate the length of the other two regions */
+ end_len = len & ~PAGE_MASK;
+ len -= end_len;
+
+ if(start_len)
+ pagecnt++;
+
+ if(len)
+ pagecnt += len >> PAGE_SHIFT;
+
+ if(end_len)
+ pagecnt++;
+
+ mutex_lock(&dev->wrbuf_mutex);
+
+ DEBUG(3, "blkmtd: write: start_len = %zd len = %zd end_len = %zd pagecnt = %d\n",
+ start_len, len, end_len, pagecnt);
+
+ if(start_len) {
+ /* do partial start region */
+ struct page *page;
+
+ DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %zd offset = %d\n",
+ pagenr, start_len, offset);
+
+ BUG_ON(!buf);
+ page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
+ lock_page(page);
+ if(PageDirty(page)) {
+ err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
+ to, start_len, len, end_len, pagenr);
+ BUG();
+ }
+ memcpy(page_address(page)+offset, buf, start_len);
+ set_page_dirty(page);
+ SetPageUptodate(page);
+ buf += start_len;
+ thislen = start_len;
+ bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
+ if(!bio) {
+ err = -ENOMEM;
+ err("bio_add_page failed\n");
+ goto write_err;
+ }
+ pagecnt--;
+ pagenr++;
+ }
+
+ /* Now do the main loop to a page aligned, n page sized output */
+ if(len) {
+ int pagesc = len >> PAGE_SHIFT;
+ DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
+ pagenr, pagesc);
+ while(pagesc) {
+ struct page *page;
+
+ /* see if page is in the page cache */
+ DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
+ page = grab_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr);
+			if(!page) {
+				warn("write: cannot grab cache page %d", pagenr);
+				err = -ENOMEM;
+				goto write_err;
+			}
+			if(PageDirty(page)) {
+				BUG();
+			}
+ if(!buf) {
+ memset(page_address(page), 0xff, PAGE_SIZE);
+ } else {
+ memcpy(page_address(page), buf, PAGE_SIZE);
+ buf += PAGE_SIZE;
+ }
+ bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
+ if(!bio) {
+ err = -ENOMEM;
+ err("bio_add_page failed\n");
+ goto write_err;
+ }
+ pagenr++;
+ pagecnt--;
+ set_page_dirty(page);
+ SetPageUptodate(page);
+ pagesc--;
+ thislen += PAGE_SIZE;
+ }
+ }
+
+ if(end_len) {
+ /* do the third region */
+ struct page *page;
+ DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %zd\n",
+ pagenr, end_len);
+ BUG_ON(!buf);
+ page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
+ lock_page(page);
+ if(PageDirty(page)) {
+ err("to = %lld start_len = %zd len = %zd end_len = %zd pagenr = %d\n",
+ to, start_len, len, end_len, pagenr);
+ BUG();
+ }
+ memcpy(page_address(page), buf, end_len);
+ set_page_dirty(page);
+ SetPageUptodate(page);
+ DEBUG(3, "blkmtd: write: writing out partial end\n");
+ thislen += end_len;
+ bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
+ if(!bio) {
+ err = -ENOMEM;
+ err("bio_add_page failed\n");
+ goto write_err;
+ }
+ pagenr++;
+ }
+
+ DEBUG(3, "blkmtd: write: got %d vectors to write\n", bio->bi_vcnt);
+ write_err:
+ if(bio)
+ blkmtd_write_out(bio);
+
+ DEBUG(2, "blkmtd: write: end, retlen = %zd, err = %d\n", *retlen, err);
+ mutex_unlock(&dev->wrbuf_mutex);
+
+ if(retlen)
+ *retlen = thislen;
+ return err;
}
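/*
 * A minimal standalone sketch, assuming plain userspace arithmetic, of the
 * split performed by write_pages() above: a write at offset `to' of length
 * `len' is divided into a partial leading page (read-modify-written), a run
 * of whole pages (simply overwritten) and a partial trailing page
 * (read-modify-written).  The helper name split_write() is hypothetical.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static void split_write(unsigned long to, size_t len)
{
	size_t offset = to & ~PAGE_MASK;	/* offset inside the first page */
	size_t start_len = 0, end_len;

	if (offset) {				/* partial first page? */
		start_len = (offset + len > PAGE_SIZE) ? PAGE_SIZE - offset : len;
		len -= start_len;
	}
	end_len = len & ~PAGE_MASK;		/* partial last page */
	len -= end_len;				/* whole pages remain */

	printf("head = %zu bytes, middle = %zu whole pages, tail = %zu bytes\n",
	       start_len, len >> PAGE_SHIFT, end_len);
}

int main(void)
{
	split_write(0x1234, 10000);	/* head = 3532, middle = 1, tail = 2372 */
	return 0;
}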
/* erase a specified part of the device */
static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
- mtd_raw_dev_data_t *rawdevice = mtd->priv;
- struct mtd_erase_region_info *einfo = mtd->eraseregions;
- int numregions = mtd->numeraseregions;
- size_t from;
- u_long len;
- int err = 0;
-
- /* check readonly */
- if(rawdevice->readonly) {
- printk("blkmtd: error: trying to erase readonly device %s\n", device);
- instr->state = MTD_ERASE_FAILED;
- goto erase_callback;
- }
-
- instr->state = MTD_ERASING;
- from = instr->addr;
- len = instr->len;
-
- /* check erase region has valid start and length */
- DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%x len = 0x%lx\n",
- bdevname(rawdevice->binding->bd_dev), from, len);
- while(numregions) {
- DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
- einfo->offset, einfo->erasesize, einfo->numblocks);
- if(from >= einfo->offset && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) {
- if(len == einfo->erasesize && ( (from - einfo->offset) % einfo->erasesize == 0))
- break;
- }
- numregions--;
- einfo++;
- }
-
- if(!numregions) {
- /* Not a valid erase block */
- printk("blkmtd: erase: invalid erase request 0x%lX @ 0x%08X\n", len, from);
- instr->state = MTD_ERASE_FAILED;
- err = -EIO;
- }
-
- if(instr->state != MTD_ERASE_FAILED) {
- /* start the erase */
- int pagenr, pagecnt;
- struct page *page, **pages;
- int i = 0;
-
- /* Handle the last page of the device not being whole */
- if(len < PAGE_SIZE)
- len = PAGE_SIZE;
-
- pagenr = from >> PAGE_SHIFT;
- pagecnt = len >> PAGE_SHIFT;
- DEBUG(3, "blkmtd: erase: pagenr = %d pagecnt = %d\n", pagenr, pagecnt);
-
- pages = kmalloc(pagecnt * sizeof(struct page *), GFP_KERNEL);
- if(pages == NULL) {
- err = -ENOMEM;
- instr->state = MTD_ERASE_FAILED;
- goto erase_out;
- }
-
-
- while(pagecnt) {
- /* get the page via the page cache */
- DEBUG(3, "blkmtd: erase: doing grab_cache_page() for page %d\n", pagenr);
- page = grab_cache_page(&rawdevice->as, pagenr);
- if(!page) {
- DEBUG(3, "blkmtd: erase: grab_cache_page() failed for page %d\n", pagenr);
- kfree(pages);
- err = -EIO;
- instr->state = MTD_ERASE_FAILED;
- goto erase_out;
- }
- memset(page_address(page), 0xff, PAGE_SIZE);
- pages[i] = page;
- pagecnt--;
- pagenr++;
- i++;
- }
- DEBUG(3, "blkmtd: erase: queuing page write\n");
- err = queue_page_write(rawdevice, NULL, from >> PAGE_SHIFT, len >> PAGE_SHIFT, 1);
- pagecnt = len >> PAGE_SHIFT;
- if(!err) {
- while(pagecnt--) {
- SetPageUptodate(pages[pagecnt]);
- UnlockPage(pages[pagecnt]);
- page_cache_release(pages[pagecnt]);
- flush_dcache_page(pages[pagecnt]);
- }
- kfree(pages);
- instr->state = MTD_ERASE_DONE;
- } else {
- while(pagecnt--) {
- SetPageError(pages[pagecnt]);
- page_cache_release(pages[pagecnt]);
- }
- kfree(pages);
- instr->state = MTD_ERASE_FAILED;
- }
- }
- erase_out:
- DEBUG(3, "blkmtd: erase: checking callback\n");
- erase_callback:
- if (instr->callback) {
- (*(instr->callback))(instr);
- }
- DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
- return err;
+ struct blkmtd_dev *dev = mtd->priv;
+ struct mtd_erase_region_info *einfo = mtd->eraseregions;
+ int numregions = mtd->numeraseregions;
+ size_t from;
+ u_long len;
+ int err = -EIO;
+ size_t retlen;
+
+ instr->state = MTD_ERASING;
+ from = instr->addr;
+ len = instr->len;
+
+ /* check erase region has valid start and length */
+ DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%zx len = 0x%lx\n",
+ mtd->name+9, from, len);
+ while(numregions) {
+ DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
+ einfo->offset, einfo->erasesize, einfo->numblocks);
+ if(from >= einfo->offset
+ && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) {
+ if(len == einfo->erasesize
+ && ( (from - einfo->offset) % einfo->erasesize == 0))
+ break;
+ }
+ numregions--;
+ einfo++;
+ }
+
+ if(!numregions) {
+ /* Not a valid erase block */
+ err("erase: invalid erase request 0x%lX @ 0x%08zX", len, from);
+ instr->state = MTD_ERASE_FAILED;
+ err = -EIO;
+ }
+
+ if(instr->state != MTD_ERASE_FAILED) {
+ /* do the erase */
+ DEBUG(3, "Doing erase from = %zd len = %ld\n", from, len);
+ err = write_pages(dev, NULL, from, len, &retlen);
+ if(err || retlen != len) {
+ err("erase failed err = %d", err);
+ instr->state = MTD_ERASE_FAILED;
+ } else {
+ instr->state = MTD_ERASE_DONE;
+ }
+ }
+
+ DEBUG(3, "blkmtd: erase: checking callback\n");
+ mtd_erase_callback(instr);
+ DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
+ return err;
}
/* read a range of the data via the page cache */
static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+ size_t *retlen, u_char *buf)
{
- mtd_raw_dev_data_t *rawdevice = mtd->priv;
- int err = 0;
- int offset;
- int pagenr, pages;
-
- *retlen = 0;
-
- DEBUG(2, "blkmtd: read: dev = `%s' from = %ld len = %d buf = %p\n",
- bdevname(rawdevice->binding->bd_dev), (long int)from, len, buf);
-
- pagenr = from >> PAGE_SHIFT;
- offset = from - (pagenr << PAGE_SHIFT);
-
- pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
- DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n", pagenr, offset, pages);
-
- /* just loop through each page, getting it via readpage() - slow but easy */
- while(pages) {
- struct page *page;
- int cpylen;
- DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
- page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice);
- if(IS_ERR(page)) {
- return PTR_ERR(page);
- }
- wait_on_page(page);
- if(!Page_Uptodate(page)) {
- /* error reading page */
- printk("blkmtd: read: page not uptodate\n");
- page_cache_release(page);
- return -EIO;
- }
-
- cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
- if(offset+cpylen > PAGE_SIZE)
- cpylen = PAGE_SIZE-offset;
-
- memcpy(buf + *retlen, page_address(page) + offset, cpylen);
- offset = 0;
- len -= cpylen;
- *retlen += cpylen;
- pagenr++;
- pages--;
- page_cache_release(page);
- }
-
- DEBUG(2, "blkmtd: end read: retlen = %d, err = %d\n", *retlen, err);
- return err;
+ struct blkmtd_dev *dev = mtd->priv;
+ int err = 0;
+ int offset;
+ int pagenr, pages;
+ size_t thislen = 0;
+
+ DEBUG(2, "blkmtd: read: dev = `%s' from = %lld len = %zd buf = %p\n",
+ mtd->name+9, from, len, buf);
+
+ if(from > mtd->size)
+ return -EINVAL;
+ if(from + len > mtd->size)
+ len = mtd->size - from;
+
+ pagenr = from >> PAGE_SHIFT;
+ offset = from - (pagenr << PAGE_SHIFT);
+
+ pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
+ DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n",
+ pagenr, offset, pages);
+
+ while(pages) {
+ struct page *page;
+ int cpylen;
+
+ DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
+ page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
+ if(IS_ERR(page)) {
+ err = -EIO;
+ goto readerr;
+ }
+
+ cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
+ if(offset+cpylen > PAGE_SIZE)
+ cpylen = PAGE_SIZE-offset;
+
+ memcpy(buf + thislen, page_address(page) + offset, cpylen);
+ offset = 0;
+ len -= cpylen;
+ thislen += cpylen;
+ pagenr++;
+ pages--;
+ if(!PageDirty(page))
+ page_cache_release(page);
+ }
+
+ readerr:
+ if(retlen)
+ *retlen = thislen;
+ DEBUG(2, "blkmtd: end read: retlen = %zd, err = %d\n", thislen, err);
+ return err;
}
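/*
 * A standalone userspace sketch, with a flat buffer standing in for the
 * page cache, of the per-page copy loop blkmtd_read() above uses: the
 * first page is copied starting at `offset', every later page from 0,
 * and cpylen is clamped so each copy stays inside one page.
 * copy_range() is a hypothetical helper name.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

static void copy_range(unsigned char *dst, const unsigned char *pages,
		       unsigned long from, unsigned long len)
{
	unsigned long pagenr = from / PAGE_SIZE;
	unsigned long offset = from % PAGE_SIZE;
	unsigned long done = 0;

	while (len) {
		unsigned long cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;

		if (offset + cpylen > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	/* stay inside this page */

		memcpy(dst + done, pages + pagenr * PAGE_SIZE + offset, cpylen);
		offset = 0;			/* later pages start at offset 0 */
		len -= cpylen;
		done += cpylen;
		pagenr++;
	}
}

int main(void)
{
	static unsigned char backing[3 * PAGE_SIZE], out[5000];

	memset(backing, 'A', sizeof(backing));
	copy_range(out, backing, 300, sizeof(out));	/* crosses page boundaries */
	printf("%c\n", out[sizeof(out) - 1]);		/* 'A' */
	return 0;
}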
-
-/* write a range of the data via the page cache.
- *
- * Basic operation. break the write into three parts.
- *
- * 1. From a page unaligned start up until the next page boundary
- * 2. Page sized, page aligned blocks
- * 3. From end of last aligned block to end of range
- *
- * 1,3 are read via the page cache and readpage() since these are partial
- * pages, 2 we just grab pages from the page cache, not caring if they are
- * already in memory or not since they will be completly overwritten.
- *
- */
-
+
+/* write data to the underlying device */
static int blkmtd_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+ size_t *retlen, const u_char *buf)
{
- mtd_raw_dev_data_t *rawdevice = mtd->priv;
- int err = 0;
- int offset;
- int pagenr;
- size_t len1 = 0, len2 = 0, len3 = 0;
- struct page **pages;
- int pagecnt = 0;
-
- *retlen = 0;
- DEBUG(2, "blkmtd: write: dev = `%s' to = %ld len = %d buf = %p\n",
- bdevname(rawdevice->binding->bd_dev), (long int)to, len, buf);
-
- /* handle readonly and out of range numbers */
-
- if(rawdevice->readonly) {
- printk("blkmtd: error: trying to write to a readonly device %s\n", device);
- return -EROFS;
- }
-
- if(to >= rawdevice->totalsize) {
- return -ENOSPC;
- }
-
- if(to + len > rawdevice->totalsize) {
- len = (rawdevice->totalsize - to);
- }
-
-
- pagenr = to >> PAGE_SHIFT;
- offset = to - (pagenr << PAGE_SHIFT);
-
- /* see if we have to do a partial write at the start */
- if(offset) {
- if((offset + len) > PAGE_SIZE) {
- len1 = PAGE_SIZE - offset;
- len -= len1;
- } else {
- len1 = len;
- len = 0;
- }
- }
-
- /* calculate the length of the other two regions */
- len3 = len & ~PAGE_MASK;
- len -= len3;
- len2 = len;
-
-
- if(len1)
- pagecnt++;
- if(len2)
- pagecnt += len2 >> PAGE_SHIFT;
- if(len3)
- pagecnt++;
-
- DEBUG(3, "blkmtd: write: len1 = %d len2 = %d len3 = %d pagecnt = %d\n", len1, len2, len3, pagecnt);
-
- /* get space for list of pages */
- pages = kmalloc(pagecnt * sizeof(struct page *), GFP_KERNEL);
- if(pages == NULL) {
- return -ENOMEM;
- }
- pagecnt = 0;
-
- if(len1) {
- /* do partial start region */
- struct page *page;
-
- DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n", pagenr, len1, offset);
- page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice);
-
- if(IS_ERR(page)) {
- kfree(pages);
- return PTR_ERR(page);
- }
- memcpy(page_address(page)+offset, buf, len1);
- pages[pagecnt++] = page;
- buf += len1;
- *retlen = len1;
- err = 0;
- pagenr++;
- }
-
- /* Now do the main loop to a page aligned, n page sized output */
- if(len2) {
- int pagesc = len2 >> PAGE_SHIFT;
- DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n", pagenr, pagesc);
- while(pagesc) {
- struct page *page;
-
- /* see if page is in the page cache */
- DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
- page = grab_cache_page(&rawdevice->as, pagenr);
- DEBUG(3, "blkmtd: write: got page %d from page cache\n", pagenr);
- if(!page) {
- printk("blkmtd: write: cant grab cache page %d\n", pagenr);
- err = -EIO;
- goto write_err;
- }
- memcpy(page_address(page), buf, PAGE_SIZE);
- pages[pagecnt++] = page;
- UnlockPage(page);
- SetPageUptodate(page);
- pagenr++;
- pagesc--;
- buf += PAGE_SIZE;
- *retlen += PAGE_SIZE;
- }
- }
-
-
- if(len3) {
- /* do the third region */
- struct page *page;
- DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n", pagenr, len3);
- page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice);
- if(IS_ERR(page)) {
- err = PTR_ERR(page);
- goto write_err;
- }
- memcpy(page_address(page), buf, len3);
- DEBUG(3, "blkmtd: write: writing out partial end\n");
- pages[pagecnt++] = page;
- *retlen += len3;
- err = 0;
- }
- DEBUG(2, "blkmtd: write: end, retlen = %d, err = %d\n", *retlen, err);
- /* submit it to the write task */
- err = queue_page_write(rawdevice, pages, to >> PAGE_SHIFT, pagecnt, 0);
- if(!err) {
- while(pagecnt--) {
- SetPageUptodate(pages[pagecnt]);
- flush_dcache_page(pages[pagecnt]);
- page_cache_release(pages[pagecnt]);
- }
- kfree(pages);
- return 0;
- }
+ struct blkmtd_dev *dev = mtd->priv;
+ int err;
- write_err:
- while(--pagecnt) {
- SetPageError(pages[pagecnt]);
- page_cache_release(pages[pagecnt]);
- }
- kfree(pages);
- return err;
+ if(!len)
+ return 0;
+
+ DEBUG(2, "blkmtd: write: dev = `%s' to = %lld len = %zd buf = %p\n",
+ mtd->name+9, to, len, buf);
+
+ if(to >= mtd->size) {
+ return -ENOSPC;
+ }
+
+ if(to + len > mtd->size) {
+ len = mtd->size - to;
+ }
+
+ err = write_pages(dev, buf, to, len, retlen);
+ if(err > 0)
+ err = 0;
+ DEBUG(2, "blkmtd: write: end, err = %d\n", err);
+ return err;
}
/* sync the device - wait until the write queue is empty */
static void blkmtd_sync(struct mtd_info *mtd)
{
- DECLARE_WAITQUEUE(wait, current);
- mtd_raw_dev_data_t *rawdevice = mtd->priv;
- if(rawdevice->readonly)
- return;
-
- DEBUG(2, "blkmtd: sync: called\n");
-
- stuff_inq:
- spin_lock(&mbd_writeq_lock);
- if(write_queue_cnt) {
- spin_unlock(&mbd_writeq_lock);
- current->state = TASK_UNINTERRUPTIBLE;
- add_wait_queue(&mtbd_sync_wq, &wait);
- DEBUG(3, "blkmtd: sync: waking up task\n");
- wake_up_interruptible(&thr_wq);
- schedule();
- current->state = TASK_RUNNING;
- remove_wait_queue(&mtbd_sync_wq, &wait);
- DEBUG(3, "blkmtd: sync: waking up after write task\n");
- goto stuff_inq;
- }
- spin_unlock(&mbd_writeq_lock);
-
- DEBUG(2, "blkmtd: sync: finished\n");
+ /* Currently all writes are synchronous */
}
-#ifdef BLKMTD_PROC_DEBUG
-/* procfs stuff */
-static int blkmtd_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data)
+static void free_device(struct blkmtd_dev *dev)
{
- int clean = 0, dirty = 0, locked = 0;
- struct list_head *temp;
- int i, len, pages = 0, cnt;
- MOD_INC_USE_COUNT;
- spin_lock(&mbd_writeq_lock);
- cnt = write_queue_cnt;
- i = write_queue_tail;
- while(cnt) {
- if(!write_queue[i].iserase)
- pages += write_queue[i].pagecnt;
- i++;
- i %= write_queue_sz;
- cnt--;
- }
-
- /* Count the size of the page lists */
- list_for_each(temp, &mtd_rawdevice->as.clean_pages) {
- clean++;
- }
- list_for_each(temp, &mtd_rawdevice->as.dirty_pages) {
- dirty++;
- }
- list_for_each(temp, &mtd_rawdevice->as.locked_pages) {
- locked++;
- }
-
- len = sprintf(page, "Write queue head: %d\nWrite queue tail: %d\n"
- "Write queue count: %d\nPages in queue: %d (%dK)\n"
- "Clean Pages: %d\nDirty Pages: %d\nLocked Pages: %d\n"
- "nrpages: %ld\n",
- write_queue_head, write_queue_tail, write_queue_cnt,
- pages, pages << (PAGE_SHIFT-10), clean, dirty, locked,
- mtd_rawdevice->as.nrpages);
- if(len <= count)
- *eof = 1;
- spin_unlock(&mbd_writeq_lock);
- MOD_DEC_USE_COUNT;
- return len;
+ DEBUG(2, "blkmtd: free_device() dev = %p\n", dev);
+ if(dev) {
+ kfree(dev->mtd_info.eraseregions);
+ kfree(dev->mtd_info.name);
+ if(dev->blkdev) {
+ invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
+ close_bdev_excl(dev->blkdev);
+ }
+ kfree(dev);
+ }
}
-#endif
-/* Cleanup and exit - sync the device and kill of the kernel thread */
-static void __exit cleanup_blkmtd(void)
+/* For a given size and initial erase size, calculate the number
+ * and size of each erase region. Goes round the loop twice:
+ * once to find out how many regions are needed, then allocates
+ * space and goes round again to fill them in.
+ */
+static struct mtd_erase_region_info *calc_erase_regions(
+ size_t erase_size, size_t total_size, int *regions)
+{
+ struct mtd_erase_region_info *info = NULL;
+
+ DEBUG(2, "calc_erase_regions, es = %zd size = %zd regions = %d\n",
+ erase_size, total_size, *regions);
+ /* Make any user specified erasesize be a power of 2
+ and at least PAGE_SIZE */
+ if(erase_size) {
+ int es = erase_size;
+ erase_size = 1;
+ while(es != 1) {
+ es >>= 1;
+ erase_size <<= 1;
+ }
+ if(erase_size < PAGE_SIZE)
+ erase_size = PAGE_SIZE;
+ } else {
+ erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
+ }
+
+ *regions = 0;
+
+ do {
+ int tot_size = total_size;
+ int er_size = erase_size;
+ int count = 0, offset = 0, regcnt = 0;
+
+ while(tot_size) {
+ count = tot_size / er_size;
+ if(count) {
+ tot_size = tot_size % er_size;
+ if(info) {
+ DEBUG(2, "adding to erase info off=%d er=%d cnt=%d\n",
+ offset, er_size, count);
+ (info+regcnt)->offset = offset;
+ (info+regcnt)->erasesize = er_size;
+ (info+regcnt)->numblocks = count;
+ (*regions)++;
+ }
+ regcnt++;
+ offset += (count * er_size);
+ }
+ while(er_size > tot_size)
+ er_size >>= 1;
+ }
+ if(info == NULL) {
+ info = kmalloc(regcnt * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
+ if(!info)
+ break;
+ }
+ } while(!(*regions));
+ DEBUG(2, "calc_erase_regions done, es = %zd size = %zd regions = %d\n",
+ erase_size, total_size, *regions);
+ return info;
+}
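/*
 * A simplified standalone rendering, with hypothetical names, of the
 * two-pass scheme calc_erase_regions() above uses: a first call (with a
 * NULL table) only counts how many regions a (total size, erase size)
 * pair needs, memory is then allocated, and a second call fills the table
 * in, halving the erase size whenever the remainder no longer fits.
 */
#include <stdio.h>
#include <stdlib.h>

struct region {
	unsigned long offset;
	unsigned long erasesize;
	unsigned long numblocks;
};

static int calc_regions(struct region *info, unsigned long erase_size,
			unsigned long total_size)
{
	unsigned long offset = 0;
	int regions = 0;

	while (total_size) {
		unsigned long count = total_size / erase_size;

		if (count) {
			total_size %= erase_size;
			if (info) {
				info[regions].offset = offset;
				info[regions].erasesize = erase_size;
				info[regions].numblocks = count;
			}
			offset += count * erase_size;
			regions++;
		}
		/* remainder is now smaller than the current erase size */
		while (total_size && erase_size > total_size)
			erase_size >>= 1;
	}
	return regions;
}

int main(void)
{
	int i, n = calc_regions(NULL, 64 * 1024, 1000 * 1024);	/* pass 1: count */
	struct region *tbl = calloc(n, sizeof(*tbl));

	calc_regions(tbl, 64 * 1024, 1000 * 1024);		/* pass 2: fill */
	for (i = 0; i < n; i++)
		printf("off = %lu erasesize = %lu blocks = %lu\n",
		       tbl[i].offset, tbl[i].erasesize, tbl[i].numblocks);
	free(tbl);
	return 0;
}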
+
+
+static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size)
{
-#ifdef BLKMTD_PROC_DEBUG
- if(blkmtd_proc) {
- remove_proc_entry("blkmtd_debug", NULL);
- }
+ struct block_device *bdev;
+ int mode;
+ struct blkmtd_dev *dev;
+
+ if(!devname)
+ return NULL;
+
+ /* Get a handle on the device */
+
+
+#ifdef MODULE
+ mode = (readonly) ? O_RDONLY : O_RDWR;
+ bdev = open_bdev_excl(devname, mode, NULL);
+#else
+ mode = (readonly) ? FMODE_READ : FMODE_WRITE;
+ bdev = open_by_devnum(name_to_dev_t(devname), mode);
#endif
+ if(IS_ERR(bdev)) {
+ err("error: cannot open device %s", devname);
+ DEBUG(2, "blkmtd: opening bdev returned %ld\n", PTR_ERR(bdev));
+ return NULL;
+ }
+
+ DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n",
+ MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+
+ if(MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
+ err("attempting to use an MTD device as a block device");
+ blkdev_put(bdev);
+ return NULL;
+ }
+
+ dev = kmalloc(sizeof(struct blkmtd_dev), GFP_KERNEL);
+ if(dev == NULL) {
+ blkdev_put(bdev);
+ return NULL;
+ }
+
+ memset(dev, 0, sizeof(struct blkmtd_dev));
+ dev->blkdev = bdev;
+ if(!readonly) {
+ mutex_init(&dev->wrbuf_mutex);
+ }
+
+ dev->mtd_info.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+
+ /* Setup the MTD structure */
+	/* make the name contain the block device name */
+ dev->mtd_info.name = kmalloc(sizeof("blkmtd: ") + strlen(devname), GFP_KERNEL);
+ if(dev->mtd_info.name == NULL)
+ goto devinit_err;
+
+ sprintf(dev->mtd_info.name, "blkmtd: %s", devname);
+ dev->mtd_info.eraseregions = calc_erase_regions(erase_size, dev->mtd_info.size,
+ &dev->mtd_info.numeraseregions);
+ if(dev->mtd_info.eraseregions == NULL)
+ goto devinit_err;
- if (mtd_rawdevice) {
- /* sync the device */
- if (!mtd_rawdevice->readonly) {
- blkmtd_sync(&mtd_rawdevice->mtd_info);
- write_task_finish = 1;
- wake_up_interruptible(&thr_wq);
- down(&thread_sem);
- }
- del_mtd_device(&mtd_rawdevice->mtd_info);
- if(mtd_rawdevice->binding != NULL)
- blkdev_put(mtd_rawdevice->binding, BDEV_RAW);
-
- if(mtd_rawdevice->mtd_info.eraseregions)
- kfree(mtd_rawdevice->mtd_info.eraseregions);
- if(mtd_rawdevice->mtd_info.name)
- kfree(mtd_rawdevice->mtd_info.name);
-
- kfree(mtd_rawdevice);
- }
- if(write_queue)
- kfree(write_queue);
-
- if(erase_page) {
- UnlockPage(erase_page);
- __free_pages(erase_page, 0);
- }
- printk("blkmtd: unloaded for %s\n", device);
+ dev->mtd_info.erasesize = dev->mtd_info.eraseregions->erasesize;
+ DEBUG(1, "blkmtd: init: found %d erase regions\n",
+ dev->mtd_info.numeraseregions);
+
+ if(readonly) {
+ dev->mtd_info.type = MTD_ROM;
+ dev->mtd_info.flags = MTD_CAP_ROM;
+ } else {
+ dev->mtd_info.type = MTD_RAM;
+ dev->mtd_info.flags = MTD_CAP_RAM;
+ dev->mtd_info.erase = blkmtd_erase;
+ dev->mtd_info.write = blkmtd_write;
+ dev->mtd_info.writev = default_mtd_writev;
+ dev->mtd_info.sync = blkmtd_sync;
+ }
+ dev->mtd_info.read = blkmtd_read;
+ dev->mtd_info.readv = default_mtd_readv;
+ dev->mtd_info.priv = dev;
+ dev->mtd_info.owner = THIS_MODULE;
+
+ list_add(&dev->list, &blkmtd_device_list);
+ if (add_mtd_device(&dev->mtd_info)) {
+		/* Device didn't get added, so free the entry */
+ list_del(&dev->list);
+ goto devinit_err;
+ } else {
+ info("mtd%d: [%s] erase_size = %dKiB %s",
+ dev->mtd_info.index, dev->mtd_info.name + strlen("blkmtd: "),
+ dev->mtd_info.erasesize >> 10,
+ readonly ? "(read-only)" : "");
+ }
+
+ return dev;
+
+ devinit_err:
+ free_device(dev);
+ return NULL;
}
-extern struct module __this_module;
+
+/* Cleanup and exit - sync each device and remove its MTD entry */
+static void __devexit cleanup_blkmtd(void)
+{
+ struct list_head *temp1, *temp2;
+
+ /* Remove the MTD devices */
+ list_for_each_safe(temp1, temp2, &blkmtd_device_list) {
+ struct blkmtd_dev *dev = list_entry(temp1, struct blkmtd_dev,
+ list);
+ blkmtd_sync(&dev->mtd_info);
+ del_mtd_device(&dev->mtd_info);
+ info("mtd%d: [%s] removed", dev->mtd_info.index,
+ dev->mtd_info.name + strlen("blkmtd: "));
+ list_del(&dev->list);
+ free_device(dev);
+ }
+}
#ifndef MODULE
@@ -982,335 +740,79 @@ extern struct module __this_module;
static int __init param_blkmtd_device(char *str)
{
- device = str;
- return 1;
+ int i;
+
+ for(i = 0; i < MAX_DEVICES; i++) {
+ device[i] = str;
+ DEBUG(2, "blkmtd: device setup: %d = %s\n", i, device[i]);
+ strsep(&str, ",");
+ }
+ return 1;
}
static int __init param_blkmtd_erasesz(char *str)
{
- erasesz = simple_strtol(str, NULL, 0);
- return 1;
+ int i;
+ for(i = 0; i < MAX_DEVICES; i++) {
+ char *val = strsep(&str, ",");
+ if(val)
+ erasesz[i] = simple_strtoul(val, NULL, 0);
+ DEBUG(2, "blkmtd: erasesz setup: %d = %d\n", i, erasesz[i]);
+ }
+
+ return 1;
}
static int __init param_blkmtd_ro(char *str)
{
- ro = simple_strtol(str, NULL, 0);
- return 1;
-}
-
+ int i;
+ for(i = 0; i < MAX_DEVICES; i++) {
+ char *val = strsep(&str, ",");
+ if(val)
+ ro[i] = simple_strtoul(val, NULL, 0);
+ DEBUG(2, "blkmtd: ro setup: %d = %d\n", i, ro[i]);
+ }
-static int __init param_blkmtd_bs(char *str)
-{
- bs = simple_strtol(str, NULL, 0);
- return 1;
+ return 1;
}
-static int __init param_blkmtd_count(char *str)
+static int __init param_blkmtd_sync(char *str)
{
- count = simple_strtol(str, NULL, 0);
- return 1;
+ if(str[0] == '1')
+ sync = 1;
+ return 1;
}
__setup("blkmtd_device=", param_blkmtd_device);
__setup("blkmtd_erasesz=", param_blkmtd_erasesz);
__setup("blkmtd_ro=", param_blkmtd_ro);
-__setup("blkmtd_bs=", param_blkmtd_bs);
-__setup("blkmtd_count=", param_blkmtd_count);
+__setup("blkmtd_sync=", param_blkmtd_sync);
#endif
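/*
 * A hypothetical userspace illustration of the strsep()-based splitting
 * used by the boot parameters above: a comma-separated list such as
 * "blkmtd_device=/dev/sda1,/dev/sdb2" assigns one entry per slot, up to
 * MAX_DEVICES.  The array and sample value below are made up for the
 * example.
 */
#define _DEFAULT_SOURCE		/* for strsep() with glibc */
#include <stdio.h>
#include <string.h>

#define MAX_DEVICES 4

int main(void)
{
	char arg[] = "/dev/sda1,/dev/sdb2";
	char *str = arg;
	char *device[MAX_DEVICES] = { NULL };
	int i;

	for (i = 0; i < MAX_DEVICES && str; i++) {
		device[i] = strsep(&str, ",");
		printf("device[%d] = %s\n", i, device[i]);
	}
	return 0;
}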
-/* for a given size and initial erase size, calculate the number and size of each
- erase region */
-static int __init calc_erase_regions(struct mtd_erase_region_info *info, size_t erase_size, size_t total_size)
-{
- int count = 0;
- int offset = 0;
- int regions = 0;
-
- while(total_size) {
- count = total_size / erase_size;
- if(count) {
- total_size = total_size % erase_size;
- if(info) {
- info->offset = offset;
- info->erasesize = erase_size;
- info->numblocks = count;
- info++;
- }
- offset += (count * erase_size);
- regions++;
- }
- while(erase_size > total_size)
- erase_size >>= 1;
- }
- return regions;
-}
-
-
-extern kdev_t name_to_kdev_t(char *line) __init;
-
/* Startup */
static int __init init_blkmtd(void)
{
-#ifdef MODULE
- struct file *file = NULL;
- struct inode *inode;
-#endif
+ int i;
- int maj, min;
- int i, blocksize, blocksize_bits;
- loff_t size = 0;
- int readonly = 0;
- int erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
- kdev_t rdev;
- int err;
- int mode;
- int regions;
-
- /* Check args */
- if(device == 0) {
- printk("blkmtd: error, missing `device' name\n");
- return -EINVAL;
- }
-
- if(ro)
- readonly = 1;
-
- if(erasesz)
- erase_size = erasesz;
-
- if(wqs) {
- if(wqs < 16)
- wqs = 16;
- if(wqs > 4*WRITE_QUEUE_SZ)
- wqs = 4*WRITE_QUEUE_SZ;
- write_queue_sz = wqs;
- }
-
- DEBUG(1, "blkmtd: device = `%s' erase size = %dK readonly = %s queue size = %d\n",
- device, erase_size, readonly ? "yes" : "no", write_queue_sz);
- /* Get a handle on the device */
- mode = (readonly) ? O_RDONLY : O_RDWR;
+ info("version " VERSION);
+ /* Check args - device[0] is the bare minimum*/
+ if(!device[0]) {
+ err("error: missing `device' name\n");
+ return -EINVAL;
+ }
-#ifdef MODULE
+ for(i = 0; i < MAX_DEVICES; i++)
+ add_device(device[i], ro[i], erasesz[i] << 10);
- file = filp_open(device, mode, 0);
- if(IS_ERR(file)) {
- printk("blkmtd: error, cant open device %s\n", device);
- DEBUG(2, "blkmtd: filp_open returned %ld\n", PTR_ERR(file));
- return 1;
- }
-
- /* determine is this is a block device and if so get its major and minor
- numbers */
- inode = file->f_dentry->d_inode;
- if(!S_ISBLK(inode->i_mode)) {
- printk("blkmtd: %s not a block device\n", device);
- filp_close(file, NULL);
- return 1;
- }
- rdev = inode->i_rdev;
- filp_close(file, NULL);
-#else
- rdev = name_to_kdev_t(device);
-#endif
+ if(list_empty(&blkmtd_device_list))
+ return -EINVAL;
- maj = MAJOR(rdev);
- min = MINOR(rdev);
- DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n", maj, min);
-
- if(!rdev) {
- printk("blkmtd: bad block device: `%s'\n", device);
- return 1;
- }
-
- if(maj == MTD_BLOCK_MAJOR) {
- printk("blkmtd: attempting to use an MTD device as a block device\n");
- return 1;
- }
-
- DEBUG(1, "blkmtd: devname = %s\n", bdevname(rdev));
- blocksize = BLOCK_SIZE;
-
- if(bs) {
- blocksize = bs;
- } else {
- if (blksize_size[maj] && blksize_size[maj][min]) {
- DEBUG(2, "blkmtd: blksize_size = %d\n", blksize_size[maj][min]);
- blocksize = blksize_size[maj][min];
- }
- }
- i = blocksize;
- blocksize_bits = 0;
- while(i != 1) {
- blocksize_bits++;
- i >>= 1;
- }
-
- if(count) {
- size = count;
- } else {
- if (blk_size[maj]) {
- size = ((loff_t) blk_size[maj][min] << BLOCK_SIZE_BITS) >> blocksize_bits;
- }
- }
- size *= blocksize;
- DEBUG(1, "blkmtd: size = %ld\n", (long int)size);
-
- if(size == 0) {
- printk("blkmtd: cant determine size\n");
- return 1;
- }
-
- mtd_rawdevice = (mtd_raw_dev_data_t *)kmalloc(sizeof(mtd_raw_dev_data_t), GFP_KERNEL);
- if(mtd_rawdevice == NULL) {
- err = -ENOMEM;
- goto init_err;
- }
- memset(mtd_rawdevice, 0, sizeof(mtd_raw_dev_data_t));
- /* get the block device */
- mtd_rawdevice->binding = bdget(kdev_t_to_nr(MKDEV(maj, min)));
- err = blkdev_get(mtd_rawdevice->binding, mode, 0, BDEV_RAW);
- if (err) {
- goto init_err;
- }
- mtd_rawdevice->totalsize = size;
- mtd_rawdevice->sector_size = blocksize;
- mtd_rawdevice->sector_bits = blocksize_bits;
- mtd_rawdevice->readonly = readonly;
-
- /* See if device ends on page boundary */
- if(size % PAGE_SIZE) {
- mtd_rawdevice->partial_last_page = size >> PAGE_SHIFT;
- mtd_rawdevice->last_page_sectors = (size & (PAGE_SIZE-1)) >> blocksize_bits;
- }
-
- DEBUG(2, "sector_size = %d, sector_bits = %d, partial_last_page = %d last_page_sectors = %d\n",
- mtd_rawdevice->sector_size, mtd_rawdevice->sector_bits,
- mtd_rawdevice->partial_last_page, mtd_rawdevice->last_page_sectors);
-
- /* Setup the MTD structure */
- /* make the name contain the block device in */
- mtd_rawdevice->mtd_info.name = kmalloc(9 + strlen(device), GFP_KERNEL);
- if(mtd_rawdevice->mtd_info.name == NULL)
- goto init_err;
-
- sprintf(mtd_rawdevice->mtd_info.name, "blkmtd: %s", device);
- if(readonly) {
- mtd_rawdevice->mtd_info.type = MTD_ROM;
- mtd_rawdevice->mtd_info.flags = MTD_CAP_ROM;
- mtd_rawdevice->mtd_info.erasesize = erase_size << 10;
- } else {
- mtd_rawdevice->mtd_info.type = MTD_RAM;
- mtd_rawdevice->mtd_info.flags = MTD_CAP_RAM;
- mtd_rawdevice->mtd_info.erasesize = erase_size << 10;
- }
- mtd_rawdevice->mtd_info.size = size;
- mtd_rawdevice->mtd_info.erase = blkmtd_erase;
- mtd_rawdevice->mtd_info.read = blkmtd_read;
- mtd_rawdevice->mtd_info.write = blkmtd_write;
- mtd_rawdevice->mtd_info.sync = blkmtd_sync;
- mtd_rawdevice->mtd_info.point = 0;
- mtd_rawdevice->mtd_info.unpoint = 0;
-
- mtd_rawdevice->mtd_info.priv = mtd_rawdevice;
- regions = calc_erase_regions(NULL, erase_size << 10, size);
- DEBUG(1, "blkmtd: init: found %d erase regions\n", regions);
- mtd_rawdevice->mtd_info.eraseregions = kmalloc(regions * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
- if(mtd_rawdevice->mtd_info.eraseregions == NULL) {
- err = -ENOMEM;
- goto init_err;
- }
- mtd_rawdevice->mtd_info.numeraseregions = regions;
- calc_erase_regions(mtd_rawdevice->mtd_info.eraseregions, erase_size << 10, size);
-
- /* setup the page cache info */
-
- mtd_rawdevice->as.nrpages = 0;
- INIT_LIST_HEAD(&mtd_rawdevice->as.clean_pages);
- INIT_LIST_HEAD(&mtd_rawdevice->as.dirty_pages);
- INIT_LIST_HEAD(&mtd_rawdevice->as.locked_pages);
- mtd_rawdevice->as.host = NULL;
- spin_lock_init(&(mtd_rawdevice->as.i_shared_lock));
-
- mtd_rawdevice->as.a_ops = &blkmtd_aops;
- mtd_rawdevice->as.i_mmap = NULL;
- mtd_rawdevice->as.i_mmap_shared = NULL;
- mtd_rawdevice->as.gfp_mask = GFP_KERNEL;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- mtd_rawdevice->mtd_info.module = THIS_MODULE;
-#endif
- if (add_mtd_device(&mtd_rawdevice->mtd_info)) {
- err = -EIO;
- goto init_err;
- }
-
- if(!mtd_rawdevice->readonly) {
- /* Allocate the write queue */
- write_queue = kmalloc(write_queue_sz * sizeof(mtdblkdev_write_queue_t), GFP_KERNEL);
- if(!write_queue) {
- err = -ENOMEM;
- goto init_err;
- }
- /* Set up the erase page */
- erase_page = alloc_pages(GFP_KERNEL, 0);
- if(erase_page == NULL) {
- err = -ENOMEM;
- goto init_err;
- }
- memset(page_address(erase_page), 0xff, PAGE_SIZE);
- lock_page(erase_page);
-
- init_waitqueue_head(&thr_wq);
- init_waitqueue_head(&mtbd_sync_wq);
- DEBUG(3, "blkmtd: init: kernel task @ %p\n", write_queue_task);
- DEBUG(2, "blkmtd: init: starting kernel task\n");
- kernel_thread(write_queue_task, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
- DEBUG(2, "blkmtd: init: started\n");
- printk("blkmtd loaded: version = %s using %s erase_size = %dK %s\n",
- VERSION, device, erase_size, (readonly) ? "(read-only)" : "");
- }
-
-#ifdef BLKMTD_PROC_DEBUG
- /* create proc entry */
- DEBUG(2, "Creating /proc/blkmtd_debug\n");
- blkmtd_proc = create_proc_read_entry("blkmtd_debug", 0444,
- NULL, blkmtd_proc_read, NULL);
- if(blkmtd_proc == NULL) {
- printk("Cant create /proc/blkmtd_debug\n");
- } else {
- blkmtd_proc->owner = THIS_MODULE;
- }
-#endif
-
- /* Everything is ok if we got here */
- return 0;
-
- init_err:
-
- if(mtd_rawdevice) {
- if(mtd_rawdevice->mtd_info.eraseregions)
- kfree(mtd_rawdevice->mtd_info.eraseregions);
- if(mtd_rawdevice->mtd_info.name)
- kfree(mtd_rawdevice->mtd_info.name);
- if(mtd_rawdevice->binding)
- blkdev_put(mtd_rawdevice->binding, BDEV_RAW);
- kfree(mtd_rawdevice);
- }
-
- if(write_queue) {
- kfree(write_queue);
- write_queue = NULL;
- }
-
- if(erase_page)
- __free_pages(erase_page, 0);
- return err;
+ return 0;
}
module_init(init_blkmtd);
diff --git a/linux-2.4.x/drivers/mtd/devices/block2mtd.c b/linux-2.4.x/drivers/mtd/devices/block2mtd.c
new file mode 100644
index 0000000..208b20a
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/devices/block2mtd.c
@@ -0,0 +1,494 @@
+/*
+ * $Id: block2mtd.c,v 1.32 2006/03/29 08:26:27 dwmw2 Exp $
+ *
+ * block2mtd.c - create an mtd from a block device
+ *
+ * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
+ * Copyright (C) 2004,2005 Jörn Engel <joern@wh.fh-wedel.de>
+ *
+ * Licence: GPL
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/pagemap.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/buffer_head.h>
+#include <linux/mutex.h>
+
+#define VERSION "$Revision: 1.32 $"
+
+
+#define ERROR(fmt, args...) printk(KERN_ERR "block2mtd: " fmt "\n" , ## args)
+#define INFO(fmt, args...) printk(KERN_INFO "block2mtd: " fmt "\n" , ## args)
+
+
+/* Info for the block device */
+struct block2mtd_dev {
+ struct list_head list;
+ struct block_device *blkdev;
+ struct mtd_info mtd;
+ struct mutex write_mutex;
+};
+
+
+/* Static info about the MTD, used in cleanup_module */
+static LIST_HEAD(blkmtd_device_list);
+
+
+#define PAGE_READAHEAD 64
+static void cache_readahead(struct address_space *mapping, int index)
+{
+ filler_t *filler = (filler_t*)mapping->a_ops->readpage;
+ int i, pagei;
+ unsigned ret = 0;
+ unsigned long end_index;
+ struct page *page;
+ LIST_HEAD(page_pool);
+ struct inode *inode = mapping->host;
+ loff_t isize = i_size_read(inode);
+
+ if (!isize) {
+ INFO("iSize=0 in cache_readahead\n");
+ return;
+ }
+
+ end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+
+ read_lock_irq(&mapping->tree_lock);
+ for (i = 0; i < PAGE_READAHEAD; i++) {
+ pagei = index + i;
+ if (pagei > end_index) {
+ INFO("Overrun end of disk in cache readahead\n");
+ break;
+ }
+ page = radix_tree_lookup(&mapping->page_tree, pagei);
+ if (page && (!i))
+ break;
+ if (page)
+ continue;
+ read_unlock_irq(&mapping->tree_lock);
+ page = page_cache_alloc_cold(mapping);
+ read_lock_irq(&mapping->tree_lock);
+ if (!page)
+ break;
+ page->index = pagei;
+ list_add(&page->lru, &page_pool);
+ ret++;
+ }
+ read_unlock_irq(&mapping->tree_lock);
+ if (ret)
+ read_cache_pages(mapping, &page_pool, filler, NULL);
+}
+
+
+static struct page* page_readahead(struct address_space *mapping, int index)
+{
+ filler_t *filler = (filler_t*)mapping->a_ops->readpage;
+ cache_readahead(mapping, index);
+ return read_cache_page(mapping, index, filler, NULL);
+}
+
+
+/* erase a specified part of the device */
+static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
+{
+ struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ struct page *page;
+ int index = to >> PAGE_SHIFT; // page index
+ int pages = len >> PAGE_SHIFT;
+ u_long *p;
+ u_long *max;
+
+ while (pages) {
+ page = page_readahead(mapping, index);
+ if (!page)
+ return -ENOMEM;
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ max = (u_long*)page_address(page) + PAGE_SIZE;
+ for (p=(u_long*)page_address(page); p<max; p++)
+ if (*p != -1UL) {
+ lock_page(page);
+ memset(page_address(page), 0xff, PAGE_SIZE);
+ set_page_dirty(page);
+ unlock_page(page);
+ break;
+ }
+
+ page_cache_release(page);
+ pages--;
+ index++;
+ }
+ return 0;
+}
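/*
 * A standalone sketch, under the assumption of a plain in-memory buffer
 * standing in for a page-cache page, of the check _block2mtd_erase()
 * above relies on: a page is only rewritten (and would only be marked
 * dirty) if it is not already all 0xFF, so already-erased pages generate
 * no write-out.  erase_page_if_needed() is a hypothetical helper name.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* returns 1 if the page had to be wiped, 0 if it was already erased */
static int erase_page_if_needed(unsigned long *page)
{
	unsigned long *p = page;
	unsigned long *max = page + PAGE_SIZE / sizeof(unsigned long);

	for (; p < max; p++) {
		if (*p != ~0UL) {
			memset(page, 0xff, PAGE_SIZE);	/* set_page_dirty() would follow */
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	static unsigned long page[PAGE_SIZE / sizeof(unsigned long)];

	memset(page, 0xff, PAGE_SIZE);
	printf("%d\n", erase_page_if_needed(page));	/* 0: nothing to do */
	((unsigned char *)page)[100] = 0;
	printf("%d\n", erase_page_if_needed(page));	/* 1: wiped and dirtied */
	return 0;
}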
+static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ size_t from = instr->addr;
+ size_t len = instr->len;
+ int err;
+
+ instr->state = MTD_ERASING;
+ mutex_lock(&dev->write_mutex);
+ err = _block2mtd_erase(dev, from, len);
+ mutex_unlock(&dev->write_mutex);
+	if (err) {
+		ERROR("erase failed err = %d", err);
+		instr->state = MTD_ERASE_FAILED;
+	} else {
+		instr->state = MTD_ERASE_DONE;
+	}
+
+ mtd_erase_callback(instr);
+ return err;
+}
+
+
+static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ struct page *page;
+ int index = from >> PAGE_SHIFT;
+ int offset = from & (PAGE_SIZE-1);
+ int cpylen;
+
+ if (from > mtd->size)
+ return -EINVAL;
+ if (from + len > mtd->size)
+ len = mtd->size - from;
+
+ if (retlen)
+ *retlen = 0;
+
+ while (len) {
+ if ((offset + len) > PAGE_SIZE)
+ cpylen = PAGE_SIZE - offset; // multiple pages
+ else
+ cpylen = len; // this page
+ len = len - cpylen;
+
+ // Get page
+ page = page_readahead(dev->blkdev->bd_inode->i_mapping, index);
+ if (!page)
+ return -ENOMEM;
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ memcpy(buf, page_address(page) + offset, cpylen);
+ page_cache_release(page);
+
+ if (retlen)
+ *retlen += cpylen;
+ buf += cpylen;
+ offset = 0;
+ index++;
+ }
+ return 0;
+}
+
+
+/* write data to the underlying device */
+static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
+ loff_t to, size_t len, size_t *retlen)
+{
+ struct page *page;
+ struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
+ int index = to >> PAGE_SHIFT; // page index
+ int offset = to & ~PAGE_MASK; // page offset
+ int cpylen;
+
+ if (retlen)
+ *retlen = 0;
+ while (len) {
+ if ((offset+len) > PAGE_SIZE)
+ cpylen = PAGE_SIZE - offset; // multiple pages
+ else
+ cpylen = len; // this page
+ len = len - cpylen;
+
+ // Get page
+ page = page_readahead(mapping, index);
+ if (!page)
+ return -ENOMEM;
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ if (memcmp(page_address(page)+offset, buf, cpylen)) {
+ lock_page(page);
+ memcpy(page_address(page) + offset, buf, cpylen);
+ set_page_dirty(page);
+ unlock_page(page);
+ }
+ page_cache_release(page);
+
+ if (retlen)
+ *retlen += cpylen;
+
+ buf += cpylen;
+ offset = 0;
+ index++;
+ }
+ return 0;
+}
+static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ int err;
+
+ if (!len)
+ return 0;
+ if (to >= mtd->size)
+ return -ENOSPC;
+ if (to + len > mtd->size)
+ len = mtd->size - to;
+
+ mutex_lock(&dev->write_mutex);
+ err = _block2mtd_write(dev, buf, to, len, retlen);
+ mutex_unlock(&dev->write_mutex);
+ if (err > 0)
+ err = 0;
+ return err;
+}
+
+
+/* sync the device - wait until the write queue is empty */
+static void block2mtd_sync(struct mtd_info *mtd)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ sync_blockdev(dev->blkdev);
+ return;
+}
+
+
+static void block2mtd_free_device(struct block2mtd_dev *dev)
+{
+ if (!dev)
+ return;
+
+ kfree(dev->mtd.name);
+
+ if (dev->blkdev) {
+ invalidate_inode_pages(dev->blkdev->bd_inode->i_mapping);
+ close_bdev_excl(dev->blkdev);
+ }
+
+ kfree(dev);
+}
+
+
+/* FIXME: ensure that mtd->size % erase_size == 0 */
+static struct block2mtd_dev *add_device(char *devname, int erase_size)
+{
+ struct block_device *bdev;
+ struct block2mtd_dev *dev;
+
+ if (!devname)
+ return NULL;
+
+ dev = kmalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ memset(dev, 0, sizeof(*dev));
+
+ /* Get a handle on the device */
+ bdev = open_bdev_excl(devname, O_RDWR, NULL);
+ if (IS_ERR(bdev)) {
+ ERROR("error: cannot open device %s", devname);
+ goto devinit_err;
+ }
+ dev->blkdev = bdev;
+
+ if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
+ ERROR("attempting to use an MTD device as a block device");
+ goto devinit_err;
+ }
+
+ mutex_init(&dev->write_mutex);
+
+ /* Setup the MTD structure */
+	/* make the name contain the block device name */
+ dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
+ GFP_KERNEL);
+ if (!dev->mtd.name)
+ goto devinit_err;
+
+ sprintf(dev->mtd.name, "block2mtd: %s", devname);
+
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.erasesize = erase_size;
+ dev->mtd.type = MTD_BLOCK;
+ dev->mtd.flags = MTD_CAP_RAM;
+ dev->mtd.erase = block2mtd_erase;
+ dev->mtd.write = block2mtd_write;
+ dev->mtd.writev = default_mtd_writev;
+ dev->mtd.sync = block2mtd_sync;
+ dev->mtd.read = block2mtd_read;
+ dev->mtd.readv = default_mtd_readv;
+ dev->mtd.priv = dev;
+ dev->mtd.owner = THIS_MODULE;
+
+ if (add_mtd_device(&dev->mtd)) {
+		/* Device didn't get added, so free the entry */
+ goto devinit_err;
+ }
+ list_add(&dev->list, &blkmtd_device_list);
+ INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
+ dev->mtd.name + strlen("blkmtd: "),
+ dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ return dev;
+
+devinit_err:
+ block2mtd_free_device(dev);
+ return NULL;
+}
+
+
+static int ustrtoul(const char *cp, char **endp, unsigned int base)
+{
+ unsigned long result = simple_strtoul(cp, endp, base);
+ switch (**endp) {
+ case 'G' :
+ result *= 1024;
+ case 'M':
+ result *= 1024;
+ case 'k':
+ result *= 1024;
+ /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
+ if ((*endp)[1] == 'i')
+ (*endp) += 2;
+ }
+ return result;
+}
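/*
 * A userspace adaptation, for illustration only, of ustrtoul() above: the
 * 'G', 'M' and 'k' cases deliberately fall through so each larger suffix
 * multiplies by 1024 one more time, and a trailing "ki"/"Mi"/"Gi" is
 * consumed so the caller can insist that the whole string was parsed.
 * parse_size() is a hypothetical name, not the module's own.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_size(const char *cp, char **endp)
{
	unsigned long result = strtoul(cp, endp, 0);

	switch (**endp) {
	case 'G':
		result *= 1024;		/* fall through */
	case 'M':
		result *= 1024;		/* fall through */
	case 'k':
		result *= 1024;
		if ((*endp)[1] == 'i')	/* accept "ki", "Mi", "Gi" */
			*endp += 2;
	}
	return result;
}

int main(void)
{
	char *end;

	printf("%lu\n", parse_size("64ki", &end));	/* 65536 */
	printf("%lu\n", parse_size("2Mi", &end));	/* 2097152 */
	return 0;
}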
+
+
+static int parse_num(size_t *num, const char *token)
+{
+ char *endp;
+ size_t n;
+
+ n = (size_t) ustrtoul(token, &endp, 0);
+ if (*endp)
+ return -EINVAL;
+
+ *num = n;
+ return 0;
+}
+
+
+static int parse_name(char **pname, const char *token, size_t limit)
+{
+ size_t len;
+ char *name;
+
+ len = strlen(token) + 1;
+ if (len > limit)
+ return -ENOSPC;
+
+ name = kmalloc(len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strcpy(name, token);
+
+ *pname = name;
+ return 0;
+}
+
+
+static inline void kill_final_newline(char *str)
+{
+ char *newline = strrchr(str, '\n');
+ if (newline && !newline[1])
+ *newline = 0;
+}
+
+
+#define parse_err(fmt, args...) do { \
+ ERROR("block2mtd: " fmt "\n", ## args); \
+ return 0; \
+} while (0)
+
+static int block2mtd_setup(const char *val, struct kernel_param *kp)
+{
+ char buf[80+12], *str=buf; /* 80 for device, 12 for erase size */
+ char *token[2];
+ char *name;
+ size_t erase_size = PAGE_SIZE;
+ int i, ret;
+
+ if (strnlen(val, sizeof(buf)) >= sizeof(buf))
+ parse_err("parameter too long");
+
+ strcpy(str, val);
+ kill_final_newline(str);
+
+ for (i=0; i<2; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str)
+ parse_err("too many arguments");
+
+ if (!token[0])
+ parse_err("no argument");
+
+ ret = parse_name(&name, token[0], 80);
+ if (ret == -ENOMEM)
+ parse_err("out of memory");
+ if (ret == -ENOSPC)
+ parse_err("name too long");
+ if (ret)
+ return 0;
+
+ if (token[1]) {
+ ret = parse_num(&erase_size, token[1]);
+ if (ret)
+ parse_err("illegal erase size");
+ }
+
+ add_device(name, erase_size);
+
+ return 0;
+}
+
+
+module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
+
+static int __init block2mtd_init(void)
+{
+ INFO("version " VERSION);
+ return 0;
+}
+
+
+static void __devexit block2mtd_exit(void)
+{
+ struct list_head *pos, *next;
+
+ /* Remove the MTD devices */
+ list_for_each_safe(pos, next, &blkmtd_device_list) {
+ struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
+ block2mtd_sync(&dev->mtd);
+ del_mtd_device(&dev->mtd);
+ INFO("mtd%d: [%s] removed", dev->mtd.index,
+ dev->mtd.name + strlen("blkmtd: "));
+ list_del(&dev->list);
+ block2mtd_free_device(dev);
+ }
+}
+
+
+module_init(block2mtd_init);
+module_exit(block2mtd_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Evans <spse@secret.org.uk> and others");
+MODULE_DESCRIPTION("Emulate an MTD using a block device");
diff --git a/linux-2.4.x/drivers/mtd/devices/doc2000.c b/linux-2.4.x/drivers/mtd/devices/doc2000.c
index 837ed5f..bab6fde 100644
--- a/linux-2.4.x/drivers/mtd/devices/doc2000.c
+++ b/linux-2.4.x/drivers/mtd/devices/doc2000.c
@@ -4,7 +4,7 @@
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2000.c,v 1.46 2001/10/02 15:05:13 dwmw2 Exp $
+ * $Id: doc2000.c,v 1.69 2006/03/29 08:26:27 dwmw2 Exp $
*/
#include <linux/kernel.h>
@@ -19,13 +19,15 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ids.h>
#include <linux/mtd/doc2000.h>
#define DOC_SUPPORT_2000
+#define DOC_SUPPORT_2000TSOP
#define DOC_SUPPORT_MILLENNIUM
#ifdef DOC_SUPPORT_2000
@@ -34,7 +36,7 @@
#define DoC_is_2000(doc) (0)
#endif
-#ifdef DOC_SUPPORT_MILLENNIUM
+#if defined(DOC_SUPPORT_2000TSOP) || defined(DOC_SUPPORT_MILLENNIUM)
#define DoC_is_Millennium(doc) (doc->ChipID == DOC_ChipID_DocMil)
#else
#define DoC_is_Millennium(doc) (0)
@@ -54,9 +56,12 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf);
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf, u_char *eccbuf);
+ size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf, u_char *eccbuf);
+ size_t *retlen, const u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
+static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen,
+ u_char *eccbuf, struct nand_oobinfo *oobsel);
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t *retlen, u_char *buf);
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
@@ -72,20 +77,20 @@ static void DoC_Delay(struct DiskOnChip *doc, unsigned short cycles)
{
volatile char dummy;
int i;
-
+
for (i = 0; i < cycles; i++) {
if (DoC_is_Millennium(doc))
dummy = ReadDOC(doc->virtadr, NOP);
else
dummy = ReadDOC(doc->virtadr, DOCStatus);
}
-
+
}
/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
static int _DoC_WaitReady(struct DiskOnChip *doc)
{
- unsigned long docptr = doc->virtadr;
+ void __iomem *docptr = doc->virtadr;
unsigned long timeo = jiffies + (HZ * 10);
DEBUG(MTD_DEBUG_LEVEL3,
@@ -93,16 +98,16 @@ static int _DoC_WaitReady(struct DiskOnChip *doc)
/* Out-of-line routine to wait for chip response */
while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+	/* issue 2 reads from the NOP register after reading from the CDSNControl
+	   register; see Software Requirement 11.4 item 2. */
+ DoC_Delay(doc, 2);
+
if (time_after(jiffies, timeo)) {
DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
return -EIO;
}
- if (current->need_resched) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
- }
- else
- udelay(1);
+ udelay(1);
+ cond_resched();
}
return 0;
@@ -110,7 +115,8 @@ static int _DoC_WaitReady(struct DiskOnChip *doc)
static inline int DoC_WaitReady(struct DiskOnChip *doc)
{
- unsigned long docptr = doc->virtadr;
+ void __iomem *docptr = doc->virtadr;
+
/* This is inline, to optimise the common case, where it's ready instantly */
int ret = 0;
@@ -136,7 +142,7 @@ static inline int DoC_WaitReady(struct DiskOnChip *doc)
static inline int DoC_Command(struct DiskOnChip *doc, unsigned char command,
unsigned char xtraflags)
{
- unsigned long docptr = doc->virtadr;
+ void __iomem *docptr = doc->virtadr;
if (DoC_is_2000(doc))
xtraflags |= CDSN_CTRL_FLASH_IO;
@@ -150,6 +156,8 @@ static inline int DoC_Command(struct DiskOnChip *doc, unsigned char command,
/* Send the command */
WriteDOC_(command, docptr, doc->ioreg);
+ if (DoC_is_Millennium(doc))
+ WriteDOC(command, docptr, WritePipeTerm);
/* Lower the CLE line */
WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl);
@@ -166,10 +174,8 @@ static inline int DoC_Command(struct DiskOnChip *doc, unsigned char command,
static int DoC_Address(struct DiskOnChip *doc, int numbytes, unsigned long ofs,
unsigned char xtraflags1, unsigned char xtraflags2)
{
- unsigned long docptr;
int i;
-
- docptr = doc->virtadr;
+ void __iomem *docptr = doc->virtadr;
if (DoC_is_2000(doc))
xtraflags1 |= CDSN_CTRL_FLASH_IO;
@@ -211,9 +217,12 @@ static int DoC_Address(struct DiskOnChip *doc, int numbytes, unsigned long ofs,
}
}
+ if (DoC_is_Millennium(doc))
+ WriteDOC(ofs & 0xff, docptr, WritePipeTerm);
+
DoC_Delay(doc, 2); /* Needed for some slow flash chips. mf. */
-
- /* FIXME: The SlowIO's for millennium could be replaced by
+
+ /* FIXME: The SlowIO's for millennium could be replaced by
a single WritePipeTerm here. mf. */
/* Lower the ALE line */
@@ -231,11 +240,9 @@ static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len)
{
volatile int dummy;
int modulus = 0xffff;
- unsigned long docptr;
+ void __iomem *docptr = doc->virtadr;
int i;
- docptr = doc->virtadr;
-
if (len <= 0)
return;
@@ -262,11 +269,9 @@ static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len)
/* Write a buffer to DoC, taking care of Millennium odditys */
static void DoC_WriteBuf(struct DiskOnChip *doc, const u_char * buf, int len)
{
- unsigned long docptr;
+ void __iomem *docptr = doc->virtadr;
int i;
- docptr = doc->virtadr;
-
if (len <= 0)
return;
@@ -283,7 +288,7 @@ static void DoC_WriteBuf(struct DiskOnChip *doc, const u_char * buf, int len)
static inline int DoC_SelectChip(struct DiskOnChip *doc, int chip)
{
- unsigned long docptr = doc->virtadr;
+ void __iomem *docptr = doc->virtadr;
/* Software requirement 11.4.4 before writing DeviceSelect */
/* Deassert the CE line to eliminate glitches on the FCE# outputs */
@@ -307,7 +312,7 @@ static inline int DoC_SelectChip(struct DiskOnChip *doc, int chip)
static inline int DoC_SelectFloor(struct DiskOnChip *doc, int floor)
{
- unsigned long docptr = doc->virtadr;
+ void __iomem *docptr = doc->virtadr;
/* Select the floor (bank) of chips required */
WriteDOC(floor, docptr, FloorSelect);
@@ -320,7 +325,7 @@ static inline int DoC_SelectFloor(struct DiskOnChip *doc, int floor)
static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
{
- int mfr, id, i;
+ int mfr, id, i, j;
volatile char dummy;
/* Page in the required floor/chip */
@@ -349,23 +354,33 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
/* Read the manufacturer and device id codes from the device */
- /* CDSN Slow IO register see Software Requirement 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- mfr = ReadDOC_(doc->virtadr, doc->ioreg);
+ if (DoC_is_Millennium(doc)) {
+ DoC_Delay(doc, 2);
+ dummy = ReadDOC(doc->virtadr, ReadPipeInit);
+ mfr = ReadDOC(doc->virtadr, LastDataRead);
- /* CDSN Slow IO register see Software Requirement 11.4 item 5. */
- dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
- DoC_Delay(doc, 2);
- id = ReadDOC_(doc->virtadr, doc->ioreg);
+ DoC_Delay(doc, 2);
+ dummy = ReadDOC(doc->virtadr, ReadPipeInit);
+ id = ReadDOC(doc->virtadr, LastDataRead);
+ } else {
+ /* CDSN Slow IO register see Software Req 11.4 item 5. */
+ dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
+ DoC_Delay(doc, 2);
+ mfr = ReadDOC_(doc->virtadr, doc->ioreg);
+
+ /* CDSN Slow IO register see Software Req 11.4 item 5. */
+ dummy = ReadDOC(doc->virtadr, CDSNSlowIO);
+ DoC_Delay(doc, 2);
+ id = ReadDOC_(doc->virtadr, doc->ioreg);
+ }
/* No response - return failure */
if (mfr == 0xff || mfr == 0)
return 0;
- /* Check it's the same as the first chip we identified.
+ /* Check it's the same as the first chip we identified.
* M-Systems say that any given DiskOnChip device should only
- * contain _one_ type of flash part, although that's not a
+ * contain _one_ type of flash part, although that's not a
* hardware restriction. */
if (doc->mfr) {
if (doc->mfr == mfr && doc->id == id)
@@ -378,20 +393,23 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
/* Print and store the manufacturer and ID codes. */
for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (mfr == nand_flash_ids[i].manufacture_id &&
- id == nand_flash_ids[i].model_id) {
+ if (id == nand_flash_ids[i].id) {
+ /* Try to identify manufacturer */
+ for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
+ if (nand_manuf_ids[j].id == mfr)
+ break;
+ }
printk(KERN_INFO
"Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s)\n", mfr, id,
- nand_flash_ids[i].name);
+ "Chip ID: %2.2X (%s:%s)\n", mfr, id,
+ nand_manuf_ids[j].name, nand_flash_ids[i].name);
if (!doc->mfr) {
doc->mfr = mfr;
doc->id = id;
doc->chipshift =
- nand_flash_ids[i].chipshift;
- doc->page256 = nand_flash_ids[i].page256;
- doc->pageadrlen =
- nand_flash_ids[i].pageadrlen;
+ ffs((nand_flash_ids[i].chipsize << 20)) - 1;
+ doc->page256 = (nand_flash_ids[i].pagesize == 256) ? 1 : 0;
+ doc->pageadrlen = doc->chipshift > 25 ? 3 : 2;
doc->erasesize =
nand_flash_ids[i].erasesize;
return 1;
@@ -411,20 +429,16 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
-static void DoC_ScanChips(struct DiskOnChip *this)
+static void DoC_ScanChips(struct DiskOnChip *this, int maxchips)
{
int floor, chip;
int numchips[MAX_FLOORS];
- int maxchips = MAX_CHIPS;
int ret = 1;
this->numchips = 0;
this->mfr = 0;
this->id = 0;
- if (DoC_is_Millennium(this))
- maxchips = MAX_CHIPS_MIL;
-
/* For each floor, find the number of valid chips it contains */
for (floor = 0; floor < MAX_FLOORS; floor++) {
ret = 1;
@@ -454,7 +468,7 @@ static void DoC_ScanChips(struct DiskOnChip *this)
ret = 0;
- /* Fill out the chip array with {floor, chipno} for each
+ /* Fill out the chip array with {floor, chipno} for each
* detected chip in the device. */
for (floor = 0; floor < MAX_FLOORS; floor++) {
for (chip = 0; chip < numchips[floor]; chip++) {
@@ -514,39 +528,54 @@ static const char im_name[] = "DoC2k_init";
*/
static void DoC2k_init(struct mtd_info *mtd)
{
- struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
+ struct DiskOnChip *this = mtd->priv;
struct DiskOnChip *old = NULL;
+ int maxchips;
/* We must avoid being called twice for the same device. */
if (doc2klist)
- old = (struct DiskOnChip *) doc2klist->priv;
+ old = doc2klist->priv;
while (old) {
if (DoC2k_is_alias(old, this)) {
printk(KERN_NOTICE
"Ignoring DiskOnChip 2000 at 0x%lX - already configured\n",
this->physadr);
- iounmap((void *) this->virtadr);
+ iounmap(this->virtadr);
kfree(mtd);
return;
}
if (old->nextdoc)
- old = (struct DiskOnChip *) old->nextdoc->priv;
+ old = old->nextdoc->priv;
else
old = NULL;
}
switch (this->ChipID) {
+ case DOC_ChipID_Doc2kTSOP:
+ mtd->name = "DiskOnChip 2000 TSOP";
+ this->ioreg = DoC_Mil_CDSN_IO;
+ /* Pretend it's a Millennium */
+ this->ChipID = DOC_ChipID_DocMil;
+ maxchips = MAX_CHIPS;
+ break;
case DOC_ChipID_Doc2k:
mtd->name = "DiskOnChip 2000";
this->ioreg = DoC_2k_CDSN_IO;
+ maxchips = MAX_CHIPS;
break;
case DOC_ChipID_DocMil:
mtd->name = "DiskOnChip Millennium";
this->ioreg = DoC_Mil_CDSN_IO;
+ maxchips = MAX_CHIPS_MIL;
break;
+ default:
+ printk("Unknown ChipID 0x%02x\n", this->ChipID);
+ kfree(mtd);
+ iounmap(this->virtadr);
+ return;
}
printk(KERN_NOTICE "%s found at address 0x%lX\n", mtd->name,
@@ -554,11 +583,12 @@ static void DoC2k_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->ecctype = MTD_ECC_RS_DiskOnChip;
mtd->size = 0;
mtd->erasesize = 0;
mtd->oobblock = 512;
mtd->oobsize = 16;
- mtd->module = THIS_MODULE;
+ mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
mtd->point = NULL;
mtd->unpoint = NULL;
@@ -566,6 +596,7 @@ static void DoC2k_init(struct mtd_info *mtd)
mtd->write = doc_write;
mtd->read_ecc = doc_read_ecc;
mtd->write_ecc = doc_write_ecc;
+ mtd->writev_ecc = doc_writev_ecc;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
mtd->sync = NULL;
@@ -575,14 +606,14 @@ static void DoC2k_init(struct mtd_info *mtd)
this->curfloor = -1;
this->curchip = -1;
- init_MUTEX(&this->lock);
+ mutex_init(&this->lock);
/* Ident all the chips present. */
- DoC_ScanChips(this);
+ DoC_ScanChips(this, maxchips);
if (!this->totlen) {
kfree(mtd);
- iounmap((void *) this->virtadr);
+ iounmap(this->virtadr);
} else {
this->nextdoc = doc2klist;
doc2klist = mtd;
@@ -597,146 +628,154 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t * retlen, u_char * buf)
{
/* Just a special case of doc_read_ecc */
- return doc_read_ecc(mtd, from, len, retlen, buf, NULL);
+ return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
}
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
- size_t * retlen, u_char * buf, u_char * eccbuf)
+ size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel)
{
- struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
- unsigned long docptr;
+ struct DiskOnChip *this = mtd->priv;
+ void __iomem *docptr = this->virtadr;
struct Nand *mychip;
unsigned char syndrome[6];
volatile char dummy;
int i, len256 = 0, ret=0;
-
- docptr = this->virtadr;
+ size_t left = len;
/* Don't allow read past end of device */
if (from >= this->totlen)
return -EINVAL;
- down(&this->lock);
+ mutex_lock(&this->lock);
- /* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
- len = ((from | 0x1ff) + 1) - from;
+ *retlen = 0;
+ while (left) {
+ len = left;
- /* The ECC will not be calculated correctly if less than 512 is read */
- if (len != 0x200 && eccbuf)
- printk(KERN_WARNING
- "ECC needs a full sector read (adr: %lx size %lx)\n",
- (long) from, (long) len);
+ /* Don't allow a single read to cross a 512-byte block boundary */
+ if (from + len > ((from | 0x1ff) + 1))
+ len = ((from | 0x1ff) + 1) - from;
- /* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */
+ /* The ECC will not be calculated correctly if less than 512 is read */
+ if (len != 0x200 && eccbuf)
+ printk(KERN_WARNING
+ "ECC needs a full sector read (adr: %lx size %lx)\n",
+ (long) from, (long) len);
+ /* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */
- /* Find the chip which is to be used and select it */
- mychip = &this->chips[from >> (this->chipshift)];
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
-
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
+ /* Find the chip which is to be used and select it */
+ mychip = &this->chips[from >> (this->chipshift)];
- DoC_Command(this,
- (!this->page256
- && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
- CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
- CDSN_CTRL_ECC_IO);
-
- if (eccbuf) {
- /* Prime the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN, docptr, ECCConf);
- } else {
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
- }
-
- /* treat crossing 256-byte sector for 2M x 8bits devices */
- if (this->page256 && from + len > (from | 0xff) + 1) {
- len256 = (from | 0xff) + 1 - from;
- DoC_ReadBuf(this, buf, len256);
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(this, mychip->floor);
+ DoC_SelectChip(this, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(this, mychip->chip);
+ }
- DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP);
- DoC_Address(this, ADDR_COLUMN_PAGE, from + len256,
- CDSN_CTRL_WP, CDSN_CTRL_ECC_IO);
- }
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
- DoC_ReadBuf(this, &buf[len256], len - len256);
+ DoC_Command(this,
+ (!this->page256
+ && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
+ CDSN_CTRL_WP);
+ DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP,
+ CDSN_CTRL_ECC_IO);
- /* Let the caller know we completed it */
- *retlen = len;
+ if (eccbuf) {
+ /* Prime the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, ECCConf);
+ } else {
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ }
- if (eccbuf) {
- /* Read the ECC data through the DiskOnChip ECC logic */
- /* Note: this will work even with 2M x 8bit devices as */
- /* they have 8 bytes of OOB per 256 page. mf. */
- DoC_ReadBuf(this, eccbuf, 6);
+ /* treat crossing 256-byte sector for 2M x 8bits devices */
+ if (this->page256 && from + len > (from | 0xff) + 1) {
+ len256 = (from | 0xff) + 1 - from;
+ DoC_ReadBuf(this, buf, len256);
- /* Flush the pipeline */
- if (DoC_is_Millennium(this)) {
- dummy = ReadDOC(docptr, ECCConf);
- dummy = ReadDOC(docptr, ECCConf);
- i = ReadDOC(docptr, ECCConf);
- } else {
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- dummy = ReadDOC(docptr, 2k_ECCStatus);
- i = ReadDOC(docptr, 2k_ECCStatus);
+ DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP);
+ DoC_Address(this, ADDR_COLUMN_PAGE, from + len256,
+ CDSN_CTRL_WP, CDSN_CTRL_ECC_IO);
}
- /* Check the ECC Status */
- if (i & 0x80) {
- int nb_errors;
- /* There was an ECC error */
+ DoC_ReadBuf(this, &buf[len256], len - len256);
+
+ /* Let the caller know we completed it */
+ *retlen += len;
+
+ if (eccbuf) {
+ /* Read the ECC data through the DiskOnChip ECC logic */
+ /* Note: this will work even with 2M x 8bit devices as */
+ /* they have 8 bytes of OOB per 256-byte page. mf. */
+ DoC_ReadBuf(this, eccbuf, 6);
+
+ /* Flush the pipeline */
+ if (DoC_is_Millennium(this)) {
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ i = ReadDOC(docptr, ECCConf);
+ } else {
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ i = ReadDOC(docptr, 2k_ECCStatus);
+ }
+
+ /* Check the ECC Status */
+ if (i & 0x80) {
+ int nb_errors;
+ /* There was an ECC error */
#ifdef ECC_DEBUG
- printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
+ printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
#endif
- /* Read the ECC syndrom through the DiskOnChip ECC logic.
- These syndrome will be all ZERO when there is no error */
- for (i = 0; i < 6; i++) {
- syndrome[i] =
- ReadDOC(docptr, ECCSyndrome0 + i);
- }
- nb_errors = doc_decode_ecc(buf, syndrome);
+ /* Read the ECC syndrome through the DiskOnChip ECC logic.
+ These syndromes will be all ZERO when there is no error */
+ for (i = 0; i < 6; i++) {
+ syndrome[i] =
+ ReadDOC(docptr, ECCSyndrome0 + i);
+ }
+ nb_errors = doc_decode_ecc(buf, syndrome);
#ifdef ECC_DEBUG
- printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
+ printk(KERN_ERR "Errors corrected: %x\n", nb_errors);
#endif
- if (nb_errors < 0) {
- /* We return error, but have actually done the read. Not that
- this can be told to user-space, via sys_read(), but at least
- MTD-aware stuff can know about it by checking *retlen */
- ret = -EIO;
- }
- }
+ if (nb_errors < 0) {
+ /* We return error, but have actually done the read. Not that
+ this can be told to user-space, via sys_read(), but at least
+ MTD-aware stuff can know about it by checking *retlen */
+ ret = -EIO;
+ }
+ }
#ifdef PSYCHO_DEBUG
- printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long)from, eccbuf[0], eccbuf[1], eccbuf[2],
- eccbuf[3], eccbuf[4], eccbuf[5]);
+ printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
+ (long)from, eccbuf[0], eccbuf[1], eccbuf[2],
+ eccbuf[3], eccbuf[4], eccbuf[5]);
#endif
-
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
- }
- /* according to 11.4.1, we need to wait for the busy line
- * drop if we read to the end of the page. */
- if(0 == ((from + *retlen) & 0x1ff))
- {
- DoC_WaitReady(this);
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_DIS, docptr , ECCConf);
+ }
+
+ /* according to 11.4.1, we need to wait for the busy line
+ * drop if we read to the end of the page. */
+ if(0 == ((from + len) & 0x1ff))
+ {
+ DoC_WaitReady(this);
+ }
+
+ from += len;
+ left -= len;
+ buf += len;
}
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
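
The rewritten doc_read_ecc() above now loops until the whole request is satisfied instead of silently truncating it at the first 512-byte boundary; each pass clamps len to ((from | 0x1ff) + 1) - from. A minimal userspace sketch of that clamping arithmetic (the helper name is made up for illustration):

#include <stdio.h>

/* Hypothetical helper: bytes that can be transferred starting at 'from'
 * without crossing the next 512-byte sector boundary, mirroring the
 * clamp used inside the loop above. */
static unsigned long chunk_to_boundary(unsigned long from, unsigned long len)
{
	unsigned long end = (from | 0x1ff) + 1;	/* start of the next sector */

	if (from + len > end)
		len = end - from;
	return len;
}

int main(void)
{
	/* A 0x300-byte read starting at 0x1f0 splits into 0x10 + 0x200 + 0xf0. */
	unsigned long from = 0x1f0, left = 0x300;

	while (left) {
		unsigned long len = chunk_to_boundary(from, left);

		printf("read 0x%lx bytes at 0x%lx\n", len, from);
		from += len;
		left -= len;
	}
	return 0;
}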
@@ -745,181 +784,261 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t * retlen, const u_char * buf)
{
char eccbuf[6];
- return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf);
+ return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
}
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
size_t * retlen, const u_char * buf,
- u_char * eccbuf)
+ u_char * eccbuf, struct nand_oobinfo *oobsel)
{
- struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
+ struct DiskOnChip *this = mtd->priv;
int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */
- unsigned long docptr;
+ void __iomem *docptr = this->virtadr;
volatile char dummy;
int len256 = 0;
struct Nand *mychip;
-
- docptr = this->virtadr;
+ size_t left = len;
+ int status;
/* Don't allow write past end of device */
if (to >= this->totlen)
return -EINVAL;
- down(&this->lock);
+ mutex_lock(&this->lock);
- /* Don't allow a single write to cross a 512-byte block boundary */
- if (to + len > ((to | 0x1ff) + 1))
- len = ((to | 0x1ff) + 1) - to;
+ *retlen = 0;
+ while (left) {
+ len = left;
- /* The ECC will not be calculated correctly if less than 512 is written */
- if (len != 0x200 && eccbuf)
- printk(KERN_WARNING
- "ECC needs a full sector write (adr: %lx size %lx)\n",
- (long) to, (long) len);
+ /* Don't allow a single write to cross a 512-byte block boundary */
+ if (to + len > ((to | 0x1ff) + 1))
+ len = ((to | 0x1ff) + 1) - to;
- /* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */
+ /* The ECC will not be calculated correctly if less than 512 is written */
+/* DBB-
+ if (len != 0x200 && eccbuf)
+ printk(KERN_WARNING
+ "ECC needs a full sector write (adr: %lx size %lx)\n",
+ (long) to, (long) len);
+ -DBB */
- /* Find the chip which is to be used and select it */
- mychip = &this->chips[to >> (this->chipshift)];
+ /* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */
- if (this->curfloor != mychip->floor) {
- DoC_SelectFloor(this, mychip->floor);
- DoC_SelectChip(this, mychip->chip);
- } else if (this->curchip != mychip->chip) {
- DoC_SelectChip(this, mychip->chip);
- }
+ /* Find the chip which is to be used and select it */
+ mychip = &this->chips[to >> (this->chipshift)];
- this->curfloor = mychip->floor;
- this->curchip = mychip->chip;
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(this, mychip->floor);
+ DoC_SelectChip(this, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(this, mychip->chip);
+ }
- /* Set device to main plane of flash */
- DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
- DoC_Command(this,
- (!this->page256
- && (to & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
- CDSN_CTRL_WP);
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO);
+ /* Set device to main plane of flash */
+ DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP);
+ DoC_Command(this,
+ (!this->page256
+ && (to & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0,
+ CDSN_CTRL_WP);
- if (eccbuf) {
- /* Prime the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
- } else {
- /* disable the ECC engine */
- WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
- }
+ DoC_Command(this, NAND_CMD_SEQIN, 0);
+ DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO);
- /* treat crossing 256-byte sector for 2M x 8bits devices */
- if (this->page256 && to + len > (to | 0xff) + 1) {
- len256 = (to | 0xff) + 1 - to;
- DoC_WriteBuf(this, buf, len256);
+ if (eccbuf) {
+ /* Prime the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
+ } else {
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ }
+
+ /* treat crossing 256-byte sector for 2M x 8bits devices */
+ if (this->page256 && to + len > (to | 0xff) + 1) {
+ len256 = (to | 0xff) + 1 - to;
+ DoC_WriteBuf(this, buf, len256);
+
+ DoC_Command(this, NAND_CMD_PAGEPROG, 0);
+
+ DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
+ /* There's an implicit DoC_WaitReady() in DoC_Command */
+
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
+
+ if (ReadDOC_(docptr, this->ioreg) & 1) {
+ printk(KERN_ERR "Error programming flash\n");
+ /* Error in programming */
+ *retlen = 0;
+ mutex_unlock(&this->lock);
+ return -EIO;
+ }
+
+ DoC_Command(this, NAND_CMD_SEQIN, 0);
+ DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0,
+ CDSN_CTRL_ECC_IO);
+ }
+
+ DoC_WriteBuf(this, &buf[len256], len - len256);
+
+ if (eccbuf) {
+ WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr,
+ CDSNControl);
+
+ if (DoC_is_Millennium(this)) {
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ } else {
+ WriteDOC_(0, docptr, this->ioreg);
+ WriteDOC_(0, docptr, this->ioreg);
+ WriteDOC_(0, docptr, this->ioreg);
+ }
+
+ WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr,
+ CDSNControl);
+
+ /* Read the ECC data through the DiskOnChip ECC logic */
+ for (di = 0; di < 6; di++) {
+ eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
+ }
+
+ /* Reset the ECC engine */
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+
+#ifdef PSYCHO_DEBUG
+ printk
+ ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
+ (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
+ eccbuf[4], eccbuf[5]);
+#endif
+ }
DoC_Command(this, NAND_CMD_PAGEPROG, 0);
DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
/* There's an implicit DoC_WaitReady() in DoC_Command */
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
+ if (DoC_is_Millennium(this)) {
+ ReadDOC(docptr, ReadPipeInit);
+ status = ReadDOC(docptr, LastDataRead);
+ } else {
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
+ status = ReadDOC_(docptr, this->ioreg);
+ }
- if (ReadDOC_(docptr, this->ioreg) & 1) {
+ if (status & 1) {
printk(KERN_ERR "Error programming flash\n");
/* Error in programming */
*retlen = 0;
- up(&this->lock);
+ mutex_unlock(&this->lock);
return -EIO;
}
- DoC_Command(this, NAND_CMD_SEQIN, 0);
- DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0,
- CDSN_CTRL_ECC_IO);
- }
+ /* Let the caller know we completed it */
+ *retlen += len;
- DoC_WriteBuf(this, &buf[len256], len - len256);
+ if (eccbuf) {
+ unsigned char x[8];
+ size_t dummy;
+ int ret;
- if (eccbuf) {
- WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr,
- CDSNControl);
+ /* Write the ECC data to flash */
+ for (di=0; di<6; di++)
+ x[di] = eccbuf[di];
- if (DoC_is_Millennium(this)) {
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- WriteDOC(0, docptr, NOP);
- } else {
- WriteDOC_(0, docptr, this->ioreg);
- WriteDOC_(0, docptr, this->ioreg);
- WriteDOC_(0, docptr, this->ioreg);
- }
+ x[6]=0x55;
+ x[7]=0x55;
- /* Read the ECC data through the DiskOnChip ECC logic */
- for (di = 0; di < 6; di++) {
- eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di);
+ ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
+ if (ret) {
+ mutex_unlock(&this->lock);
+ return ret;
+ }
}
- /* Reset the ECC engine */
- WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
-
-#ifdef PSYCHO_DEBUG
- printk
- ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
- (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
- eccbuf[4], eccbuf[5]);
-#endif
+ to += len;
+ left -= len;
+ buf += len;
}
- DoC_Command(this, NAND_CMD_PAGEPROG, 0);
+ mutex_unlock(&this->lock);
+ return 0;
+}
- DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
- /* There's an implicit DoC_WaitReady() in DoC_Command */
+static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen,
+ u_char *eccbuf, struct nand_oobinfo *oobsel)
+{
+ static char static_buf[512];
+ static DEFINE_MUTEX(writev_buf_mutex);
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
+ size_t totretlen = 0;
+ size_t thisvecofs = 0;
+ int ret= 0;
- if (ReadDOC_(docptr, this->ioreg) & 1) {
- printk(KERN_ERR "Error programming flash\n");
- /* Error in programming */
- *retlen = 0;
- up(&this->lock);
- return -EIO;
- }
+ mutex_lock(&writev_buf_mutex);
- /* Let the caller know we completed it */
- *retlen = len;
-
- if (eccbuf) {
- unsigned char x[8];
- size_t dummy;
- int ret;
-
- /* Write the ECC data to flash */
- for (di=0; di<6; di++)
- x[di] = eccbuf[di];
-
- x[6]=0x55;
- x[7]=0x55;
-
- ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x);
- up(&this->lock);
- return ret;
+ while(count) {
+ size_t thislen, thisretlen;
+ unsigned char *buf;
+
+ buf = vecs->iov_base + thisvecofs;
+ thislen = vecs->iov_len - thisvecofs;
+
+
+ if (thislen >= 512) {
+ thislen = thislen & ~(512-1);
+ thisvecofs += thislen;
+ } else {
+ /* Not enough to fill a page. Copy into buf */
+ memcpy(static_buf, buf, thislen);
+ buf = &static_buf[thislen];
+
+ while(count && thislen < 512) {
+ vecs++;
+ count--;
+ thisvecofs = min((512-thislen), vecs->iov_len);
+ memcpy(buf, vecs->iov_base, thisvecofs);
+ thislen += thisvecofs;
+ buf += thisvecofs;
+ }
+ buf = static_buf;
+ }
+ if (count && thisvecofs == vecs->iov_len) {
+ thisvecofs = 0;
+ vecs++;
+ count--;
+ }
+ ret = doc_write_ecc(mtd, to, thislen, &thisretlen, buf, eccbuf, oobsel);
+
+ totretlen += thisretlen;
+
+ if (ret || thisretlen != thislen)
+ break;
+
+ to += thislen;
}
- up(&this->lock);
- return 0;
+
+ mutex_unlock(&writev_buf_mutex);
+ *retlen = totretlen;
+ return ret;
}
+
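/* The doc_writev_ecc() added above gathers scattered iovecs into whole
 * 512-byte sectors, bouncing through a static buffer whenever a vector
 * ends mid-sector. A hedged sketch of how a caller might drive it via
 * the mtd_info hook installed in DoC2k_init(); the function, buffer
 * names and error handling here are illustrative only, not part of
 * this patch. */
#include <linux/errno.h>
#include <linux/mtd/mtd.h>

static int example_writev(struct mtd_info *mtd, loff_t to,
			  const u_char *hdr, size_t hdrlen,
			  const u_char *payload, size_t paylen)
{
	struct kvec vecs[2];	/* struct kvec as used by the prototype above */
	u_char eccbuf[6];	/* mirrors what doc_write() passes down */
	size_t retlen = 0;

	vecs[0].iov_base = (void *)hdr;
	vecs[0].iov_len = hdrlen;
	vecs[1].iov_base = (void *)payload;
	vecs[1].iov_len = paylen;

	if (!mtd->writev_ecc)
		return -EOPNOTSUPP;

	return mtd->writev_ecc(mtd, vecs, 2, to, &retlen, eccbuf, NULL);
}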
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t * retlen, u_char * buf)
{
- struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
+ struct DiskOnChip *this = mtd->priv;
int len256 = 0, ret;
- unsigned long docptr;
struct Nand *mychip;
- down(&this->lock);
-
- docptr = this->virtadr;
+ mutex_lock(&this->lock);
mychip = &this->chips[ofs >> this->chipshift];
@@ -962,10 +1081,10 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
/* Reading the full OOB data drops us off of the end of the page,
* causing the flash device to go into busy mode, so we need
* to wait until ready 11.4.1 and Toshiba TC58256FT docs */
-
+
ret = DoC_WaitReady(this);
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
@@ -973,11 +1092,12 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t * retlen, const u_char * buf)
{
- struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
+ struct DiskOnChip *this = mtd->priv;
int len256 = 0;
- unsigned long docptr = this->virtadr;
+ void __iomem *docptr = this->virtadr;
struct Nand *mychip = &this->chips[ofs >> this->chipshift];
volatile int dummy;
+ int status;
// printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len,
// buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]);
@@ -1026,10 +1146,16 @@ static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
DoC_Command(this, NAND_CMD_STATUS, 0);
/* DoC_WaitReady() is implicit in DoC_Command */
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
+ if (DoC_is_Millennium(this)) {
+ ReadDOC(docptr, ReadPipeInit);
+ status = ReadDOC(docptr, LastDataRead);
+ } else {
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
+ status = ReadDOC_(docptr, this->ioreg);
+ }
- if (ReadDOC_(docptr, this->ioreg) & 1) {
+ if (status & 1) {
printk(KERN_ERR "Error programming oob data\n");
/* There was an error */
*retlen = 0;
@@ -1045,10 +1171,16 @@ static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
DoC_Command(this, NAND_CMD_STATUS, 0);
/* DoC_WaitReady() is implicit in DoC_Command */
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
+ if (DoC_is_Millennium(this)) {
+ ReadDOC(docptr, ReadPipeInit);
+ status = ReadDOC(docptr, LastDataRead);
+ } else {
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
+ status = ReadDOC_(docptr, this->ioreg);
+ }
- if (ReadDOC_(docptr, this->ioreg) & 1) {
+ if (status & 1) {
printk(KERN_ERR "Error programming oob data\n");
/* There was an error */
*retlen = 0;
@@ -1059,39 +1191,38 @@ static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
return 0;
}
-
+
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t * retlen, const u_char * buf)
{
- struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
+ struct DiskOnChip *this = mtd->priv;
int ret;
- down(&this->lock);
+ mutex_lock(&this->lock);
ret = doc_write_oob_nolock(mtd, ofs, len, retlen, buf);
- up(&this->lock);
+ mutex_unlock(&this->lock);
return ret;
}
static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
{
- struct DiskOnChip *this = (struct DiskOnChip *) mtd->priv;
+ struct DiskOnChip *this = mtd->priv;
__u32 ofs = instr->addr;
__u32 len = instr->len;
volatile int dummy;
- unsigned long docptr;
+ void __iomem *docptr = this->virtadr;
struct Nand *mychip;
+ int status;
- down(&this->lock);
+ mutex_lock(&this->lock);
if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) {
- up(&this->lock);
+ mutex_unlock(&this->lock);
return -EINVAL;
}
instr->state = MTD_ERASING;
-
- docptr = this->virtadr;
/* FIXME: Do this in the background. Use timers or schedule_task() */
while(len) {
@@ -1112,10 +1243,16 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP);
- dummy = ReadDOC(docptr, CDSNSlowIO);
- DoC_Delay(this, 2);
-
- if (ReadDOC_(docptr, this->ioreg) & 1) {
+ if (DoC_is_Millennium(this)) {
+ ReadDOC(docptr, ReadPipeInit);
+ status = ReadDOC(docptr, LastDataRead);
+ } else {
+ dummy = ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(this, 2);
+ status = ReadDOC_(docptr, this->ioreg);
+ }
+
+ if (status & 1) {
printk(KERN_ERR "Error erasing at 0x%x\n", ofs);
/* There was an error */
instr->state = MTD_ERASE_FAILED;
@@ -1127,10 +1264,9 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
instr->state = MTD_ERASE_DONE;
callback:
- if (instr->callback)
- instr->callback(instr);
+ mtd_erase_callback(instr);
- up(&this->lock);
+ mutex_unlock(&this->lock);
return 0;
}
@@ -1141,7 +1277,7 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
*
****************************************************************************/
-int __init init_doc2000(void)
+static int __init init_doc2000(void)
{
inter_module_register(im_name, THIS_MODULE, &DoC2k_init);
return 0;
@@ -1153,12 +1289,12 @@ static void __exit cleanup_doc2000(void)
struct DiskOnChip *this;
while ((mtd = doc2klist)) {
- this = (struct DiskOnChip *) mtd->priv;
+ this = mtd->priv;
doc2klist = this->nextdoc;
del_mtd_device(mtd);
- iounmap((void *) this->virtadr);
+ iounmap(this->virtadr);
kfree(this->chips);
kfree(mtd);
}
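
A recurring change throughout these hunks is the conversion from the old semaphore API (init_MUTEX()/down()/up()) to the mutex API (mutex_init()/mutex_lock()/mutex_unlock()) for the per-device lock. A minimal sketch of the pattern, assuming a driver-private structure along the lines of struct DiskOnChip:

#include <linux/mutex.h>

struct example_dev {
	struct mutex lock;		/* was: struct semaphore lock */
	/* ... rest of the device state ... */
};

static void example_init(struct example_dev *dev)
{
	mutex_init(&dev->lock);		/* was: init_MUTEX(&dev->lock) */
}

static int example_op(struct example_dev *dev)
{
	mutex_lock(&dev->lock);		/* was: down(&dev->lock) */
	/* ... touch the hardware under the lock ... */
	mutex_unlock(&dev->lock);	/* was: up(&dev->lock) */
	return 0;
}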
diff --git a/linux-2.4.x/drivers/mtd/devices/doc2001.c b/linux-2.4.x/drivers/mtd/devices/doc2001.c
index b34591a..fcb28a6 100644
--- a/linux-2.4.x/drivers/mtd/devices/doc2001.c
+++ b/linux-2.4.x/drivers/mtd/devices/doc2001.c
@@ -4,7 +4,7 @@
* (c) 1999 Machine Vision Holdings, Inc.
* (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
*
- * $Id: doc2001.c,v 1.35 2001/10/02 15:05:13 dwmw2 Exp $
+ * $Id: doc2001.c,v 1.49 2005/11/07 11:14:24 gleixner Exp $
*/
#include <linux/kernel.h>
@@ -19,10 +19,10 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/bitops.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ids.h>
#include <linux/mtd/doc2000.h>
/* #define ECC_DEBUG */
@@ -38,9 +38,11 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf);
static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf, u_char *eccbuf);
+ size_t *retlen, u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel);
static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf, u_char *eccbuf);
+ size_t *retlen, const u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel);
static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
size_t *retlen, u_char *buf);
static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
@@ -50,7 +52,7 @@ static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
static struct mtd_info *docmillist = NULL;
/* Perform the required delay cycles by reading from the NOP register */
-static void DoC_Delay(unsigned long docptr, unsigned short cycles)
+static void DoC_Delay(void __iomem * docptr, unsigned short cycles)
{
volatile char dummy;
int i;
@@ -60,7 +62,7 @@ static void DoC_Delay(unsigned long docptr, unsigned short cycles)
}
/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(unsigned long docptr)
+static int _DoC_WaitReady(void __iomem * docptr)
{
unsigned short c = 0xffff;
@@ -77,7 +79,7 @@ static int _DoC_WaitReady(unsigned long docptr)
return (c == 0);
}
-static inline int DoC_WaitReady(unsigned long docptr)
+static inline int DoC_WaitReady(void __iomem * docptr)
{
/* This is inline, to optimise the common case, where it's ready instantly */
int ret = 0;
@@ -101,7 +103,7 @@ static inline int DoC_WaitReady(unsigned long docptr)
with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-static inline void DoC_Command(unsigned long docptr, unsigned char command,
+static inline void DoC_Command(void __iomem * docptr, unsigned char command,
unsigned char xtraflags)
{
/* Assert the CLE (Command Latch Enable) line to the flash chip */
@@ -121,7 +123,7 @@ static inline void DoC_Command(unsigned long docptr, unsigned char command,
with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is
required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */
-static inline void DoC_Address(unsigned long docptr, int numbytes, unsigned long ofs,
+static inline void DoC_Address(void __iomem * docptr, int numbytes, unsigned long ofs,
unsigned char xtraflags1, unsigned char xtraflags2)
{
/* Assert the ALE (Address Latch Enable) line to the flash chip */
@@ -159,7 +161,7 @@ static inline void DoC_Address(unsigned long docptr, int numbytes, unsigned long
}
/* DoC_SelectChip: Select a given flash chip within the current floor */
-static int DoC_SelectChip(unsigned long docptr, int chip)
+static int DoC_SelectChip(void __iomem * docptr, int chip)
{
/* Select the individual flash chip requested */
WriteDOC(chip, docptr, CDSNDeviceSelect);
@@ -170,7 +172,7 @@ static int DoC_SelectChip(unsigned long docptr, int chip)
}
/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
-static int DoC_SelectFloor(unsigned long docptr, int floor)
+static int DoC_SelectFloor(void __iomem * docptr, int floor)
{
/* Select the floor (bank) of chips required */
WriteDOC(floor, docptr, FloorSelect);
@@ -182,7 +184,7 @@ static int DoC_SelectFloor(unsigned long docptr, int floor)
/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
{
- int mfr, id, i;
+ int mfr, id, i, j;
volatile char dummy;
/* Page in the required floor/chip
@@ -194,10 +196,10 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
DoC_Command(doc->virtadr, NAND_CMD_RESET, CDSN_CTRL_WP);
DoC_WaitReady(doc->virtadr);
- /* Read the NAND chip ID: 1. Send ReadID command */
+ /* Read the NAND chip ID: 1. Send ReadID command */
DoC_Command(doc->virtadr, NAND_CMD_READID, CDSN_CTRL_WP);
- /* Read the NAND chip ID: 2. Send address byte zero */
+ /* Read the NAND chip ID: 2. Send address byte zero */
DoC_Address(doc->virtadr, 1, 0x00, CDSN_CTRL_WP, 0x00);
/* Read the manufacturer and device id codes of the flash device through
@@ -216,14 +218,18 @@ static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
/* FIXME: to deal with multi-flash on multi-Millennium case more carefully */
for (i = 0; nand_flash_ids[i].name != NULL; i++) {
- if (mfr == nand_flash_ids[i].manufacture_id &&
- id == nand_flash_ids[i].model_id) {
+ if ( id == nand_flash_ids[i].id) {
+ /* Try to identify manufacturer */
+ for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
+ if (nand_manuf_ids[j].id == mfr)
+ break;
+ }
printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
- "Chip ID: %2.2X (%s)\n",
- mfr, id, nand_flash_ids[i].name);
+ "Chip ID: %2.2X (%s:%s)\n",
+ mfr, id, nand_manuf_ids[j].name, nand_flash_ids[i].name);
doc->mfr = mfr;
doc->id = id;
- doc->chipshift = nand_flash_ids[i].chipshift;
+ doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
break;
}
}
@@ -269,7 +275,7 @@ static void DoC_ScanChips(struct DiskOnChip *this)
return;
}
- /* Fill out the chip array with {floor, chipno} for each
+ /* Fill out the chip array with {floor, chipno} for each
* detected chip in the device. */
for (floor = 0, ret = 0; floor < MAX_FLOORS_MIL; floor++) {
for (chip = 0 ; chip < numchips[floor] ; chip++) {
@@ -303,7 +309,7 @@ static int DoCMil_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
if (tmp1 != tmp2)
return 0;
-
+
WriteDOC((tmp1+1) % 0xff, doc1->virtadr, AliasResolution);
tmp2 = ReadDOC(doc2->virtadr, AliasResolution);
if (tmp2 == (tmp1+1) % 0xff)
@@ -329,23 +335,23 @@ static const char im_name[] = "DoCMil_init";
*/
static void DoCMil_init(struct mtd_info *mtd)
{
- struct DiskOnChip *this = (struct DiskOnChip *)mtd->priv;
+ struct DiskOnChip *this = mtd->priv;
struct DiskOnChip *old = NULL;
/* We must avoid being called twice for the same device. */
if (docmillist)
- old = (struct DiskOnChip *)docmillist->priv;
+ old = docmillist->priv;
while (old) {
if (DoCMil_is_alias(this, old)) {
printk(KERN_NOTICE "Ignoring DiskOnChip Millennium at "
"0x%lX - already configured\n", this->physadr);
- iounmap((void *)this->virtadr);
+ iounmap(this->virtadr);
kfree(mtd);
return;
}
if (old->nextdoc)
- old = (struct DiskOnChip *)old->nextdoc->priv;
+ old = old->nextdoc->priv;
else
old = NULL;
}
@@ -356,14 +362,15 @@ static void DoCMil_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->ecctype = MTD_ECC_RS_DiskOnChip;
mtd->size = 0;
- /* FIXME: erase size is not always 8kB */
+ /* FIXME: erase size is not always 8KiB */
mtd->erasesize = 0x2000;
mtd->oobblock = 512;
mtd->oobsize = 16;
- mtd->module = THIS_MODULE;
+ mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
mtd->point = NULL;
mtd->unpoint = NULL;
@@ -385,7 +392,7 @@ static void DoCMil_init(struct mtd_info *mtd)
if (!this->totlen) {
kfree(mtd);
- iounmap((void *)this->virtadr);
+ iounmap(this->virtadr);
} else {
this->nextdoc = docmillist;
docmillist = mtd;
@@ -399,17 +406,18 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
/* Just a special case of doc_read_ecc */
- return doc_read_ecc(mtd, from, len, retlen, buf, NULL);
+ return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
}
static int doc_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf, u_char *eccbuf)
+ size_t *retlen, u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel)
{
int i, ret;
volatile char dummy;
unsigned char syndrome[6];
- struct DiskOnChip *this = (struct DiskOnChip *)mtd->priv;
- unsigned long docptr = this->virtadr;
+ struct DiskOnChip *this = mtd->priv;
+ void __iomem *docptr = this->virtadr;
struct Nand *mychip = &this->chips[from >> (this->chipshift)];
/* Don't allow read past end of device */
@@ -417,7 +425,7 @@ static int doc_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
return -EINVAL;
/* Don't allow a single read to cross a 512-byte block boundary */
- if (from + len > ((from | 0x1ff) + 1))
+ if (from + len > ((from | 0x1ff) + 1))
len = ((from | 0x1ff) + 1) - from;
/* Find the chip which is to be used and select it */
@@ -525,16 +533,17 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
char eccbuf[6];
- return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf);
+ return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
}
static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf, u_char *eccbuf)
+ size_t *retlen, const u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel)
{
int i,ret = 0;
volatile char dummy;
- struct DiskOnChip *this = (struct DiskOnChip *)mtd->priv;
- unsigned long docptr = this->virtadr;
+ struct DiskOnChip *this = mtd->priv;
+ void __iomem *docptr = this->virtadr;
struct Nand *mychip = &this->chips[to >> (this->chipshift)];
/* Don't allow write past end of device */
@@ -543,7 +552,7 @@ static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
#if 0
/* Don't allow a single write to cross a 512-byte block boundary */
- if (to + len > ( (to | 0x1ff) + 1))
+ if (to + len > ( (to | 0x1ff) + 1))
len = ((to | 0x1ff) + 1) - to;
#else
/* Don't allow writes which aren't exactly one block */
@@ -623,7 +632,7 @@ static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
/* write the block status BLOCK_USED (0x5555) at the end of ECC data
FIXME: this is only a hack for programming the IPL area for LinuxBIOS
- and should be replace with proper codes in user space utilities */
+ and should be replaced with proper code in user-space utilities */
WriteDOC(0x55, docptr, Mil_CDSN_IO);
WriteDOC(0x55, docptr, Mil_CDSN_IO + 1);
@@ -668,8 +677,8 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
int i;
#endif
volatile char dummy;
- struct DiskOnChip *this = (struct DiskOnChip *)mtd->priv;
- unsigned long docptr = this->virtadr;
+ struct DiskOnChip *this = mtd->priv;
+ void __iomem *docptr = this->virtadr;
struct Nand *mychip = &this->chips[ofs >> this->chipshift];
/* Find the chip which is to be used and select it */
@@ -720,8 +729,8 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
#endif
volatile char dummy;
int ret = 0;
- struct DiskOnChip *this = (struct DiskOnChip *)mtd->priv;
- unsigned long docptr = this->virtadr;
+ struct DiskOnChip *this = mtd->priv;
+ void __iomem *docptr = this->virtadr;
struct Nand *mychip = &this->chips[ofs >> this->chipshift];
/* Find the chip which is to be used and select it */
@@ -787,13 +796,13 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
int doc_erase (struct mtd_info *mtd, struct erase_info *instr)
{
volatile char dummy;
- struct DiskOnChip *this = (struct DiskOnChip *)mtd->priv;
+ struct DiskOnChip *this = mtd->priv;
__u32 ofs = instr->addr;
__u32 len = instr->len;
- unsigned long docptr = this->virtadr;
+ void __iomem *docptr = this->virtadr;
struct Nand *mychip = &this->chips[ofs >> this->chipshift];
- if (len != mtd->erasesize)
+ if (len != mtd->erasesize)
printk(KERN_WARNING "Erase not right size (%x != %x)n",
len, mtd->erasesize);
@@ -836,8 +845,7 @@ int doc_erase (struct mtd_info *mtd, struct erase_info *instr)
instr->state = MTD_ERASE_DONE;
dummy = ReadDOC(docptr, LastDataRead);
- if (instr->callback)
- instr->callback(instr);
+ mtd_erase_callback(instr);
return 0;
}
@@ -848,7 +856,7 @@ int doc_erase (struct mtd_info *mtd, struct erase_info *instr)
*
****************************************************************************/
-int __init init_doc2001(void)
+static int __init init_doc2001(void)
{
inter_module_register(im_name, THIS_MODULE, &DoCMil_init);
return 0;
@@ -860,12 +868,12 @@ static void __exit cleanup_doc2001(void)
struct DiskOnChip *this;
while ((mtd=docmillist)) {
- this = (struct DiskOnChip *)mtd->priv;
+ this = mtd->priv;
docmillist = this->nextdoc;
-
+
del_mtd_device(mtd);
-
- iounmap((void *)this->virtadr);
+
+ iounmap(this->virtadr);
kfree(this->chips);
kfree(mtd);
}
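
The chip-identification change above also derives chipshift from the new ID table instead of storing it: nand_flash_ids[].chipsize holds the device size in MiB, so ffs(chipsize << 20) - 1 yields log2 of the size in bytes for a power-of-two chip. A quick userspace check of that arithmetic (illustrative only):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	/* e.g. a 32 MiB chip: 32 << 20 == 0x2000000, ffs() returns 26,
	 * so chipshift == 25 and 1 << 25 is 32 MiB again. */
	int chipsize_mib = 32;
	int chipshift = ffs(chipsize_mib << 20) - 1;

	printf("chipshift = %d, size = %ld bytes\n",
	       chipshift, 1L << chipshift);
	return 0;
}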
diff --git a/linux-2.4.x/drivers/mtd/devices/doc2001plus.c b/linux-2.4.x/drivers/mtd/devices/doc2001plus.c
new file mode 100644
index 0000000..0595cc7
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/devices/doc2001plus.c
@@ -0,0 +1,1154 @@
+/*
+ * Linux driver for Disk-On-Chip Millennium Plus
+ *
+ * (c) 2002-2003 Greg Ungerer <gerg@snapgear.com>
+ * (c) 2002-2003 SnapGear Inc
+ * (c) 1999 Machine Vision Holdings, Inc.
+ * (c) 1999, 2000 David Woodhouse <dwmw2@infradead.org>
+ *
+ * $Id: doc2001plus.c,v 1.14 2005/11/07 11:14:24 gleixner Exp $
+ *
+ * Released under GPL
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/doc2000.h>
+
+/* #define ECC_DEBUG */
+
+/* I have no idea why some DoC chips cannot use memcpy_from|to_io().
+ * This may be due to different revisions of the built-in ASIC controller, or
+ * simply a QA/bug issue. Who knows? If you have trouble, please uncomment
+ * this: */
+#undef USE_MEMCPY
+
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf);
+static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf);
+static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel);
+static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel);
+static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t *retlen, u_char *buf);
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t *retlen, const u_char *buf);
+static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
+
+static struct mtd_info *docmilpluslist = NULL;
+
+
+/* Perform the required delay cycles by writing to the NOP register */
+static void DoC_Delay(void __iomem * docptr, int cycles)
+{
+ int i;
+
+ for (i = 0; (i < cycles); i++)
+ WriteDOC(0, docptr, Mplus_NOP);
+}
+
+#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
+
+/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
+static int _DoC_WaitReady(void __iomem * docptr)
+{
+ unsigned int c = 0xffff;
+
+ DEBUG(MTD_DEBUG_LEVEL3,
+ "_DoC_WaitReady called for out-of-line wait\n");
+
+ /* Out-of-line routine to wait for chip response */
+ while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c)
+ ;
+
+ if (c == 0)
+ DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
+
+ return (c == 0);
+}
+
+static inline int DoC_WaitReady(void __iomem * docptr)
+{
+ /* This is inline, to optimise the common case, where it's ready instantly */
+ int ret = 0;
+
+ /* a read from the NOP register should be issued prior to the read from CDSNControl
+ see Software Requirement 11.4 item 2. */
+ DoC_Delay(docptr, 4);
+
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(docptr);
+
+ return ret;
+}
+
+/* For some reason the Millennium Plus seems to occasionally put itself
+ * into reset mode. For me this happens randomly, with no pattern that I
+ * can detect. M-Systems suggest always checking this on any block-level
+ * operation and setting it back to normal mode if it is in reset mode.
+ */
+static inline void DoC_CheckASIC(void __iomem * docptr)
+{
+ /* Make sure the DoC is in normal mode */
+ if ((ReadDOC(docptr, Mplus_DOCControl) & DOC_MODE_NORMAL) == 0) {
+ WriteDOC((DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_DOCControl);
+ WriteDOC(~(DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_CtrlConfirm);
+ }
+}
+
+/* DoC_Command: Send a flash command to the flash chip through the Flash
+ * command register. Need 2 Write Pipeline Terminates to complete send.
+ */
+static inline void DoC_Command(void __iomem * docptr, unsigned char command,
+ unsigned char xtraflags)
+{
+ WriteDOC(command, docptr, Mplus_FlashCmd);
+ WriteDOC(command, docptr, Mplus_WritePipeTerm);
+ WriteDOC(command, docptr, Mplus_WritePipeTerm);
+}
+
+/* DoC_Address: Set the current address for the flash chip through the Flash
+ * Address register. Need 2 Write Pipeline Terminates to complete send.
+ */
+static inline void DoC_Address(struct DiskOnChip *doc, int numbytes,
+ unsigned long ofs, unsigned char xtraflags1,
+ unsigned char xtraflags2)
+{
+ void __iomem * docptr = doc->virtadr;
+
+ /* Allow for possible Mill Plus internal flash interleaving */
+ ofs >>= doc->interleave;
+
+ switch (numbytes) {
+ case 1:
+ /* Send single byte, bits 0-7. */
+ WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
+ break;
+ case 2:
+ /* Send bits 9-16 followed by 17-23 */
+ WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress);
+ WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
+ break;
+ case 3:
+ /* Send 0-7, 9-16, then 17-23 */
+ WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress);
+ WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress);
+ WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress);
+ break;
+ default:
+ return;
+ }
+
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+}
+
+/* DoC_SelectChip: Select a given flash chip within the current floor */
+static int DoC_SelectChip(void __iomem * docptr, int chip)
+{
+ /* No choice for flash chip on Millennium Plus */
+ return 0;
+}
+
+/* DoC_SelectFloor: Select a given floor (bank of flash chips) */
+static int DoC_SelectFloor(void __iomem * docptr, int floor)
+{
+ WriteDOC((floor & 0x3), docptr, Mplus_DeviceSelect);
+ return 0;
+}
+
+/*
+ * Translate the given offset into the appropriate command and offset.
+ * This does the mapping using the 16bit interleave layout defined by
+ * M-Systems, and looks like this for a sector pair:
+ * +-----------+-------+-------+-------+--------------+---------+-----------+
+ * | 0 --- 511 |512-517|518-519|520-521| 522 --- 1033 |1034-1039|1040 - 1055|
+ * +-----------+-------+-------+-------+--------------+---------+-----------+
+ * | Data 0 | ECC 0 |Flags0 |Flags1 | Data 1 |ECC 1 | OOB 1 + 2 |
+ * +-----------+-------+-------+-------+--------------+---------+-----------+
+ */
+/* FIXME: This lives in INFTL not here. Other users of flash devices
+ may not want it */
+static unsigned int DoC_GetDataOffset(struct mtd_info *mtd, loff_t *from)
+{
+ struct DiskOnChip *this = mtd->priv;
+
+ if (this->interleave) {
+ unsigned int ofs = *from & 0x3ff;
+ unsigned int cmd;
+
+ if (ofs < 512) {
+ cmd = NAND_CMD_READ0;
+ ofs &= 0x1ff;
+ } else if (ofs < 1014) {
+ cmd = NAND_CMD_READ1;
+ ofs = (ofs & 0x1ff) + 10;
+ } else {
+ cmd = NAND_CMD_READOOB;
+ ofs = ofs - 1014;
+ }
+
+ *from = (*from & ~0x3ff) | ofs;
+ return cmd;
+ } else {
+ /* No interleave */
+ if ((*from) & 0x100)
+ return NAND_CMD_READ1;
+ return NAND_CMD_READ0;
+ }
+}
+
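/* Worked example (illustrative sketch, not part of the driver): in the
 * interleaved case a logical offset of 512 (the first byte of Data 1 in
 * the table above) is issued as NAND_CMD_READ1 with column 10, i.e. just
 * past the 10 bytes of ECC 0 and flag words that separate Data 0 from
 * Data 1. A standalone rework of the same arithmetic: */
#include <stdio.h>

static unsigned int map_interleaved(unsigned long *from)
{
	unsigned int ofs = *from & 0x3ff;
	unsigned int cmd;

	if (ofs < 512) {		/* Data 0 */
		cmd = 0x00;		/* NAND_CMD_READ0 */
		ofs &= 0x1ff;
	} else if (ofs < 1014) {	/* ECC 0, flags, Data 1, ECC 1 */
		cmd = 0x01;		/* NAND_CMD_READ1 */
		ofs = (ofs & 0x1ff) + 10;
	} else {			/* OOB 1 + 2 */
		cmd = 0x50;		/* NAND_CMD_READOOB */
		ofs = ofs - 1014;
	}

	*from = (*from & ~0x3ff) | ofs;
	return cmd;
}

int main(void)
{
	unsigned long from = 0x600;	/* logical byte 512 of its sector pair */
	unsigned int cmd = map_interleaved(&from);

	/* Prints cmd=0x1 from=0x40a: READ1, column 10. */
	printf("cmd=0x%x from=0x%lx\n", cmd, from);
	return 0;
}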
+static unsigned int DoC_GetECCOffset(struct mtd_info *mtd, loff_t *from)
+{
+ unsigned int ofs, cmd;
+
+ if (*from & 0x200) {
+ cmd = NAND_CMD_READOOB;
+ ofs = 10 + (*from & 0xf);
+ } else {
+ cmd = NAND_CMD_READ1;
+ ofs = (*from & 0xf);
+ }
+
+ *from = (*from & ~0x3ff) | ofs;
+ return cmd;
+}
+
+static unsigned int DoC_GetFlagsOffset(struct mtd_info *mtd, loff_t *from)
+{
+ unsigned int ofs, cmd;
+
+ cmd = NAND_CMD_READ1;
+ ofs = (*from & 0x200) ? 8 : 6;
+ *from = (*from & ~0x3ff) | ofs;
+ return cmd;
+}
+
+static unsigned int DoC_GetHdrOffset(struct mtd_info *mtd, loff_t *from)
+{
+ unsigned int ofs, cmd;
+
+ cmd = NAND_CMD_READOOB;
+ ofs = (*from & 0x200) ? 24 : 16;
+ *from = (*from & ~0x3ff) | ofs;
+ return cmd;
+}
+
+static inline void MemReadDOC(void __iomem * docptr, unsigned char *buf, int len)
+{
+#ifndef USE_MEMCPY
+ int i;
+ for (i = 0; i < len; i++)
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i);
+#else
+ memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len);
+#endif
+}
+
+static inline void MemWriteDOC(void __iomem * docptr, unsigned char *buf, int len)
+{
+#ifndef USE_MEMCPY
+ int i;
+ for (i = 0; i < len; i++)
+ WriteDOC(buf[i], docptr, Mil_CDSN_IO + i);
+#else
+ memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len);
+#endif
+}
+
+/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */
+static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip)
+{
+ int mfr, id, i, j;
+ volatile char dummy;
+ void __iomem * docptr = doc->virtadr;
+
+ /* Page in the required floor/chip */
+ DoC_SelectFloor(docptr, floor);
+ DoC_SelectChip(docptr, chip);
+
+ /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
+ WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
+
+ /* Reset the chip, see Software Requirement 11.4 item 1. */
+ DoC_Command(docptr, NAND_CMD_RESET, 0);
+ DoC_WaitReady(docptr);
+
+ /* Read the NAND chip ID: 1. Send ReadID command */
+ DoC_Command(docptr, NAND_CMD_READID, 0);
+
+ /* Read the NAND chip ID: 2. Send address byte zero */
+ DoC_Address(doc, 1, 0x00, 0, 0x00);
+
+ WriteDOC(0, docptr, Mplus_FlashControl);
+ DoC_WaitReady(docptr);
+
+ /* Read the manufacturer and device id codes of the flash device through
+ CDSN IO register see Software Requirement 11.4 item 5.*/
+ dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
+ dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ mfr = ReadDOC(docptr, Mil_CDSN_IO);
+ if (doc->interleave)
+ dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
+
+ id = ReadDOC(docptr, Mil_CDSN_IO);
+ if (doc->interleave)
+ dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */
+
+ dummy = ReadDOC(docptr, Mplus_LastDataRead);
+ dummy = ReadDOC(docptr, Mplus_LastDataRead);
+
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+
+ /* No response - return failure */
+ if (mfr == 0xff || mfr == 0)
+ return 0;
+
+ for (i = 0; nand_flash_ids[i].name != NULL; i++) {
+ if (id == nand_flash_ids[i].id) {
+ /* Try to identify manufacturer */
+ for (j = 0; nand_manuf_ids[j].id != 0x0; j++) {
+ if (nand_manuf_ids[j].id == mfr)
+ break;
+ }
+ printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, "
+ "Chip ID: %2.2X (%s:%s)\n", mfr, id,
+ nand_manuf_ids[j].name, nand_flash_ids[i].name);
+ doc->mfr = mfr;
+ doc->id = id;
+ doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1;
+ doc->erasesize = nand_flash_ids[i].erasesize << doc->interleave;
+ break;
+ }
+ }
+
+ if (nand_flash_ids[i].name == NULL)
+ return 0;
+ return 1;
+}
+
+/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */
+static void DoC_ScanChips(struct DiskOnChip *this)
+{
+ int floor, chip;
+ int numchips[MAX_FLOORS_MPLUS];
+ int ret;
+
+ this->numchips = 0;
+ this->mfr = 0;
+ this->id = 0;
+
+ /* Work out the intended interleave setting */
+ this->interleave = 0;
+ if (this->ChipID == DOC_ChipID_DocMilPlus32)
+ this->interleave = 1;
+
+ /* Check the ASIC agrees */
+ if ( (this->interleave << 2) !=
+ (ReadDOC(this->virtadr, Mplus_Configuration) & 4)) {
+ u_char conf = ReadDOC(this->virtadr, Mplus_Configuration);
+ printk(KERN_NOTICE "Setting DiskOnChip Millennium Plus interleave to %s\n",
+ this->interleave?"on (16-bit)":"off (8-bit)");
+ conf ^= 4;
+ WriteDOC(conf, this->virtadr, Mplus_Configuration);
+ }
+
+ /* For each floor, find the number of valid chips it contains */
+ for (floor = 0,ret = 1; floor < MAX_FLOORS_MPLUS; floor++) {
+ numchips[floor] = 0;
+ for (chip = 0; chip < MAX_CHIPS_MPLUS && ret != 0; chip++) {
+ ret = DoC_IdentChip(this, floor, chip);
+ if (ret) {
+ numchips[floor]++;
+ this->numchips++;
+ }
+ }
+ }
+ /* If there are none at all that we recognise, bail */
+ if (!this->numchips) {
+ printk("No flash chips recognised.\n");
+ return;
+ }
+
+ /* Allocate an array to hold the information for each chip */
+ this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL);
+ if (!this->chips){
+ printk("MTD: No memory for allocating chip info structures\n");
+ return;
+ }
+
+ /* Fill out the chip array with {floor, chipno} for each
+ * detected chip in the device. */
+ for (floor = 0, ret = 0; floor < MAX_FLOORS_MPLUS; floor++) {
+ for (chip = 0 ; chip < numchips[floor] ; chip++) {
+ this->chips[ret].floor = floor;
+ this->chips[ret].chip = chip;
+ this->chips[ret].curadr = 0;
+ this->chips[ret].curmode = 0x50;
+ ret++;
+ }
+ }
+
+ /* Calculate and print the total size of the device */
+ this->totlen = this->numchips * (1 << this->chipshift);
+ printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n",
+ this->numchips ,this->totlen >> 20);
+}
+
+static int DoCMilPlus_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
+{
+ int tmp1, tmp2, retval;
+
+ if (doc1->physadr == doc2->physadr)
+ return 1;
+
+ /* Use the alias resolution register which was set aside for this
+ * purpose. If its value is the same on both chips, they might
+ * be the same chip, and we write to one and check for a change in
+ * the other. It's unclear if this register is usable in the
+ * DoC 2000 (it's in the Millennium docs), but it seems to work. */
+ tmp1 = ReadDOC(doc1->virtadr, Mplus_AliasResolution);
+ tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
+ if (tmp1 != tmp2)
+ return 0;
+
+ WriteDOC((tmp1+1) % 0xff, doc1->virtadr, Mplus_AliasResolution);
+ tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution);
+ if (tmp2 == (tmp1+1) % 0xff)
+ retval = 1;
+ else
+ retval = 0;
+
+ /* Restore register contents. May not be necessary, but do it just to
+ * be safe. */
+ WriteDOC(tmp1, doc1->virtadr, Mplus_AliasResolution);
+
+ return retval;
+}
+
+static const char im_name[] = "DoCMilPlus_init";
+
+/* This routine is made available to other mtd code via
+ * inter_module_register. It must only be accessed through
+ * inter_module_get which will bump the use count of this module. The
+ * addresses passed back in mtd are valid as long as the use count of
+ * this module is non-zero, i.e. between inter_module_get and
+ * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
+ */
+static void DoCMilPlus_init(struct mtd_info *mtd)
+{
+ struct DiskOnChip *this = mtd->priv;
+ struct DiskOnChip *old = NULL;
+
+ /* We must avoid being called twice for the same device. */
+ if (docmilpluslist)
+ old = docmilpluslist->priv;
+
+ while (old) {
+ if (DoCMilPlus_is_alias(this, old)) {
+ printk(KERN_NOTICE "Ignoring DiskOnChip Millennium "
+ "Plus at 0x%lX - already configured\n",
+ this->physadr);
+ iounmap(this->virtadr);
+ kfree(mtd);
+ return;
+ }
+ if (old->nextdoc)
+ old = old->nextdoc->priv;
+ else
+ old = NULL;
+ }
+
+ mtd->name = "DiskOnChip Millennium Plus";
+ printk(KERN_NOTICE "DiskOnChip Millennium Plus found at "
+ "address 0x%lX\n", this->physadr);
+
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->ecctype = MTD_ECC_RS_DiskOnChip;
+ mtd->size = 0;
+
+ mtd->erasesize = 0;
+ mtd->oobblock = 512;
+ mtd->oobsize = 16;
+ mtd->owner = THIS_MODULE;
+ mtd->erase = doc_erase;
+ mtd->point = NULL;
+ mtd->unpoint = NULL;
+ mtd->read = doc_read;
+ mtd->write = doc_write;
+ mtd->read_ecc = doc_read_ecc;
+ mtd->write_ecc = doc_write_ecc;
+ mtd->read_oob = doc_read_oob;
+ mtd->write_oob = doc_write_oob;
+ mtd->sync = NULL;
+
+ this->totlen = 0;
+ this->numchips = 0;
+ this->curfloor = -1;
+ this->curchip = -1;
+
+ /* Ident all the chips present. */
+ DoC_ScanChips(this);
+
+ if (!this->totlen) {
+ kfree(mtd);
+ iounmap(this->virtadr);
+ } else {
+ this->nextdoc = docmilpluslist;
+ docmilpluslist = mtd;
+ mtd->size = this->totlen;
+ mtd->erasesize = this->erasesize;
+ add_mtd_device(mtd);
+ return;
+ }
+}
+
+#if 0
+static int doc_dumpblk(struct mtd_info *mtd, loff_t from)
+{
+ int i;
+ loff_t fofs;
+ struct DiskOnChip *this = mtd->priv;
+ void __iomem * docptr = this->virtadr;
+ struct Nand *mychip = &this->chips[from >> (this->chipshift)];
+ unsigned char *bp, buf[1056];
+ char c[32];
+
+ from &= ~0x3ff;
+
+ /* Don't allow read past end of device */
+ if (from >= this->totlen)
+ return -EINVAL;
+
+ DoC_CheckASIC(docptr);
+
+ /* Find the chip which is to be used and select it */
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(docptr, mychip->floor);
+ DoC_SelectChip(docptr, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(docptr, mychip->chip);
+ }
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
+
+ /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
+ WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
+
+ /* Reset the chip, see Software Requirement 11.4 item 1. */
+ DoC_Command(docptr, NAND_CMD_RESET, 0);
+ DoC_WaitReady(docptr);
+
+ fofs = from;
+ DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
+ DoC_Address(this, 3, fofs, 0, 0x00);
+ WriteDOC(0, docptr, Mplus_FlashControl);
+ DoC_WaitReady(docptr);
+
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ /* Read the data via the internal pipeline through CDSN IO
+ register, see Pipelined Read Operations 11.3 */
+ MemReadDOC(docptr, buf, 1054);
+ buf[1054] = ReadDOC(docptr, Mplus_LastDataRead);
+ buf[1055] = ReadDOC(docptr, Mplus_LastDataRead);
+
+ memset(&c[0], 0, sizeof(c));
+ printk("DUMP OFFSET=%x:\n", (int)from);
+
+ for (i = 0, bp = &buf[0]; (i < 1056); i++) {
+ if ((i % 16) == 0)
+ printk("%08x: ", i);
+ printk(" %02x", *bp);
+ c[(i & 0xf)] = ((*bp >= 0x20) && (*bp <= 0x7f)) ? *bp : '.';
+ bp++;
+ if (((i + 1) % 16) == 0)
+ printk(" %s\n", c);
+ }
+ printk("\n");
+
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+
+ return 0;
+}
+#endif
+
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ /* Just a special case of doc_read_ecc */
+ return doc_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
+}
+
+static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel)
+{
+ int ret, i;
+ volatile char dummy;
+ loff_t fofs;
+ unsigned char syndrome[6];
+ struct DiskOnChip *this = mtd->priv;
+ void __iomem * docptr = this->virtadr;
+ struct Nand *mychip = &this->chips[from >> (this->chipshift)];
+
+ /* Don't allow read past end of device */
+ if (from >= this->totlen)
+ return -EINVAL;
+
+ /* Don't allow a single read to cross a 512-byte block boundary */
+ if (from + len > ((from | 0x1ff) + 1))
+ len = ((from | 0x1ff) + 1) - from;
+
+ DoC_CheckASIC(docptr);
+
+ /* Find the chip which is to be used and select it */
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(docptr, mychip->floor);
+ DoC_SelectChip(docptr, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(docptr, mychip->chip);
+ }
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
+
+ /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
+ WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
+
+ /* Reset the chip, see Software Requirement 11.4 item 1. */
+ DoC_Command(docptr, NAND_CMD_RESET, 0);
+ DoC_WaitReady(docptr);
+
+ fofs = from;
+ DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0);
+ DoC_Address(this, 3, fofs, 0, 0x00);
+ WriteDOC(0, docptr, Mplus_FlashControl);
+ DoC_WaitReady(docptr);
+
+ if (eccbuf) {
+ /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
+ } else {
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ }
+
+ /* Let the caller know we completed it */
+ *retlen = len;
+ ret = 0;
+
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ if (eccbuf) {
+ /* Read the data via the internal pipeline through CDSN IO
+ register, see Pipelined Read Operations 11.3 */
+ MemReadDOC(docptr, buf, len);
+
+ /* Read the ECC data following raw data */
+ MemReadDOC(docptr, eccbuf, 4);
+ eccbuf[4] = ReadDOC(docptr, Mplus_LastDataRead);
+ eccbuf[5] = ReadDOC(docptr, Mplus_LastDataRead);
+
+ /* Flush the pipeline */
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+
+ /* Check the ECC Status */
+ if (ReadDOC(docptr, Mplus_ECCConf) & 0x80) {
+ int nb_errors;
+ /* There was an ECC error */
+#ifdef ECC_DEBUG
+ printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
+#endif
+ /* Read the ECC syndrome through the DiskOnChip ECC logic.
+ These syndromes will be all ZERO when there is no error */
+ for (i = 0; i < 6; i++)
+ syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
+
+ nb_errors = doc_decode_ecc(buf, syndrome);
+#ifdef ECC_DEBUG
+ printk("ECC Errors corrected: %x\n", nb_errors);
+#endif
+ if (nb_errors < 0) {
+ /* We return error, but have actually done the read. Not that
+ this can be told to user-space, via sys_read(), but at least
+ MTD-aware stuff can know about it by checking *retlen */
+#ifdef ECC_DEBUG
+ printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n",
+ __FILE__, __LINE__, (int)from);
+ printk(" syndrome= %02x:%02x:%02x:%02x:%02x:"
+ "%02x\n",
+ syndrome[0], syndrome[1], syndrome[2],
+ syndrome[3], syndrome[4], syndrome[5]);
+ printk(" eccbuf= %02x:%02x:%02x:%02x:%02x:"
+ "%02x\n",
+ eccbuf[0], eccbuf[1], eccbuf[2],
+ eccbuf[3], eccbuf[4], eccbuf[5]);
+#endif
+ ret = -EIO;
+ }
+ }
+
+#ifdef PSYCHO_DEBUG
+ printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
+ (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
+ eccbuf[4], eccbuf[5]);
+#endif
+
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf);
+ } else {
+ /* Read the data via the internal pipeline through CDSN IO
+ register, see Pipelined Read Operations 11.3 */
+ MemReadDOC(docptr, buf, len-2);
+ buf[len-2] = ReadDOC(docptr, Mplus_LastDataRead);
+ buf[len-1] = ReadDOC(docptr, Mplus_LastDataRead);
+ }
+
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+
+ return ret;
+}
+
+static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ char eccbuf[6];
+ return doc_write_ecc(mtd, to, len, retlen, buf, eccbuf, NULL);
+}
+
+static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf, u_char *eccbuf,
+ struct nand_oobinfo *oobsel)
+{
+ int i, before, ret = 0;
+ loff_t fto;
+ volatile char dummy;
+ struct DiskOnChip *this = mtd->priv;
+ void __iomem * docptr = this->virtadr;
+ struct Nand *mychip = &this->chips[to >> (this->chipshift)];
+
+ /* Don't allow write past end of device */
+ if (to >= this->totlen)
+ return -EINVAL;
+
+ /* Don't allow writes which aren't exactly one block (512 bytes) */
+ if ((to & 0x1ff) || (len != 0x200))
+ return -EINVAL;
+
+ /* Determine position of OOB flags, before or after data */
+ before = (this->interleave && (to & 0x200));
+
+ DoC_CheckASIC(docptr);
+
+ /* Find the chip which is to be used and select it */
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(docptr, mychip->floor);
+ DoC_SelectChip(docptr, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(docptr, mychip->chip);
+ }
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
+
+ /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
+ WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
+
+ /* Reset the chip, see Software Requirement 11.4 item 1. */
+ DoC_Command(docptr, NAND_CMD_RESET, 0);
+ DoC_WaitReady(docptr);
+
+ /* Set device to appropriate plane of flash */
+ fto = to;
+ WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
+
+ /* On interleaved devices the flags for 2nd half 512 are before data */
+ if (eccbuf && before)
+ fto -= 2;
+
+	/* Issue the Serial Data In command to initiate the Page Program process */
+ DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
+ DoC_Address(this, 3, fto, 0x00, 0x00);
+
+ /* Disable the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+
+ if (eccbuf) {
+ if (before) {
+ /* Write the block status BLOCK_USED (0x5555) */
+ WriteDOC(0x55, docptr, Mil_CDSN_IO);
+ WriteDOC(0x55, docptr, Mil_CDSN_IO);
+ }
+
+ /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
+ }
+
+ MemWriteDOC(docptr, (unsigned char *) buf, len);
+
+ if (eccbuf) {
+ /* Write ECC data to flash, the ECC info is generated by
+ the DiskOnChip ECC logic see Reed-Solomon EDC/ECC 11.1 */
+ DoC_Delay(docptr, 3);
+
+ /* Read the ECC data through the DiskOnChip ECC logic */
+ for (i = 0; i < 6; i++)
+ eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
+
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+
+ /* Write the ECC data to flash */
+ MemWriteDOC(docptr, eccbuf, 6);
+
+ if (!before) {
+ /* Write the block status BLOCK_USED (0x5555) */
+ WriteDOC(0x55, docptr, Mil_CDSN_IO+6);
+ WriteDOC(0x55, docptr, Mil_CDSN_IO+7);
+ }
+
+#ifdef PSYCHO_DEBUG
+ printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n",
+ (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3],
+ eccbuf[4], eccbuf[5]);
+#endif
+ }
+
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+
+ /* Commit the Page Program command and wait for ready
+ see Software Requirement 11.4 item 1.*/
+ DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
+ DoC_WaitReady(docptr);
+
+ /* Read the status of the flash device through CDSN IO register
+ see Software Requirement 11.4 item 5.*/
+ DoC_Command(docptr, NAND_CMD_STATUS, 0);
+ dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
+ dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
+ DoC_Delay(docptr, 2);
+ if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
+ printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to);
+ /* Error in programming
+ FIXME: implement Bad Block Replacement (in nftl.c ??) */
+ *retlen = 0;
+ ret = -EIO;
+ }
+ dummy = ReadDOC(docptr, Mplus_LastDataRead);
+
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+
+ /* Let the caller know we completed it */
+ *retlen = len;
+
+ return ret;
+}
+
+static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ loff_t fofs, base;
+ struct DiskOnChip *this = mtd->priv;
+ void __iomem * docptr = this->virtadr;
+ struct Nand *mychip = &this->chips[ofs >> this->chipshift];
+ size_t i, size, got, want;
+
+ DoC_CheckASIC(docptr);
+
+ /* Find the chip which is to be used and select it */
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(docptr, mychip->floor);
+ DoC_SelectChip(docptr, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(docptr, mychip->chip);
+ }
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
+
+ /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
+ WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect);
+
+ /* disable the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ DoC_WaitReady(docptr);
+
+ /* Maximum of 16 bytes in the OOB region, so limit read to that */
+ if (len > 16)
+ len = 16;
+ got = 0;
+ want = len;
+
+ for (i = 0; ((i < 3) && (want > 0)); i++) {
+ /* Figure out which region we are accessing... */
+ fofs = ofs;
+ base = ofs & 0xf;
+ if (!this->interleave) {
+ DoC_Command(docptr, NAND_CMD_READOOB, 0);
+ size = 16 - base;
+ } else if (base < 6) {
+ DoC_Command(docptr, DoC_GetECCOffset(mtd, &fofs), 0);
+ size = 6 - base;
+ } else if (base < 8) {
+ DoC_Command(docptr, DoC_GetFlagsOffset(mtd, &fofs), 0);
+ size = 8 - base;
+ } else {
+ DoC_Command(docptr, DoC_GetHdrOffset(mtd, &fofs), 0);
+ size = 16 - base;
+ }
+ if (size > want)
+ size = want;
+
+ /* Issue read command */
+ DoC_Address(this, 3, fofs, 0, 0x00);
+ WriteDOC(0, docptr, Mplus_FlashControl);
+ DoC_WaitReady(docptr);
+
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ MemReadDOC(docptr, &buf[got], size - 2);
+ buf[got + size - 2] = ReadDOC(docptr, Mplus_LastDataRead);
+ buf[got + size - 1] = ReadDOC(docptr, Mplus_LastDataRead);
+
+ ofs += size;
+ got += size;
+ want -= size;
+ }
+
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+
+ *retlen = len;
+ return 0;
+}
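On interleaved Millennium Plus devices, doc_read_oob() and doc_write_oob() above split the 16-byte OOB area into three regions: ECC bytes at offsets 0-5, block status flags at 6-7, and the header/extra area at 8-15 (non-interleaved parts simply use READOOB for the whole 16 bytes). The following self-contained sketch only mirrors that offset-to-region arithmetic; the names are illustrative and not taken from the driver.

/* Standalone illustration of the OOB region split used above. */
#include <stdio.h>

static const char *oob_region(unsigned int base, unsigned int *size)
{
	if (base < 6) {			/* ECC bytes 0-5 */
		*size = 6 - base;
		return "ecc";
	} else if (base < 8) {		/* block status flags, bytes 6-7 */
		*size = 8 - base;
		return "flags";
	}
	*size = 16 - base;		/* header/extra area, bytes 8-15 */
	return "header";
}

int main(void)
{
	unsigned int ofs, size;

	for (ofs = 0; ofs < 16; ofs++)
		printf("oob byte %2u -> %s region (%u byte(s) left)\n",
		       ofs, oob_region(ofs & 0x0f, &size), size);
	return 0;
}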
+
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ volatile char dummy;
+ loff_t fofs, base;
+ struct DiskOnChip *this = mtd->priv;
+ void __iomem * docptr = this->virtadr;
+ struct Nand *mychip = &this->chips[ofs >> this->chipshift];
+ size_t i, size, got, want;
+ int ret = 0;
+
+ DoC_CheckASIC(docptr);
+
+ /* Find the chip which is to be used and select it */
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(docptr, mychip->floor);
+ DoC_SelectChip(docptr, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(docptr, mychip->chip);
+ }
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
+
+ /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
+ WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
+
+
+ /* Maximum of 16 bytes in the OOB region, so limit write to that */
+ if (len > 16)
+ len = 16;
+ got = 0;
+ want = len;
+
+ for (i = 0; ((i < 3) && (want > 0)); i++) {
+ /* Reset the chip, see Software Requirement 11.4 item 1. */
+ DoC_Command(docptr, NAND_CMD_RESET, 0);
+ DoC_WaitReady(docptr);
+
+ /* Figure out which region we are accessing... */
+ fofs = ofs;
+ base = ofs & 0x0f;
+ if (!this->interleave) {
+ WriteDOC(NAND_CMD_READOOB, docptr, Mplus_FlashCmd);
+ size = 16 - base;
+ } else if (base < 6) {
+ WriteDOC(DoC_GetECCOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
+ size = 6 - base;
+ } else if (base < 8) {
+ WriteDOC(DoC_GetFlagsOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
+ size = 8 - base;
+ } else {
+ WriteDOC(DoC_GetHdrOffset(mtd, &fofs), docptr, Mplus_FlashCmd);
+ size = 16 - base;
+ }
+ if (size > want)
+ size = want;
+
+		/* Issue the Serial Data In command to initiate the Page Program process */
+ DoC_Command(docptr, NAND_CMD_SEQIN, 0x00);
+ DoC_Address(this, 3, fofs, 0, 0x00);
+
+ /* Disable the ECC engine */
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+
+ /* Write the data via the internal pipeline through CDSN IO
+ register, see Pipelined Write Operations 11.2 */
+ MemWriteDOC(docptr, (unsigned char *) &buf[got], size);
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+
+ /* Commit the Page Program command and wait for ready
+ see Software Requirement 11.4 item 1.*/
+ DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00);
+ DoC_WaitReady(docptr);
+
+ /* Read the status of the flash device through CDSN IO register
+ see Software Requirement 11.4 item 5.*/
+ DoC_Command(docptr, NAND_CMD_STATUS, 0x00);
+ dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
+ dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
+ DoC_Delay(docptr, 2);
+ if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
+ printk("MTD: Error 0x%x programming oob at 0x%x\n",
+ dummy, (int)ofs);
+ /* FIXME: implement Bad Block Replacement */
+ *retlen = 0;
+ ret = -EIO;
+ }
+ dummy = ReadDOC(docptr, Mplus_LastDataRead);
+
+ ofs += size;
+ got += size;
+ want -= size;
+ }
+
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+
+ *retlen = len;
+ return ret;
+}
+
+int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ volatile char dummy;
+ struct DiskOnChip *this = mtd->priv;
+ __u32 ofs = instr->addr;
+ __u32 len = instr->len;
+ void __iomem * docptr = this->virtadr;
+ struct Nand *mychip = &this->chips[ofs >> this->chipshift];
+
+ DoC_CheckASIC(docptr);
+
+ if (len != mtd->erasesize)
+ printk(KERN_WARNING "MTD: Erase not right size (%x != %x)n",
+ len, mtd->erasesize);
+
+ /* Find the chip which is to be used and select it */
+ if (this->curfloor != mychip->floor) {
+ DoC_SelectFloor(docptr, mychip->floor);
+ DoC_SelectChip(docptr, mychip->chip);
+ } else if (this->curchip != mychip->chip) {
+ DoC_SelectChip(docptr, mychip->chip);
+ }
+ this->curfloor = mychip->floor;
+ this->curchip = mychip->chip;
+
+ instr->state = MTD_ERASE_PENDING;
+
+ /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */
+ WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect);
+
+ DoC_Command(docptr, NAND_CMD_RESET, 0x00);
+ DoC_WaitReady(docptr);
+
+ DoC_Command(docptr, NAND_CMD_ERASE1, 0);
+ DoC_Address(this, 2, ofs, 0, 0x00);
+ DoC_Command(docptr, NAND_CMD_ERASE2, 0);
+ DoC_WaitReady(docptr);
+ instr->state = MTD_ERASING;
+
+ /* Read the status of the flash device through CDSN IO register
+ see Software Requirement 11.4 item 5. */
+ DoC_Command(docptr, NAND_CMD_STATUS, 0);
+ dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
+ dummy = ReadDOC(docptr, Mplus_ReadPipeInit);
+ if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) {
+ printk("MTD: Error 0x%x erasing at 0x%x\n", dummy, ofs);
+ /* FIXME: implement Bad Block Replacement (in nftl.c ??) */
+ instr->state = MTD_ERASE_FAILED;
+ } else {
+ instr->state = MTD_ERASE_DONE;
+ }
+ dummy = ReadDOC(docptr, Mplus_LastDataRead);
+
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+/****************************************************************************
+ *
+ * Module stuff
+ *
+ ****************************************************************************/
+
+static int __init init_doc2001plus(void)
+{
+ inter_module_register(im_name, THIS_MODULE, &DoCMilPlus_init);
+ return 0;
+}
+
+static void __exit cleanup_doc2001plus(void)
+{
+ struct mtd_info *mtd;
+ struct DiskOnChip *this;
+
+ while ((mtd=docmilpluslist)) {
+ this = mtd->priv;
+ docmilpluslist = this->nextdoc;
+
+ del_mtd_device(mtd);
+
+ iounmap(this->virtadr);
+ kfree(this->chips);
+ kfree(mtd);
+ }
+ inter_module_unregister(im_name);
+}
+
+module_exit(cleanup_doc2001plus);
+module_init(init_doc2001plus);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com> et al.");
+MODULE_DESCRIPTION("Driver for DiskOnChip Millennium Plus");
diff --git a/linux-2.4.x/drivers/mtd/devices/docecc.c b/linux-2.4.x/drivers/mtd/devices/docecc.c
index 67af87d..cd3db72 100644
--- a/linux-2.4.x/drivers/mtd/devices/docecc.c
+++ b/linux-2.4.x/drivers/mtd/devices/docecc.c
@@ -4,10 +4,10 @@
* GNU GPL License. The rest is simply to convert the disk on chip
 * syndrome into a standard syndrome.
*
- * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
*
- * $Id: docecc.c,v 1.4 2001/10/02 15:05:13 dwmw2 Exp $
+ * $Id: docecc.c,v 1.7 2005/11/07 11:14:25 gleixner Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -40,6 +40,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/doc2000.h>
+#define DEBUG_ECC 0
/* need to undef it (from asm/termbits.h) */
#undef B0
@@ -121,7 +122,7 @@ for(ci=(n)-1;ci >=0;ci--)\
a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1)
we consider the integer "i" whose binary representation with a(0) being LSB
and a(m-1) MSB is (a(0),a(1),...,a(m-1)) and locate the entry
- "index_of[i]". Now, @^index_of[i] is that element whose polynomial
+ "index_of[i]". Now, @^index_of[i] is that element whose polynomial
representation is (a(0),a(1),a(2),...,a(m-1)).
NOTE:
The element alpha_to[2^m-1] = 0 always signifying that the
@@ -129,7 +130,7 @@ for(ci=(n)-1;ci >=0;ci--)\
Similarily, the element index_of[0] = A0 always signifying
that the power of alpha which has the polynomial representation
(0,0,...,0) is "infinity".
-
+
*/
static void
@@ -175,7 +176,7 @@ generate_gf(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1])
* are written back. NOTE! This array must be at least NN-KK elements long.
* The corrected data are written in eras_val[]. They must be xor with the data
* to retrieve the correct data : data[erase_pos[i]] ^= erase_val[i] .
- *
+ *
* First "no_eras" erasures are declared by the calling program. Then, the
* maximum # of errors correctable is t_after_eras = floor((NN-KK-no_eras)/2).
* If the number of channel errors is not greater than "t_after_eras" the
@@ -188,7 +189,7 @@ generate_gf(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1])
* */
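The contract described above is that the decoder reports corrections in eras_val[]/eras_pos[] and the caller recovers the data by XORing each value back in at its position (data[eras_pos[i]] ^= eras_val[i]). The snippet below is a minimal sketch of that final correction step for any decoder with the same contract; names and types are placeholders, and it deliberately ignores the bit-packing that doc_decode_ecc() itself performs.

/* Sketch only: apply decoder output as the contract above describes. */
static int apply_corrections(unsigned char *data, int len,
			     const unsigned char *eras_val,
			     const int *eras_pos, int count)
{
	int i;

	if (count < 0)
		return -1;		/* decoder reported failure */
	for (i = 0; i < count; i++) {
		if (eras_pos[i] < 0 || eras_pos[i] >= len)
			continue;	/* position outside the buffer */
		data[eras_pos[i]] ^= eras_val[i];
	}
	return count;
}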
static int
eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
- gf bb[NN - KK + 1], gf eras_val[NN-KK], int eras_pos[NN-KK],
+ gf bb[NN - KK + 1], gf eras_val[NN-KK], int eras_pos[NN-KK],
int no_eras)
{
int deg_lambda, el, deg_omega;
@@ -211,7 +212,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
count = 0;
goto finish;
}
-
+
for(i=1;i<=NN-KK;i++){
s[i] = bb[0];
}
@@ -219,7 +220,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
if(bb[j] == 0)
continue;
tmp = Index_of[bb[j]];
-
+
for(i=1;i<=NN-KK;i++)
s[i] ^= Alpha_to[modnn(tmp + (B0+i-1)*PRIM*j)];
}
@@ -233,7 +234,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
tmp = modnn(tmp + 2 * KK * (B0+i-1)*PRIM);
s[i] = tmp;
}
-
+
CLEAR(&lambda[1],NN-KK);
lambda[0] = 1;
@@ -248,10 +249,10 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
lambda[j] ^= Alpha_to[modnn(u + tmp)];
}
}
-#if DEBUG >= 1
+#if DEBUG_ECC >= 1
/* Test code that verifies the erasure locator polynomial just constructed
Needed only for decoder debugging. */
-
+
/* find roots of the erasure location polynomial */
for(i=1;i<=no_eras;i++)
reg[i] = Index_of[lambda[i]];
@@ -275,7 +276,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
count = -1;
goto finish;
}
-#if DEBUG >= 2
+#if DEBUG_ECC >= 2
printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n");
for (i = 0; i < count; i++)
printf("%d ", loc[i]);
@@ -285,7 +286,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
}
for(i=0;i<NN-KK+1;i++)
b[i] = Index_of[lambda[i]];
-
+
/*
* Begin Berlekamp-Massey algorithm to determine error+erasure
* locator polynomial
@@ -388,7 +389,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
omega[i] = Index_of[tmp];
}
omega[NN-KK] = A0;
-
+
/*
* Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
* inv(X(l))**(B0-1) and den = lambda_pr(inv(X(l))) all in poly-form
@@ -401,14 +402,14 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
}
num2 = Alpha_to[modnn(root[j] * (B0 - 1) + NN)];
den = 0;
-
+
/* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */
for (i = min(deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) {
if(lambda[i+1] != A0)
den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])];
}
if (den == 0) {
-#if DEBUG >= 1
+#if DEBUG_ECC >= 1
printf("\n ERROR: denominator = 0\n");
#endif
/* Convert to dual- basis */
@@ -435,11 +436,11 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1],
/* The sector bytes are packed into NB_DATA MM bits words */
#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / MM)
-/*
+/*
* Correct the errors in 'sector[]' by using 'ecc1[]' which is the
 * content of the feedback shift register applied to the sector and
* the ECC. Return the number of errors corrected (and correct them in
- * sector), or -1 if error
+ * sector), or -1 if error
*/
int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
{
@@ -453,7 +454,7 @@ int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
Alpha_to = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
if (!Alpha_to)
return -1;
-
+
Index_of = kmalloc((NN + 1) * sizeof(dtype), GFP_KERNEL);
if (!Index_of) {
kfree(Alpha_to);
@@ -469,7 +470,7 @@ int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
bb[2] = ((ecc1[2] & 0xf0) >> 4) | ((ecc1[3] & 0x3f) << 4);
bb[3] = ((ecc1[3] & 0xc0) >> 6) | ((ecc1[0] & 0xff) << 2);
- nb_errors = eras_dec_rs(Alpha_to, Index_of, bb,
+ nb_errors = eras_dec_rs(Alpha_to, Index_of, bb,
error_val, error_pos, 0);
if (nb_errors <= 0)
goto the_end;
@@ -488,7 +489,7 @@ int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
can be modified since pos is even */
index = (pos >> 3) ^ 1;
bitpos = pos & 7;
- if ((index >= 0 && index < SECTOR_SIZE) ||
+ if ((index >= 0 && index < SECTOR_SIZE) ||
index == (SECTOR_SIZE + 1)) {
val = error_val[i] >> (2 + bitpos);
parity ^= val;
@@ -499,7 +500,7 @@ int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
bitpos = (bitpos + 10) & 7;
if (bitpos == 0)
bitpos = 8;
- if ((index >= 0 && index < SECTOR_SIZE) ||
+ if ((index >= 0 && index < SECTOR_SIZE) ||
index == (SECTOR_SIZE + 1)) {
val = error_val[i] << (8 - bitpos);
parity ^= val;
@@ -508,7 +509,7 @@ int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
}
}
}
-
+
/* use parity to test extra errors */
if ((parity & 0xff) != 0)
nb_errors = -1;
@@ -519,6 +520,8 @@ int doc_decode_ecc(unsigned char sector[SECTOR_SIZE], unsigned char ecc1[6])
return nb_errors;
}
+EXPORT_SYMBOL_GPL(doc_decode_ecc);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fabrice Bellard <fabrice.bellard@netgem.com>");
MODULE_DESCRIPTION("ECC code for correcting errors detected by DiskOnChip 2000 and Millennium ECC hardware");
diff --git a/linux-2.4.x/drivers/mtd/devices/docprobe.c b/linux-2.4.x/drivers/mtd/devices/docprobe.c
index 43ccd77..13178b9 100644
--- a/linux-2.4.x/drivers/mtd/devices/docprobe.c
+++ b/linux-2.4.x/drivers/mtd/devices/docprobe.c
@@ -1,24 +1,25 @@
/* Linux driver for Disk-On-Chip devices */
/* Probe routines common to all DoC devices */
-/* (c) 1999 Machine Vision Holdings, Inc. */
-/* Author: David Woodhouse <dwmw2@infradead.org> */
-/* $Id: docprobe.c,v 1.30 2001/10/02 15:05:13 dwmw2 Exp $ */
+/* (C) 1999 Machine Vision Holdings, Inc. */
+/* (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> */
+
+/* $Id: docprobe.c,v 1.46 2005/11/07 11:14:25 gleixner Exp $ */
/* DOC_PASSIVE_PROBE:
- In order to ensure that the BIOS checksum is correct at boot time, and
- hence that the onboard BIOS extension gets executed, the DiskOnChip
- goes into reset mode when it is read sequentially: all registers
- return 0xff until the chip is woken up again by writing to the
- DOCControl register.
-
- Unfortunately, this means that the probe for the DiskOnChip is unsafe,
- because one of the first things it does is write to where it thinks
- the DOCControl register should be - which may well be shared memory
- for another device. I've had machines which lock up when this is
- attempted. Hence the possibility to do a passive probe, which will fail
+ In order to ensure that the BIOS checksum is correct at boot time, and
+ hence that the onboard BIOS extension gets executed, the DiskOnChip
+ goes into reset mode when it is read sequentially: all registers
+ return 0xff until the chip is woken up again by writing to the
+ DOCControl register.
+
+ Unfortunately, this means that the probe for the DiskOnChip is unsafe,
+ because one of the first things it does is write to where it thinks
+ the DOCControl register should be - which may well be shared memory
+ for another device. I've had machines which lock up when this is
+ attempted. Hence the possibility to do a passive probe, which will fail
to detect a chip in reset mode, but is at least guaranteed not to lock
the machine.
@@ -30,14 +31,12 @@
/* DOC_SINGLE_DRIVER:
Millennium driver has been merged into DOC2000 driver.
- The newly-merged driver doesn't appear to work for writing. It's the
- same with the DiskOnChip 2000 and the Millennium. If you have a
- Millennium and you want write support to work, remove the definition
- of DOC_SINGLE_DRIVER below to use the old doc2001-specific driver.
-
- Otherwise, it's left on in the hope that it'll annoy someone with
- a Millennium enough that they go through and work out what the
- difference is :)
+ The old Millennium-only driver has been retained just in case there
+ are problems with the new code. If the combined driver doesn't work
+ for you, you can try the old one by undefining DOC_SINGLE_DRIVER
+ below and also enabling it in your configuration. If this fixes the
+ problems, please send a report to the MTD mailing list at
+ <linux-mtd@lists.infradead.org>.
*/
#define DOC_SINGLE_DRIVER
@@ -46,18 +45,15 @@
#include <linux/module.h>
#include <asm/errno.h>
#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/miscdevice.h>
-#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/sched.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/doc2000.h>
+#include <linux/mtd/compatmac.h>
/* Where to look for the devices? */
#ifndef CONFIG_MTD_DOCPROBE_ADDRESS
@@ -66,39 +62,42 @@
static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
-MODULE_PARM(doc_config_location, "l");
-
+module_param(doc_config_location, ulong, 0);
+MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
static unsigned long __initdata doc_locations[] = {
#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
#ifdef CONFIG_MTD_DOCPROBE_HIGH
- 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
+ 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
- 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
- 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
+ 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
+ 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
#else /* CONFIG_MTD_DOCPROBE_HIGH */
- 0xc8000, 0xca000, 0xcc000, 0xce000,
+ 0xc8000, 0xca000, 0xcc000, 0xce000,
0xd0000, 0xd2000, 0xd4000, 0xd6000,
- 0xd8000, 0xda000, 0xdc000, 0xde000,
- 0xe0000, 0xe2000, 0xe4000, 0xe6000,
+ 0xd8000, 0xda000, 0xdc000, 0xde000,
+ 0xe0000, 0xe2000, 0xe4000, 0xe6000,
0xe8000, 0xea000, 0xec000, 0xee000,
#endif /* CONFIG_MTD_DOCPROBE_HIGH */
-#elif defined(__ppc__)
+#elif defined(__PPC__)
0xe4000000,
#elif defined(CONFIG_MOMENCO_OCELOT)
0x2f000000,
-#else
+ 0xff000000,
+#elif defined(CONFIG_MOMENCO_OCELOT_G) || defined (CONFIG_MOMENCO_OCELOT_C)
+ 0xff000000,
+#else
#warning Unknown architecture for DiskOnChip. No default probe locations defined
#endif
- 0 };
+ 0xffffffff };
/* doccheck: Probe a given memory window to see if there's a DiskOnChip present */
-static inline int __init doccheck(unsigned long potential, unsigned long physadr)
+static inline int __init doccheck(void __iomem *potential, unsigned long physadr)
{
- unsigned long window=potential;
- unsigned char tmp, ChipID;
+ void __iomem *window=potential;
+ unsigned char tmp, tmpb, tmpc, ChipID;
#ifndef DOC_PASSIVE_PROBE
unsigned char tmp2;
#endif
@@ -112,50 +111,104 @@ static inline int __init doccheck(unsigned long potential, unsigned long physadr
return 0;
#endif /* CONFIG_MTD_DOCPROBE_55AA */
-#ifndef DOC_PASSIVE_PROBE
+#ifndef DOC_PASSIVE_PROBE
/* It's not possible to cleanly detect the DiskOnChip - the
* bootup procedure will put the device into reset mode, and
* it's not possible to talk to it without actually writing
* to the DOCControl register. So we store the current contents
* of the DOCControl register's location, in case we later decide
* that it's not a DiskOnChip, and want to put it back how we
- * found it.
+ * found it.
*/
tmp2 = ReadDOC(window, DOCControl);
-
+
/* Reset the DiskOnChip ASIC */
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
window, DOCControl);
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
window, DOCControl);
-
+
/* Enable the DiskOnChip ASIC */
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
window, DOCControl);
- WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
window, DOCControl);
-#endif /* !DOC_PASSIVE_PROBE */
+#endif /* !DOC_PASSIVE_PROBE */
+ /* We need to read the ChipID register four times. For some
+ newer DiskOnChip 2000 units, the first three reads will
+ return the DiskOnChip Millennium ident. Don't ask. */
ChipID = ReadDOC(window, ChipID);
-
+
switch (ChipID) {
case DOC_ChipID_Doc2k:
/* Check the TOGGLE bit in the ECC register */
- tmp = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
- if ((ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT) != tmp)
+ tmp = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
+ tmpb = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
+ tmpc = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT;
+ if (tmp != tmpb && tmp == tmpc)
return ChipID;
break;
-
+
case DOC_ChipID_DocMil:
+ /* Check for the new 2000 with Millennium ASIC */
+ ReadDOC(window, ChipID);
+ ReadDOC(window, ChipID);
+ if (ReadDOC(window, ChipID) != DOC_ChipID_DocMil)
+ ChipID = DOC_ChipID_Doc2kTSOP;
+
/* Check the TOGGLE bit in the ECC register */
- tmp = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
- if ((ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT) != tmp)
+ tmp = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
+ tmpb = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
+ tmpc = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT;
+ if (tmp != tmpb && tmp == tmpc)
return ChipID;
break;
-
+
+ case DOC_ChipID_DocMilPlus16:
+ case DOC_ChipID_DocMilPlus32:
+ case 0:
+ /* Possible Millennium+, need to do more checks */
+#ifndef DOC_PASSIVE_PROBE
+ /* Possibly release from power down mode */
+ for (tmp = 0; (tmp < 4); tmp++)
+ ReadDOC(window, Mplus_Power);
+
+ /* Reset the DiskOnChip ASIC */
+ tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
+ DOC_MODE_BDECT;
+ WriteDOC(tmp, window, Mplus_DOCControl);
+ WriteDOC(~tmp, window, Mplus_CtrlConfirm);
+
+ mdelay(1);
+ /* Enable the DiskOnChip ASIC */
+ tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
+ DOC_MODE_BDECT;
+ WriteDOC(tmp, window, Mplus_DOCControl);
+ WriteDOC(~tmp, window, Mplus_CtrlConfirm);
+ mdelay(1);
+#endif /* !DOC_PASSIVE_PROBE */
+
+ ChipID = ReadDOC(window, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_DocMilPlus16:
+ case DOC_ChipID_DocMilPlus32:
+ /* Check the TOGGLE bit in the toggle register */
+ tmp = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
+ tmpb = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
+ tmpc = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT;
+ if (tmp != tmpb && tmp == tmpc)
+ return ChipID;
+ default:
+ break;
+ }
+		/* FALL THRU */
+
default:
-#ifndef CONFIG_MTD_DOCPROBE_55AA
- printk(KERN_WARNING "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n",
+
+#ifdef CONFIG_MTD_DOCPROBE_55AA
+ printk(KERN_DEBUG "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n",
ChipID, physadr);
#endif
#ifndef DOC_PASSIVE_PROBE
@@ -174,13 +227,13 @@ static inline int __init doccheck(unsigned long potential, unsigned long physadr
WriteDOC(tmp2, window, DOCControl);
#endif
return 0;
-}
+}
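The reworked doccheck() above decides that a chip is really present by sampling the toggle bit three times and requiring it to alternate (tmp != tmpb && tmp == tmpc), which rejects a floating bus that reads back a constant value. The self-contained example below only demonstrates that three-sample test; the register read is simulated and nothing in it comes from the driver.

/* Standalone illustration of the three-sample toggle-bit test. */
#include <stdio.h>

#define TOGGLE_BIT 0x04

static int sim_state;

static unsigned char read_toggle_bit(int alive)
{
	if (alive)
		sim_state ^= TOGGLE_BIT;	/* a live chip flips the bit */
	return sim_state & TOGGLE_BIT;		/* dead bus: constant value */
}

static int looks_alive(int alive)
{
	unsigned char a = read_toggle_bit(alive);
	unsigned char b = read_toggle_bit(alive);
	unsigned char c = read_toggle_bit(alive);

	/* same pattern as doccheck(): must differ, then match again */
	return (a != b) && (a == c);
}

int main(void)
{
	printf("live chip -> %s\n", looks_alive(1) ? "detected" : "rejected");
	printf("dead bus  -> %s\n", looks_alive(0) ? "detected" : "rejected");
	return 0;
}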
static int docfound;
static void __init DoC_Probe(unsigned long physadr)
{
- unsigned long docptr;
+ void __iomem *docptr;
struct DiskOnChip *this;
struct mtd_info *mtd;
int ChipID;
@@ -190,23 +243,29 @@ static void __init DoC_Probe(unsigned long physadr)
char *im_modname = NULL;
void (*initroutine)(struct mtd_info *) = NULL;
- docptr = (unsigned long)ioremap(physadr, DOC_IOREMAP_LEN);
-
+ docptr = ioremap(physadr, DOC_IOREMAP_LEN);
+
if (!docptr)
return;
-
+
if ((ChipID = doccheck(docptr, physadr))) {
+ if (ChipID == DOC_ChipID_Doc2kTSOP) {
+ /* Remove this at your own peril. The hardware driver works but nothing prevents you from erasing bad blocks */
+ printk(KERN_NOTICE "Refusing to drive DiskOnChip 2000 TSOP until Bad Block Table is correctly supported by INFTL\n");
+ iounmap(docptr);
+ return;
+ }
docfound = 1;
mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!mtd) {
printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
- iounmap((void *)docptr);
+ iounmap(docptr);
return;
}
-
+
this = (struct DiskOnChip *)(&mtd[1]);
-
+
memset((char *)mtd,0, sizeof(struct mtd_info));
memset((char *)this, 0, sizeof(struct DiskOnChip));
@@ -217,12 +276,18 @@ static void __init DoC_Probe(unsigned long physadr)
sprintf(namebuf, "with ChipID %2.2X", ChipID);
switch(ChipID) {
+ case DOC_ChipID_Doc2kTSOP:
+ name="2000 TSOP";
+ im_funcname = "DoC2k_init";
+ im_modname = "doc2000";
+ break;
+
case DOC_ChipID_Doc2k:
name="2000";
im_funcname = "DoC2k_init";
im_modname = "doc2000";
break;
-
+
case DOC_ChipID_DocMil:
name="Millennium";
#ifdef DOC_SINGLE_DRIVER
@@ -233,6 +298,13 @@ static void __init DoC_Probe(unsigned long physadr)
im_modname = "doc2001";
#endif /* DOC_SINGLE_DRIVER */
break;
+
+ case DOC_ChipID_DocMilPlus16:
+ case DOC_ChipID_DocMilPlus32:
+ name="MillenniumPlus";
+ im_funcname = "DoCMilPlus_init";
+ im_modname = "doc2001plus";
+ break;
}
if (im_funcname)
@@ -244,8 +316,9 @@ static void __init DoC_Probe(unsigned long physadr)
return;
}
printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr);
+ kfree(mtd);
}
- iounmap((void *)docptr);
+ iounmap(docptr);
}
@@ -255,15 +328,15 @@ static void __init DoC_Probe(unsigned long physadr)
*
****************************************************************************/
-int __init init_doc(void)
+static int __init init_doc(void)
{
int i;
-
+
if (doc_config_location) {
printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
DoC_Probe(doc_config_location);
} else {
- for (i=0; doc_locations[i]; i++) {
+ for (i=0; (doc_locations[i] != 0xffffffff); i++) {
DoC_Probe(doc_locations[i]);
}
}
@@ -271,11 +344,7 @@ int __init init_doc(void)
found, so the user knows we at least tried. */
if (!docfound)
printk(KERN_INFO "No recognised DiskOnChip devices found\n");
- /* So it looks like we've been used and we get unloaded */
- MOD_INC_USE_COUNT;
- MOD_DEC_USE_COUNT;
- return 0;
-
+ return -EAGAIN;
}
module_init(init_doc);
diff --git a/linux-2.4.x/drivers/mtd/devices/lart.c b/linux-2.4.x/drivers/mtd/devices/lart.c
index e049ace..0320e0f 100644
--- a/linux-2.4.x/drivers/mtd/devices/lart.c
+++ b/linux-2.4.x/drivers/mtd/devices/lart.c
@@ -2,7 +2,7 @@
/*
* MTD driver for the 28F160F3 Flash Memory (non-CFI) on LART.
*
- * $Id: lart.c,v 1.2 2001/10/02 15:05:13 dwmw2 Exp $
+ * $Id: lart.c,v 1.10 2006/03/29 08:31:11 dwmw2 Exp $
*
* Author: Abraham vd Merwe <abraham@2d3d.co.za>
*
@@ -42,8 +42,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/version.h>
+#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/string.h>
#include <linux/mtd/mtd.h>
#ifdef HAVE_PARTITIONS
#include <linux/mtd/partitions.h>
@@ -121,7 +122,7 @@ static char module_name[] = "lart";
/*
* The data line mapping on LART is as follows:
- *
+ *
* U2 CPU | U3 CPU
* -------------------
* 0 20 | 0 12
@@ -180,7 +181,7 @@ static char module_name[] = "lart";
(((x) & 0x00004000) >> 13) \
)
-/*
+/*
* The address line mapping on LART is as follows:
*
* U3 CPU | U2 CPU
@@ -203,7 +204,7 @@ static char module_name[] = "lart";
* 12 15 | 12 15
* 13 14 | 13 14
* 14 16 | 14 16
- *
+ *
* MAIN BLOCK BOUNDARY
*
* 15 17 | 15 18
@@ -433,7 +434,7 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr)
}
instr->state = MTD_ERASE_DONE;
- if (instr->callback) instr->callback (instr);
+ mtd_erase_callback(instr);
return (0);
}
@@ -580,50 +581,43 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen
/***************************************************************************************************/
-#define NB_OF(x) (sizeof (x) / sizeof (x[0]))
-
static struct mtd_info mtd;
-static struct mtd_erase_region_info erase_regions[] =
-{
- /* parameter blocks */
- {
- offset: 0x00000000,
- erasesize: FLASH_BLOCKSIZE_PARAM,
- numblocks: FLASH_NUMBLOCKS_16m_PARAM
- },
- /* main blocks */
- {
- offset: FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM,
- erasesize: FLASH_BLOCKSIZE_MAIN,
- numblocks: FLASH_NUMBLOCKS_16m_MAIN
- }
+static struct mtd_erase_region_info erase_regions[] = {
+ /* parameter blocks */
+ {
+ .offset = 0x00000000,
+ .erasesize = FLASH_BLOCKSIZE_PARAM,
+ .numblocks = FLASH_NUMBLOCKS_16m_PARAM,
+ },
+ /* main blocks */
+ {
+ .offset = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM,
+ .erasesize = FLASH_BLOCKSIZE_MAIN,
+ .numblocks = FLASH_NUMBLOCKS_16m_MAIN,
+ }
};
#ifdef HAVE_PARTITIONS
-static struct mtd_partition lart_partitions[] =
-{
- /* blob */
- {
- name: "blob",
- offset: BLOB_START,
- size: BLOB_LEN,
- mask_flags: 0
- },
- /* kernel */
- {
- name: "kernel",
- offset: KERNEL_START, /* MTDPART_OFS_APPEND */
- size: KERNEL_LEN,
- mask_flags: 0
- },
- /* initial ramdisk / file system */
- {
- name: "file system",
- offset: INITRD_START, /* MTDPART_OFS_APPEND */
- size: INITRD_LEN, /* MTDPART_SIZ_FULL */
- mask_flags: 0
- }
+static struct mtd_partition lart_partitions[] = {
+ /* blob */
+ {
+ .name = "blob",
+ .offset = BLOB_START,
+ .size = BLOB_LEN,
+ },
+ /* kernel */
+ {
+ .name = "kernel",
+ .offset = KERNEL_START, /* MTDPART_OFS_APPEND */
+ .size = KERNEL_LEN,
+ },
+ /* initial ramdisk / file system */
+ {
+ .name = "file system",
+ .offset = INITRD_START, /* MTDPART_OFS_APPEND */
+ .size = INITRD_LEN, /* MTDPART_SIZ_FULL */
+ }
};
#endif
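The lart.c hunks above convert the old GNU-specific "field: value" labelled initializers to C99 designated initializers (".field = value"), which is what later compilers and kernel style expect; fields that are omitted, such as mask_flags, are implicitly zero-initialized, so the explicit zeros can be dropped. A minimal before/after illustration follows, using a made-up struct rather than mtd_partition.

/* Minimal illustration of the initializer-style change above. */
struct part {
	const char *name;
	unsigned long offset;
	unsigned long size;
	unsigned long mask_flags;
};

/* Old GNU-style labelled initializers (gcc extension):
 *	struct part p = { name: "blob", offset: 0, size: 0x20000 };
 * C99 designated initializers, as the patch now uses: */
static struct part p = {
	.name	= "blob",
	.offset	= 0,
	.size	= 0x20000,
	/* .mask_flags omitted: implicitly zero-initialized */
};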
@@ -644,12 +638,12 @@ int __init lart_flash_init (void)
mtd.flags = MTD_CAP_NORFLASH;
mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
- mtd.numeraseregions = NB_OF (erase_regions);
+ mtd.numeraseregions = ARRAY_SIZE(erase_regions);
mtd.eraseregions = erase_regions;
- mtd.module = THIS_MODULE;
mtd.erase = flash_erase;
mtd.read = flash_read;
mtd.write = flash_write;
+ mtd.owner = THIS_MODULE;
#ifdef LART_DEBUG
printk (KERN_DEBUG
@@ -674,9 +668,9 @@ int __init lart_flash_init (void)
result,mtd.eraseregions[result].numblocks);
#ifdef HAVE_PARTITIONS
- printk ("\npartitions = %d\n",NB_OF (lart_partitions));
+ printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions));
- for (result = 0; result < NB_OF (lart_partitions); result++)
+ for (result = 0; result < ARRAY_SIZE(lart_partitions); result++)
printk (KERN_DEBUG
"\n\n"
"lart_partitions[%d].name = %s\n"
@@ -691,7 +685,7 @@ int __init lart_flash_init (void)
#ifndef HAVE_PARTITIONS
result = add_mtd_device (&mtd);
#else
- result = add_mtd_partitions (&mtd,lart_partitions,NB_OF (lart_partitions));
+ result = add_mtd_partitions (&mtd,lart_partitions, ARRAY_SIZE(lart_partitions));
#endif
return (result);
diff --git a/linux-2.4.x/drivers/mtd/devices/ms02-nv.c b/linux-2.4.x/drivers/mtd/devices/ms02-nv.c
index 47e8be3..306d735 100644
--- a/linux-2.4.x/drivers/mtd/devices/ms02-nv.c
+++ b/linux-2.4.x/drivers/mtd/devices/ms02-nv.c
@@ -1,10 +1,12 @@
/*
- * Copyright (c) 2001 Maciej W. Rozycki
+ * Copyright (c) 2001 Maciej W. Rozycki
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * $Id: ms02-nv.c,v 1.12 2006/03/29 08:31:11 dwmw2 Exp $
*/
#include <linux/init.h>
@@ -27,18 +29,18 @@
static char version[] __initdata =
- "ms02-nv.c: v.1.0.0 13 Aug 2001 Maciej W. Rozycki.\n";
+ "ms02-nv.c: v.1.0.0 13 Aug 2001 Maciej W. Rozycki.\n";
-MODULE_AUTHOR("Maciej W. Rozycki <macro@ds2.pg.gda.pl>");
+MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
MODULE_DESCRIPTION("DEC MS02-NV NVRAM module driver");
MODULE_LICENSE("GPL");
/*
* Addresses we probe for an MS02-NV at. Modules may be located
- * at any 8MB boundary within a 0MB up to 112MB range or at any 32MB
- * boundary within a 0MB up to 448MB range. We don't support a module
- * at 0MB, though.
+ * at any 8MiB boundary within a 0MiB up to 112MiB range or at any 32MiB
+ * boundary within a 0MiB up to 448MiB range. We don't support a module
+ * at 0MiB, though.
*/
static ulong ms02nv_addrs[] __initdata = {
0x07000000, 0x06800000, 0x06000000, 0x05800000, 0x05000000,
@@ -57,7 +59,7 @@ static struct mtd_info *root_ms02nv_mtd;
static int ms02nv_read(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
- struct ms02nv_private *mp = (struct ms02nv_private *)mtd->priv;
+ struct ms02nv_private *mp = mtd->priv;
if (from + len > mtd->size)
return -EINVAL;
@@ -71,7 +73,7 @@ static int ms02nv_read(struct mtd_info *mtd, loff_t from,
static int ms02nv_write(struct mtd_info *mtd, loff_t to,
size_t len, size_t *retlen, const u_char *buf)
{
- struct ms02nv_private *mp = (struct ms02nv_private *)mtd->priv;
+ struct ms02nv_private *mp = mtd->priv;
if (to + len > mtd->size)
return -EINVAL;
@@ -97,8 +99,8 @@ static inline uint ms02nv_probe_one(ulong addr)
* The firmware writes MS02NV_ID at MS02NV_MAGIC and also
* a diagnostic status at MS02NV_DIAG.
*/
- ms02nv_diagp = (ms02nv_uint *)(KSEG1ADDR(addr + MS02NV_DIAG));
- ms02nv_magicp = (ms02nv_uint *)(KSEG1ADDR(addr + MS02NV_MAGIC));
+ ms02nv_diagp = (ms02nv_uint *)(CKSEG1ADDR(addr + MS02NV_DIAG));
+ ms02nv_magicp = (ms02nv_uint *)(CKSEG1ADDR(addr + MS02NV_MAGIC));
err = get_dbe(ms02nv_magic, ms02nv_magicp);
if (err)
return 0;
@@ -128,7 +130,7 @@ static int __init ms02nv_init_one(ulong addr)
int ret = -ENODEV;
- /* The module decodes 8MB of address space. */
+ /* The module decodes 8MiB of address space. */
mod_res = kmalloc(sizeof(*mod_res), GFP_KERNEL);
if (!mod_res)
return -ENOMEM;
@@ -220,7 +222,7 @@ static int __init ms02nv_init_one(ulong addr)
mtd->flags = MTD_CAP_RAM | MTD_XIP;
mtd->size = fixsize;
mtd->name = (char *)ms02nv_name;
- mtd->module = THIS_MODULE;
+ mtd->owner = THIS_MODULE;
mtd->read = ms02nv_read;
mtd->write = ms02nv_write;
@@ -231,7 +233,7 @@ static int __init ms02nv_init_one(ulong addr)
goto err_out_csr_res;
}
- printk(KERN_INFO "mtd%d: %s at 0x%08lx, size %uMB.\n",
+ printk(KERN_INFO "mtd%d: %s at 0x%08lx, size %zuMiB.\n",
mtd->index, ms02nv_name, addr, size >> 20);
mp->next = root_ms02nv_mtd;
@@ -263,7 +265,7 @@ err_out_mod_res:
static void __exit ms02nv_remove_one(void)
{
struct mtd_info *mtd = root_ms02nv_mtd;
- struct ms02nv_private *mp = (struct ms02nv_private *)mtd->priv;
+ struct ms02nv_private *mp = mtd->priv;
root_ms02nv_mtd = mp->next;
@@ -291,12 +293,13 @@ static int __init ms02nv_init(void)
switch (mips_machtype) {
case MACH_DS5000_200:
- csr = (volatile u32 *)KN02_CSR_ADDR;
+ csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
if (*csr & KN02_CSR_BNK32M)
stride = 2;
break;
case MACH_DS5000_2X0:
- csr = (volatile u32 *)KN03_MCR_BASE;
+ case MACH_DS5900:
+ csr = (volatile u32 *)CKSEG1ADDR(KN03_SLOT_BASE + IOASIC_MCR);
if (*csr & KN03_MCR_BNK32M)
stride = 2;
break;
@@ -305,7 +308,7 @@ static int __init ms02nv_init(void)
break;
}
- for (i = 0; i < (sizeof(ms02nv_addrs) / sizeof(*ms02nv_addrs)); i++)
+ for (i = 0; i < ARRAY_SIZE(ms02nv_addrs); i++)
if (!ms02nv_init_one(ms02nv_addrs[i] << stride))
count++;
diff --git a/linux-2.4.x/drivers/mtd/devices/ms02-nv.h b/linux-2.4.x/drivers/mtd/devices/ms02-nv.h
index cd10b6b..8a6eef7 100644
--- a/linux-2.4.x/drivers/mtd/devices/ms02-nv.h
+++ b/linux-2.4.x/drivers/mtd/devices/ms02-nv.h
@@ -1,32 +1,96 @@
/*
- * Copyright (c) 2001 Maciej W. Rozycki
+ * Copyright (c) 2001, 2003 Maciej W. Rozycki
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * DEC MS02-NV (54-20948-01) battery backed-up NVRAM module for
+ * DECstation/DECsystem 5000/2x0 and DECsystem 5900 and 5900/260
+ * systems.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * $Id: ms02-nv.h,v 1.3 2003/08/19 09:25:36 dwmw2 Exp $
*/
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
+/*
+ * Addresses are decoded as follows:
+ *
+ * 0x000000 - 0x3fffff SRAM
+ * 0x400000 - 0x7fffff CSR
+ *
+ * Within the SRAM area the following ranges are forced by the system
+ * firmware:
+ *
+ * 0x000000 - 0x0003ff diagnostic area, destroyed upon a reboot
+ * 0x000400 - ENDofRAM storage area, available to operating systems
+ *
+ * but we can't really use the available area right from 0x000400 as
+ * the first word is used by the firmware as a status flag passed
+ * from an operating system. If anything but the valid data magic
+ * ID value is found, the firmware considers the SRAM clean, i.e.
+ * containing no valid data, and disables the battery resulting in
+ * data being erased as soon as power is switched off. So the choice
+ * for the start address of the user-available area is 0x001000, which
+ * is nicely page aligned.  The area between 0x000404 and 0x000fff may
+ * be used by the driver for its own needs.
+ *
+ * The diagnostic area defines two status words to be read by an
+ * operating system, a magic ID to distinguish a MS02-NV board from
+ * anything else and a status information providing results of tests
+ * as well as the size of SRAM available, which can be 1MiB or 2MiB
+ * (that's what the firmware handles; no idea if 2MiB modules ever
+ * existed).
+ *
+ * The firmware only handles the MS02-NV board if installed in the
+ * last (15th) slot, so for any other location the status information
+ * stored in the SRAM cannot be relied upon. But from the hardware
+ * point of view there is no problem using up to 14 such boards in a
+ * system -- only the 1st slot needs to be filled with a DRAM module.
+ * The MS02-NV board is ECC-protected, like other MS02 memory boards.
+ *
+ * The state of the battery as provided by the CSR is reflected on
+ * the two onboard LEDs. When facing the battery side of the board,
+ * with the LEDs at the top left and the battery at the bottom right
+ * (i.e. looking from the back side of the system box), their meaning
+ * is as follows (the system has to be powered on):
+ *
+ * left LED battery disable status: lit = enabled
+ * right LED battery condition status: lit = OK
+ */
+
/* MS02-NV iomem register offsets. */
#define MS02NV_CSR 0x400000 /* control & status register */
+/* MS02-NV CSR status bits. */
+#define MS02NV_CSR_BATT_OK 0x01 /* battery OK */
+#define MS02NV_CSR_BATT_OFF 0x02 /* battery disabled */
+
+
/* MS02-NV memory offsets. */
#define MS02NV_DIAG 0x0003f8 /* diagnostic status */
#define MS02NV_MAGIC 0x0003fc /* MS02-NV magic ID */
-#define MS02NV_RAM 0x000400 /* general-purpose RAM start */
+#define MS02NV_VALID 0x000400 /* valid data magic ID */
+#define MS02NV_RAM 0x001000 /* user-exposed RAM start */
-/* MS02-NV diagnostic status constants. */
-#define MS02NV_DIAG_SIZE_MASK 0xf0 /* RAM size mask */
-#define MS02NV_DIAG_SIZE_SHIFT 0x10 /* RAM size shift (left) */
+/* MS02-NV diagnostic status bits. */
+#define MS02NV_DIAG_TEST 0x01 /* SRAM test done (?) */
+#define MS02NV_DIAG_RO 0x02 /* SRAM r/o test done */
+#define MS02NV_DIAG_RW 0x04 /* SRAM r/w test done */
+#define MS02NV_DIAG_FAIL 0x08 /* SRAM test failed */
+#define MS02NV_DIAG_SIZE_MASK 0xf0 /* SRAM size mask */
+#define MS02NV_DIAG_SIZE_SHIFT 0x10 /* SRAM size shift (left) */
/* MS02-NV general constants. */
#define MS02NV_ID 0x03021966 /* MS02-NV magic ID value */
+#define MS02NV_VALID_ID 0xbd100248 /* valid data magic ID value */
#define MS02NV_SLOT_SIZE 0x800000 /* size of the address space
decoded by the module */
+
typedef volatile u32 ms02nv_uint;
struct ms02nv_private {
diff --git a/linux-2.4.x/drivers/mtd/devices/mtdram.c b/linux-2.4.x/drivers/mtd/devices/mtdram.c
index f105f7d..95a8b65 100644
--- a/linux-2.4.x/drivers/mtd/devices/mtdram.c
+++ b/linux-2.4.x/drivers/mtd/devices/mtdram.c
@@ -1,171 +1,154 @@
/*
* mtdram - a test mtd device
- * $Id: mtdram.c,v 1.26 2001/12/01 10:24:18 dwmw2 Exp $
+ * $Id: mtdram.c,v 1.38 2005/11/06 10:25:42 gleixner Exp $
* Author: Alexander Larsson <alex@cendio.se>
*
* Copyright (c) 1999 Alexander Larsson <alex@cendio.se>
+ * Copyright (c) 2005 Joern Engel <joern@wh.fh-wedel.de>
*
* This code is GPL
- *
*/
-
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/mtd.h>
-#ifndef CONFIG_MTDRAM_ABS_POS
- #define CONFIG_MTDRAM_ABS_POS 0
-#endif
-
-#if CONFIG_MTDRAM_ABS_POS > 0
- #include <asm/io.h>
-#endif
-
-#ifdef MODULE
static unsigned long total_size = CONFIG_MTDRAM_TOTAL_SIZE;
static unsigned long erase_size = CONFIG_MTDRAM_ERASE_SIZE;
-MODULE_PARM(total_size,"l");
-MODULE_PARM(erase_size,"l");
#define MTDRAM_TOTAL_SIZE (total_size * 1024)
#define MTDRAM_ERASE_SIZE (erase_size * 1024)
-#else
-#define MTDRAM_TOTAL_SIZE (CONFIG_MTDRAM_TOTAL_SIZE * 1024)
-#define MTDRAM_ERASE_SIZE (CONFIG_MTDRAM_ERASE_SIZE * 1024)
-#endif
+#ifdef MODULE
+module_param(total_size, ulong, 0);
+MODULE_PARM_DESC(total_size, "Total device size in KiB");
+module_param(erase_size, ulong, 0);
+MODULE_PARM_DESC(erase_size, "Device erase block size in KiB");
+#endif
// We could store these in the mtd structure, but we only support 1 device..
static struct mtd_info *mtd_info;
-
-static int
-ram_erase(struct mtd_info *mtd, struct erase_info *instr)
+static int ram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
- DEBUG(MTD_DEBUG_LEVEL2, "ram_erase(pos:%ld, len:%ld)\n", (long)instr->addr, (long)instr->len);
- if (instr->addr + instr->len > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL1, "ram_erase() out of bounds (%ld > %ld)\n", (long)(instr->addr + instr->len), (long)mtd->size);
- return -EINVAL;
- }
-
- memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
-
- instr->state = MTD_ERASE_DONE;
-
- if (instr->callback)
- (*(instr->callback))(instr);
- return 0;
+ if (instr->addr + instr->len > mtd->size)
+ return -EINVAL;
+
+ memset((char *)mtd->priv + instr->addr, 0xff, instr->len);
+
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
}
-static int ram_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
+static int ram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char **mtdbuf)
{
- if (from + len > mtd->size)
- return -EINVAL;
-
- *mtdbuf = mtd->priv + from;
- *retlen = len;
- return 0;
+ if (from + len > mtd->size)
+ return -EINVAL;
+
+ *mtdbuf = mtd->priv + from;
+ *retlen = len;
+ return 0;
}
-static void ram_unpoint (struct mtd_info *mtd, u_char *addr)
+static void ram_unpoint(struct mtd_info *mtd, u_char * addr, loff_t from,
+ size_t len)
{
- DEBUG(MTD_DEBUG_LEVEL2, "ram_unpoint\n");
}
static int ram_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+ size_t *retlen, u_char *buf)
{
- DEBUG(MTD_DEBUG_LEVEL2, "ram_read(pos:%ld, len:%ld)\n", (long)from, (long)len);
- if (from + len > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL1, "ram_read() out of bounds (%ld > %ld)\n", (long)(from + len), (long)mtd->size);
- return -EINVAL;
- }
+ if (from + len > mtd->size)
+ return -EINVAL;
- memcpy(buf, mtd->priv + from, len);
+ memcpy(buf, mtd->priv + from, len);
- *retlen=len;
- return 0;
+ *retlen = len;
+ return 0;
}
static int ram_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+ size_t *retlen, const u_char *buf)
{
- DEBUG(MTD_DEBUG_LEVEL2, "ram_write(pos:%ld, len:%ld)\n", (long)to, (long)len);
- if (to + len > mtd->size) {
- DEBUG(MTD_DEBUG_LEVEL1, "ram_write() out of bounds (%ld > %ld)\n", (long)(to + len), (long)mtd->size);
- return -EINVAL;
- }
+ if (to + len > mtd->size)
+ return -EINVAL;
- memcpy ((char *)mtd->priv + to, buf, len);
+ memcpy((char *)mtd->priv + to, buf, len);
- *retlen=len;
- return 0;
+ *retlen = len;
+ return 0;
}
static void __exit cleanup_mtdram(void)
{
- if (mtd_info) {
- del_mtd_device(mtd_info);
- if (mtd_info->priv)
-#if CONFIG_MTDRAM_ABS_POS > 0
- iounmap(mtd_info->priv);
-#else
- vfree(mtd_info->priv);
-#endif
- kfree(mtd_info);
- }
+ if (mtd_info) {
+ del_mtd_device(mtd_info);
+ vfree(mtd_info->priv);
+ kfree(mtd_info);
+ }
}
-int __init init_mtdram(void)
+int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
+ unsigned long size, char *name)
{
- // Allocate some memory
- mtd_info = (struct mtd_info *)kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
- if (!mtd_info)
- return -ENOMEM;
-
- memset(mtd_info, 0, sizeof(*mtd_info));
-
- // Setup the MTD structure
- mtd_info->name = "mtdram test device";
- mtd_info->type = MTD_RAM;
- mtd_info->flags = MTD_CAP_RAM;
- mtd_info->size = MTDRAM_TOTAL_SIZE;
- mtd_info->erasesize = MTDRAM_ERASE_SIZE;
-#if CONFIG_MTDRAM_ABS_POS > 0
- mtd_info->priv = ioremap(CONFIG_MTDRAM_ABS_POS, MTDRAM_TOTAL_SIZE);
-#else
- mtd_info->priv = vmalloc(MTDRAM_TOTAL_SIZE);
-#endif
+ memset(mtd, 0, sizeof(*mtd));
+
+ /* Setup the MTD structure */
+ mtd->name = name;
+ mtd->type = MTD_RAM;
+ mtd->flags = MTD_CAP_RAM;
+ mtd->size = size;
+ mtd->erasesize = MTDRAM_ERASE_SIZE;
+ mtd->priv = mapped_address;
+
+ mtd->owner = THIS_MODULE;
+ mtd->erase = ram_erase;
+ mtd->point = ram_point;
+ mtd->unpoint = ram_unpoint;
+ mtd->read = ram_read;
+ mtd->write = ram_write;
+
+ if (add_mtd_device(mtd)) {
+ return -EIO;
+ }
+
+ return 0;
+}
- if (!mtd_info->priv) {
- DEBUG(MTD_DEBUG_LEVEL1, "Failed to vmalloc(/ioremap) memory region of size %ld (ABS_POS:%ld)\n", (long)MTDRAM_TOTAL_SIZE, (long)CONFIG_MTDRAM_ABS_POS);
- kfree(mtd_info);
- mtd_info = NULL;
- return -ENOMEM;
- }
- memset(mtd_info->priv, 0xff, MTDRAM_TOTAL_SIZE);
-
- mtd_info->module = THIS_MODULE;
- mtd_info->erase = ram_erase;
- mtd_info->point = ram_point;
- mtd_info->unpoint = ram_unpoint;
- mtd_info->read = ram_read;
- mtd_info->write = ram_write;
-
- if (add_mtd_device(mtd_info)) {
-#if CONFIG_MTDRAM_ABS_POS > 0
- iounmap(mtd_info->priv);
-#else
- vfree(mtd_info->priv);
-#endif
- kfree(mtd_info);
- mtd_info = NULL;
- return -EIO;
- }
-
- return 0;
+static int __init init_mtdram(void)
+{
+ void *addr;
+ int err;
+
+ if (!total_size)
+ return -EINVAL;
+
+ /* Allocate some memory */
+ mtd_info = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
+ if (!mtd_info)
+ return -ENOMEM;
+
+ addr = vmalloc(MTDRAM_TOTAL_SIZE);
+ if (!addr) {
+ kfree(mtd_info);
+ mtd_info = NULL;
+ return -ENOMEM;
+ }
+ err = mtdram_init_device(mtd_info, addr, MTDRAM_TOTAL_SIZE, "mtdram test device");
+ if (err) {
+ vfree(addr);
+ kfree(mtd_info);
+ mtd_info = NULL;
+ return err;
+ }
+ memset(mtd_info->priv, 0xff, MTDRAM_TOTAL_SIZE);
+ return err;
}
module_init(init_mtdram);
@@ -174,4 +157,3 @@ module_exit(cleanup_mtdram);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Larsson <alexl@redhat.com>");
MODULE_DESCRIPTION("Simulated MTD driver for testing");
-
diff --git a/linux-2.4.x/drivers/mtd/devices/phram.c b/linux-2.4.x/drivers/mtd/devices/phram.c
new file mode 100644
index 0000000..e8685ee
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/devices/phram.c
@@ -0,0 +1,300 @@
+/**
+ * $Id: phram.c,v 1.16 2005/11/07 11:14:25 gleixner Exp $
+ *
+ * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
+ * Copyright (c) 2003-2004 Jörn Engel <joern@wh.fh-wedel.de>
+ *
+ * Usage:
+ *
+ * one command line parameter per device, each in the form:
+ * phram=<name>,<start>,<len>
+ * <name> may be up to 63 characters.
+ * <start> and <len> can be octal, decimal or hexadecimal. If followed
+ * by "ki", "Mi" or "Gi", the numbers will be interpreted as kilo, mega or
+ * gigabytes.
+ *
+ * Example:
+ * phram=swap,64Mi,128Mi phram=test,900Mi,1Mi
+ */
+#include <asm/io.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+
+#define ERROR(fmt, args...) printk(KERN_ERR "phram: " fmt , ## args)
+
+struct phram_mtd_list {
+ struct mtd_info mtd;
+ struct list_head list;
+};
+
+static LIST_HEAD(phram_list);
+
+
+static int phram_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ u_char *start = mtd->priv;
+
+ if (instr->addr + instr->len > mtd->size)
+ return -EINVAL;
+
+ memset(start + instr->addr, 0xff, instr->len);
+
+ /* This'll catch a few races. Free the thing before returning :)
+ * I don't feel at all ashamed. This kind of thing is possible anyway
+ * with flash, but unlikely.
+ */
+
+ instr->state = MTD_ERASE_DONE;
+
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+static int phram_point(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char **mtdbuf)
+{
+ u_char *start = mtd->priv;
+
+ if (from + len > mtd->size)
+ return -EINVAL;
+
+ *mtdbuf = start + from;
+ *retlen = len;
+ return 0;
+}
+
+static void phram_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from,
+ size_t len)
+{
+}
+
+static int phram_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ u_char *start = mtd->priv;
+
+ if (from >= mtd->size)
+ return -EINVAL;
+
+ if (len > mtd->size - from)
+ len = mtd->size - from;
+
+ memcpy(buf, start + from, len);
+
+ *retlen = len;
+ return 0;
+}
+
+static int phram_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ u_char *start = mtd->priv;
+
+ if (to >= mtd->size)
+ return -EINVAL;
+
+ if (len > mtd->size - to)
+ len = mtd->size - to;
+
+ memcpy(start + to, buf, len);
+
+ *retlen = len;
+ return 0;
+}
+
+
+
+static void unregister_devices(void)
+{
+ struct phram_mtd_list *this, *safe;
+
+ list_for_each_entry_safe(this, safe, &phram_list, list) {
+ del_mtd_device(&this->mtd);
+ iounmap(this->mtd.priv);
+ kfree(this);
+ }
+}
+
+static int register_device(char *name, unsigned long start, unsigned long len)
+{
+ struct phram_mtd_list *new;
+ int ret = -ENOMEM;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ goto out0;
+
+ memset(new, 0, sizeof(*new));
+
+ ret = -EIO;
+ new->mtd.priv = ioremap(start, len);
+ if (!new->mtd.priv) {
+ ERROR("ioremap failed\n");
+ goto out1;
+ }
+
+
+ new->mtd.name = name;
+ new->mtd.size = len;
+ new->mtd.flags = MTD_CAP_RAM | MTD_ERASEABLE | MTD_VOLATILE;
+ new->mtd.erase = phram_erase;
+ new->mtd.point = phram_point;
+ new->mtd.unpoint = phram_unpoint;
+ new->mtd.read = phram_read;
+ new->mtd.write = phram_write;
+ new->mtd.owner = THIS_MODULE;
+ new->mtd.type = MTD_RAM;
+ new->mtd.erasesize = PAGE_SIZE;
+
+ ret = -EAGAIN;
+ if (add_mtd_device(&new->mtd)) {
+ ERROR("Failed to register new device\n");
+ goto out2;
+ }
+
+ list_add_tail(&new->list, &phram_list);
+ return 0;
+
+out2:
+ iounmap(new->mtd.priv);
+out1:
+ kfree(new);
+out0:
+ return ret;
+}
+
+static int ustrtoul(const char *cp, char **endp, unsigned int base)
+{
+ unsigned long result = simple_strtoul(cp, endp, base);
+
+ switch (**endp) {
+ case 'G':
+ result *= 1024;
+ case 'M':
+ result *= 1024;
+ case 'k':
+ result *= 1024;
+ /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
+ if ((*endp)[1] == 'i')
+ (*endp) += 2;
+ }
+ return result;
+}
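ustrtoul() above relies on deliberate switch fall-through: a trailing 'G' multiplies by 1024 three times, 'M' twice and 'k' once, and only the "ki"/"Mi"/"Gi" spellings consume the suffix, so a bare "64M" leaves the 'M' unconsumed and is rejected by parse_num32(). The user-space sketch below reproduces the same rule with the standard strtoul(); it is not the kernel helper itself.

/* User-space sketch of the suffix rule implemented by ustrtoul() above:
   "64Mi" -> 67108864, "4ki" -> 4096, plain numbers unchanged. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_size(const char *s, char **endp)
{
	unsigned long result = strtoul(s, endp, 0);

	switch (**endp) {
	case 'G':
		result *= 1024;	/* fall through */
	case 'M':
		result *= 1024;	/* fall through */
	case 'k':
		result *= 1024;
		/* only "ki"/"Mi"/"Gi" consume the suffix; a bare
		   "k"/"M"/"G" is left for the caller to reject */
		if ((*endp)[1] == 'i')
			*endp += 2;
	}
	return result;
}

int main(void)
{
	char *end;

	printf("64Mi -> %lu\n", parse_size("64Mi", &end));	/* 67108864 */
	printf("4ki  -> %lu\n", parse_size("4ki", &end));	/* 4096 */
	printf("900  -> %lu\n", parse_size("900", &end));	/* 900 */
	return 0;
}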
+
+static int parse_num32(uint32_t *num32, const char *token)
+{
+ char *endp;
+ unsigned long n;
+
+ n = ustrtoul(token, &endp, 0);
+ if (*endp)
+ return -EINVAL;
+
+ *num32 = n;
+ return 0;
+}
+
+static int parse_name(char **pname, const char *token)
+{
+ size_t len;
+ char *name;
+
+ len = strlen(token) + 1;
+ if (len > 64)
+ return -ENOSPC;
+
+ name = kmalloc(len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ strcpy(name, token);
+
+ *pname = name;
+ return 0;
+}
+
+
+static inline void kill_final_newline(char *str)
+{
+ char *newline = strrchr(str, '\n');
+ if (newline && !newline[1])
+ *newline = 0;
+}
+
+
+#define parse_err(fmt, args...) do { \
+ ERROR(fmt , ## args); \
+ return 0; \
+} while (0)
+
+static int phram_setup(const char *val, struct kernel_param *kp)
+{
+ char buf[64+12+12], *str = buf;
+ char *token[3];
+ char *name;
+ uint32_t start;
+ uint32_t len;
+ int i, ret;
+
+ if (strnlen(val, sizeof(buf)) >= sizeof(buf))
+ parse_err("parameter too long\n");
+
+ strcpy(str, val);
+ kill_final_newline(str);
+
+ for (i=0; i<3; i++)
+ token[i] = strsep(&str, ",");
+
+ if (str)
+ parse_err("too many arguments\n");
+
+ if (!token[2])
+ parse_err("not enough arguments\n");
+
+ ret = parse_name(&name, token[0]);
+ if (ret == -ENOMEM)
+ parse_err("out of memory\n");
+ if (ret == -ENOSPC)
+ parse_err("name too long\n");
+ if (ret)
+ return 0;
+
+ ret = parse_num32(&start, token[1]);
+ if (ret)
+ parse_err("illegal start address\n");
+
+ ret = parse_num32(&len, token[2]);
+ if (ret)
+ parse_err("illegal device length\n");
+
+ register_device(name, start, len);
+
+ return 0;
+}
+
+module_param_call(phram, phram_setup, NULL, NULL, 000);
+MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\"");
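
For reference, phram_setup() above expects a single comma-separated triple split with strsep() into name, start and length, so with the parameter name declared by module_param_call() a load would look roughly like `modprobe phram phram=myram,0x80000000,0x100000` (the region name and addresses are made-up examples). A user-space sketch of just the tokenizing step (strsep() is assumed to be available, as on glibc/BSD):

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical argument string; phram_setup() copies it into a
	 * bounded buffer and strips a trailing newline first */
	char buf[] = "myram,0x80000000,0x100000";
	char *str = buf;
	char *token[3];
	int i;

	for (i = 0; i < 3; i++)
		token[i] = strsep(&str, ",");

	/* str is now NULL (no fourth field) and token[2] is non-NULL: valid */
	printf("name=%s start=%s len=%s\n", token[0], token[1], token[2]);
	return 0;
}
```
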
+
+
+static int __init init_phram(void)
+{
+ return 0;
+}
+
+static void __exit cleanup_phram(void)
+{
+ unregister_devices();
+}
+
+module_init(init_phram);
+module_exit(cleanup_phram);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jörn Engel <joern@wh.fh-wedel.de>");
+MODULE_DESCRIPTION("MTD driver for physical RAM");
diff --git a/linux-2.4.x/drivers/mtd/devices/pmc551.c b/linux-2.4.x/drivers/mtd/devices/pmc551.c
index 80afcc6..8d577c8 100644
--- a/linux-2.4.x/drivers/mtd/devices/pmc551.c
+++ b/linux-2.4.x/drivers/mtd/devices/pmc551.c
@@ -1,5 +1,5 @@
/*
- * $Id: pmc551.c,v 1.20 2002/03/05 13:47:08 dwmw2 Exp $
+ * $Id: pmc551.c,v 1.33 2005/11/29 20:01:27 gleixner Exp $
*
* PMC551 PCI Mezzanine Ram Device
*
@@ -27,7 +27,7 @@
* it as high speed swap or for a high speed disk device of some
* sort. Which becomes very useful on diskless systems in the
* embedded market I might add.
- *
+ *
* Notes:
* Due to what I assume is more buggy SROM, the 64M PMC551 I
 * have available claims that all 4 of its DRAM banks have 64M
@@ -63,10 +63,10 @@
 * Minyard set up the card to utilize a 1M sliding aperture.
*
* Corey Minyard <minyard@nortelnetworks.com>
- * * Modified driver to utilize a sliding aperture instead of
+ * * Modified driver to utilize a sliding aperture instead of
* mapping all memory into kernel space which turned out to
* be very wasteful.
- * * Located a bug in the SROM's initialization sequence that
+ * * Located a bug in the SROM's initialization sequence that
* made the memory unusable, added a fix to code to touch up
* the DRAM some.
*
@@ -98,8 +98,6 @@
#include <linux/ioctl.h>
#include <asm/io.h>
#include <asm/system.h>
-#include <asm/segment.h>
-#include <stdarg.h>
#include <linux/pci.h>
#ifndef CONFIG_PCI
@@ -110,17 +108,11 @@
#include <linux/mtd/pmc551.h>
#include <linux/mtd/compatmac.h>
-#if LINUX_VERSION_CODE > 0x20300
-#define PCI_BASE_ADDRESS(dev) (dev->resource[0].start)
-#else
-#define PCI_BASE_ADDRESS(dev) (dev->base_address[0])
-#endif
-
static struct mtd_info *pmc551list;
static int pmc551_erase (struct mtd_info *mtd, struct erase_info *instr)
{
- struct mypriv *priv = (struct mypriv *)mtd->priv;
+ struct mypriv *priv = mtd->priv;
u32 soff_hi, soff_lo; /* start address offset hi/lo */
u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
unsigned long end;
@@ -176,16 +168,14 @@ out:
printk(KERN_DEBUG "pmc551_erase() done\n");
#endif
- if (instr->callback) {
- (*(instr->callback))(instr);
- }
+ mtd_erase_callback(instr);
return 0;
}
static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
- struct mypriv *priv = (struct mypriv *)mtd->priv;
+ struct mypriv *priv = mtd->priv;
u32 soff_hi;
u32 soff_lo;
@@ -216,7 +206,7 @@ static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *
}
-static void pmc551_unpoint (struct mtd_info *mtd, u_char *addr)
+static void pmc551_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
#ifdef CONFIG_MTD_PMC551_DEBUG
printk(KERN_DEBUG "pmc551_unpoint()\n");
@@ -226,7 +216,7 @@ static void pmc551_unpoint (struct mtd_info *mtd, u_char *addr)
static int pmc551_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
- struct mypriv *priv = (struct mypriv *)mtd->priv;
+ struct mypriv *priv = mtd->priv;
u32 soff_hi, soff_lo; /* start address offset hi/lo */
u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
unsigned long end;
@@ -288,7 +278,7 @@ out:
static int pmc551_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
- struct mypriv *priv = (struct mypriv *)mtd->priv;
+ struct mypriv *priv = mtd->priv;
u32 soff_hi, soff_lo; /* start address offset hi/lo */
u32 eoff_hi, eoff_lo; /* end address offset hi/lo */
unsigned long end;
@@ -399,7 +389,7 @@ static u32 fixup_pmc551 (struct pci_dev *dev)
bcmd |= (0x40|0x20);
pci_write_config_byte(dev, PMC551_SYS_CTRL_REG, bcmd);
- /*
+ /*
* Take care and turn off the memory on the device while we
* tweak the configurations
*/
@@ -417,7 +407,7 @@ static u32 fixup_pmc551 (struct pci_dev *dev)
* Grab old BAR0 config so that we can figure out memory size
* This is another bit of kludge going on. The reason for the
* redundancy is I am hoping to retain the original configuration
- * previously assigned to the card by the BIOS or some previous
+ * previously assigned to the card by the BIOS or some previous
* fixup routine in the kernel. So we read the old config into cfg,
* then write all 1's to the memory space, read back the result into
* "size", and then write back all the old config.
@@ -489,7 +479,7 @@ static u32 fixup_pmc551 (struct pci_dev *dev)
} while ( (PCI_COMMAND_IO) & cmd );
/*
- * Turn on auto refresh
+ * Turn on auto refresh
* The loop is taken directly from Ramix's example code. I assume that
* this must be held high for some duration of time, but I can find no
 * documentation referencing the reasons why.
@@ -565,7 +555,7 @@ static u32 fixup_pmc551 (struct pci_dev *dev)
(size<1024)?size:(size<1048576)?size>>10:size>>20,
(size<1024)?'B':(size<1048576)?'K':'M',
size, ((dcmd&(0x1<<3)) == 0)?"non-":"",
- PCI_BASE_ADDRESS(dev)&PCI_BASE_ADDRESS_MEM_MASK );
+ (dev->resource[0].start)&PCI_BASE_ADDRESS_MEM_MASK );
/*
* Check to see the state of the memory
@@ -624,7 +614,7 @@ static u32 fixup_pmc551 (struct pci_dev *dev)
pci_read_config_byte(dev, PMC551_SYS_CTRL_REG, &bcmd );
printk( KERN_DEBUG "pmc551: EEPROM is under %s control\n"
"pmc551: System Control Register is %slocked to PCI access\n"
- "pmc551: System Control Register is %slocked to EEPROM access\n",
+ "pmc551: System Control Register is %slocked to EEPROM access\n",
(bcmd&0x1)?"software":"hardware",
(bcmd&0x20)?"":"un", (bcmd&0x40)?"":"un");
#endif
@@ -639,10 +629,6 @@ static u32 fixup_pmc551 (struct pci_dev *dev)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Ferrell <mferrell@mvista.com>");
MODULE_DESCRIPTION(PMC551_VERSION);
-MODULE_PARM(msize, "i");
-MODULE_PARM_DESC(msize, "memory size in Megabytes [1 - 1024]");
-MODULE_PARM(asize, "i");
-MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]");
/*
* Stuff these outside the ifdef so as to not bust compiled in driver support
@@ -654,10 +640,15 @@ static int asize=CONFIG_MTD_PMC551_APERTURE_SIZE
static int asize=0;
#endif
+module_param(msize, int, 0);
+MODULE_PARM_DESC(msize, "memory size in Megabytes [1 - 1024]");
+module_param(asize, int, 0);
+MODULE_PARM_DESC(asize, "aperture size, must be <= memsize [1-1024]");
+
/*
* PMC551 Card Initialization
*/
-int __init init_pmc551(void)
+static int __init init_pmc551(void)
{
struct pci_dev *PCI_Device = NULL;
struct mypriv *priv;
@@ -683,11 +674,6 @@ int __init init_pmc551(void)
printk(KERN_INFO PMC551_VERSION);
- if(!pci_present()) {
- printk(KERN_NOTICE "pmc551: PCI not enabled.\n");
- return -ENODEV;
- }
-
/*
* PCU-bus chipset probe.
*/
@@ -700,7 +686,7 @@ int __init init_pmc551(void)
}
printk(KERN_NOTICE "pmc551: Found PCI V370PDC at 0x%lX\n",
- PCI_BASE_ADDRESS(PCI_Device));
+ PCI_Device->resource[0].start);
/*
* The PMC551 device acts VERY weird if you don't init it
@@ -754,10 +740,10 @@ int __init init_pmc551(void)
printk(KERN_NOTICE "pmc551: Using specified aperture size %dM\n", asize>>20);
priv->asize = asize;
}
- priv->start = ioremap((PCI_BASE_ADDRESS(PCI_Device)
+ priv->start = ioremap(((PCI_Device->resource[0].start)
& PCI_BASE_ADDRESS_MEM_MASK),
priv->asize);
-
+
if (!priv->start) {
printk(KERN_NOTICE "pmc551: Unable to map IO space\n");
kfree(mtd->priv);
@@ -778,7 +764,7 @@ int __init init_pmc551(void)
priv->curr_map0 );
#ifdef CONFIG_MTD_PMC551_DEBUG
- printk( KERN_DEBUG "pmc551: aperture set to %d\n",
+ printk( KERN_DEBUG "pmc551: aperture set to %d\n",
(priv->base_map0 & 0xF0)>>4 );
#endif
@@ -789,10 +775,10 @@ int __init init_pmc551(void)
mtd->write = pmc551_write;
mtd->point = pmc551_point;
mtd->unpoint = pmc551_unpoint;
- mtd->module = THIS_MODULE;
mtd->type = MTD_RAM;
mtd->name = "PMC551 RAM board";
mtd->erasesize = 0x10000;
+ mtd->owner = THIS_MODULE;
if (add_mtd_device(mtd)) {
printk(KERN_NOTICE "pmc551: Failed to register new device\n");
@@ -834,15 +820,15 @@ static void __exit cleanup_pmc551(void)
struct mypriv *priv;
while((mtd=pmc551list)) {
- priv = (struct mypriv *)mtd->priv;
+ priv = mtd->priv;
pmc551list = priv->nextpmc551;
-
+
if(priv->start) {
printk (KERN_DEBUG "pmc551: unmapping %dM starting at 0x%p\n",
priv->asize>>20, priv->start);
iounmap (priv->start);
}
-
+
kfree (mtd->priv);
del_mtd_device (mtd);
kfree (mtd);
diff --git a/linux-2.4.x/drivers/mtd/devices/ramtd.c b/linux-2.4.x/drivers/mtd/devices/ramtd.c
new file mode 100644
index 0000000..882ff33
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/devices/ramtd.c
@@ -0,0 +1,249 @@
+/**
+ * Simple mtd driver that dynamically allocates/frees memory
+ *
+ * $Id: ramtd.c,v 1.7 2006/03/08 17:48:28 joern Exp $
+ *
+ * Copyright (c) 2005 Joern Engel <joern@wh.fh-wedel.de>
+ * Copyright (c) 2006 iSteve <isteve@bofh.cz>
+ * - Added module parameters, immediate allocation and fixed few leaks.
+ */
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/vmalloc.h>
+
+struct ramtd {
+ struct mtd_info mtd;
+ struct list_head list;
+ u32 size;
+ void *page[];
+};
+
+static LIST_HEAD(ramtd_list);
+static DECLARE_MUTEX(ramtd_mutex);
+
+static unsigned long ramtd_size = 4*1024*1024;
+static int ramtd_now = 0;
+
+module_param(ramtd_size, ulong, 0);
+MODULE_PARM_DESC(ramtd_size, "Total device size in bytes");
+
+module_param(ramtd_now, bool, 0);
+MODULE_PARM_DESC(ramtd_now, "Allocate all memory when loaded");
+
+
+static void *get_pool_page(void)
+{
+ void *ret = (void*)__get_free_page(GFP_KERNEL);
+ return ret;
+}
+
+
+static void free_pool_page(void *page)
+{
+ free_page((unsigned long)page);
+}
+
+static void free_all_pages(struct ramtd *this)
+{
+ u32 page;
+
+ for (page = 0; page < this->mtd.size / PAGE_SIZE; page++) {
+ if (this->page[page])
+ free_pool_page(this->page[page]);
+ }
+}
+
+static int alloc_all_pages(struct ramtd *this)
+{
+ u32 page;
+
+ if (!ramtd_now)
+ return 0;
+
+ for (page = 0; page < this->mtd.size / PAGE_SIZE; page++) {
+ if (this->page[page])
+ continue;
+
+ this->page[page] = get_pool_page();
+ if (!this->page[page])
+ return -ENOMEM;
+
+ memset(this->page[page], 0xff, PAGE_SIZE);
+ }
+
+ return 0;
+}
+
+
+static int ramtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ struct ramtd *ramtd = container_of(mtd, typeof(*ramtd), mtd);
+ u32 addr = instr->addr;
+ u32 len = instr->len;
+
+ if (addr + len > mtd->size)
+ return -EINVAL;
+ if (addr % PAGE_SIZE)
+ return -EINVAL;
+ if (len % PAGE_SIZE)
+ return -EINVAL;
+
+ while (len) {
+ u32 page = addr / PAGE_SIZE;
+
+ down_interruptible(&ramtd_mutex);
+ free_pool_page(ramtd->page[page]);
+ ramtd->page[page] = NULL;
+ up(&ramtd_mutex);
+
+ addr += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ }
+ instr->state = MTD_ERASE_DONE;
+ mtd_erase_callback(instr);
+
+ return 0;
+}
+
+
+static int ramtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct ramtd *ramtd = container_of(mtd, typeof(*ramtd), mtd);
+
+ if (from >= mtd->size)
+ return -EINVAL;
+
+ if (len > mtd->size - from)
+ len = mtd->size - from;
+
+ *retlen = 0;
+ while (len) {
+ u32 page = from / PAGE_SIZE;
+ u32 ofs = from % PAGE_SIZE;
+ u32 rlen = min_t(u32, len, PAGE_SIZE - ofs);
+
+ down_interruptible(&ramtd_mutex);
+ if (!ramtd->page[page])
+ memset(buf, 0xff, rlen);
+ else
+ memcpy(buf, ramtd->page[page] + ofs, rlen);
+ up(&ramtd_mutex);
+
+ buf += rlen;
+ from += rlen;
+ *retlen += rlen;
+ len -= rlen;
+ }
+ return 0;
+}
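
The loop above walks a request page by page: the byte offset is split into a page index and an intra-page offset, at most PAGE_SIZE - ofs bytes are handled per iteration, and pages that were never written read back as 0xff. A user-space sketch of just the chunking arithmetic (PAGE_SIZE assumed to be 4096 for the example):

```c
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

int main(void)
{
	unsigned long from = 6000, len = 9000;	/* example request */

	while (len) {
		unsigned long page = from / DEMO_PAGE_SIZE;
		unsigned long ofs  = from % DEMO_PAGE_SIZE;
		unsigned long rlen = len < DEMO_PAGE_SIZE - ofs ?
				     len : DEMO_PAGE_SIZE - ofs;

		printf("page %lu, offset %lu, %lu bytes\n", page, ofs, rlen);
		from += rlen;
		len  -= rlen;
	}
	return 0;
}
```
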
+
+
+static int ramtd_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct ramtd *ramtd = container_of(mtd, typeof(*ramtd), mtd);
+
+ if (to >= mtd->size)
+ return -EINVAL;
+
+ if (len > mtd->size - to)
+ len = mtd->size - to;
+
+ *retlen = 0;
+ while (len) {
+ u32 page = to / PAGE_SIZE;
+ u32 ofs = to % PAGE_SIZE;
+ u32 wlen = min_t(u32, len, PAGE_SIZE - ofs);
+
+ down_interruptible(&ramtd_mutex);
+ if (!ramtd->page[page]) {
+ ramtd->page[page] = get_pool_page();
+ if (!ramtd->page[page]) {
+ /* Allocation failed: drop the mutex before bailing out. */
+ up(&ramtd_mutex);
+ return -EIO;
+ }
+ memset(ramtd->page[page], 0xff, PAGE_SIZE);
+ }
+
+ memcpy(ramtd->page[page] + ofs, buf, wlen);
+ up(&ramtd_mutex);
+
+ buf += wlen;
+ to += wlen;
+ *retlen += wlen;
+ len -= wlen;
+ }
+ return 0;
+}
+
+
+static int register_device(const char *name, u32 size)
+{
+ struct ramtd *new;
+ u32 pages_size;
+ int err;
+
+ size = PAGE_ALIGN(size);
+ pages_size = size / PAGE_SIZE * sizeof(void*);
+ new = vmalloc(sizeof(*new) + pages_size);
+ if (!new)
+ return -ENOMEM;
+ memset(new, 0, sizeof(*new) + pages_size);
+
+ new->mtd.name = (char*)name;
+ new->mtd.size = size;
+ new->mtd.flags = MTD_CAP_RAM | MTD_ERASEABLE | MTD_VOLATILE;
+ new->mtd.owner = THIS_MODULE;
+ new->mtd.type = MTD_RAM;
+ new->mtd.erasesize = PAGE_SIZE;
+ new->mtd.write = ramtd_write;
+ new->mtd.read = ramtd_read;
+ new->mtd.erase = ramtd_erase;
+
+ if (add_mtd_device(&new->mtd)) {
+ vfree(new); /* new was allocated with vmalloc(), not get_pool_page() */
+ return -EAGAIN;
+ }
+
+ err = alloc_all_pages(new);
+ if (err) {
+ free_all_pages(new);
+ vfree(new);
+ return err;
+ }
+
+ down_interruptible(&ramtd_mutex);
+ list_add(&new->list, &ramtd_list);
+ up(&ramtd_mutex);
+ return 0;
+}
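
register_device() above allocates the page-pointer table together with struct ramtd in a single vmalloc(); the table costs one pointer per PAGE_SIZE of device. A quick user-space check of that arithmetic (the 4 MiB default size and 4096-byte pages are assumptions for the example):

```c
#include <stdio.h>

int main(void)
{
	unsigned long size = 4UL * 1024 * 1024;		/* default ramtd_size */
	unsigned long page_size = 4096;
	unsigned long pages_size = size / page_size * sizeof(void *);

	printf("%lu page pointers, %lu bytes of table\n",
	       size / page_size, pages_size);
	return 0;
}
```
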
+
+
+static int __init ramtd_init(void)
+{
+ return register_device("ramtd", ramtd_size); /* FIXME */
+}
+
+static void __exit ramtd_exit(void)
+{
+ struct ramtd *this, *next;
+
+ down_interruptible(&ramtd_mutex);
+ list_for_each_entry_safe(this, next, &ramtd_list, list) {
+ free_all_pages(this);
+ del_mtd_device(&this->mtd);
+ vfree(this);
+ }
+ up(&ramtd_mutex);
+}
+
+
+module_init(ramtd_init);
+module_exit(ramtd_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joern Engel <joern@wh.fh-wedel.de>");
+MODULE_AUTHOR("iSteve <isteve@bofh.cz>");
+MODULE_DESCRIPTION("MTD using dynamic memory allocation");
diff --git a/linux-2.4.x/drivers/mtd/devices/slram.c b/linux-2.4.x/drivers/mtd/devices/slram.c
index 6220e73..6faee6c 100644
--- a/linux-2.4.x/drivers/mtd/devices/slram.c
+++ b/linux-2.4.x/drivers/mtd/devices/slram.c
@@ -1,6 +1,32 @@
/*======================================================================
- $Id: slram.c,v 1.25 2001/10/02 15:05:13 dwmw2 Exp $
+ $Id: slram.c,v 1.36 2005/11/07 11:14:25 gleixner Exp $
+
+ This driver provides a method to access memory not used by the kernel
+ itself (i.e. if the kernel commandline mem=xxx is used). To actually
+ use slram at least mtdblock or mtdchar is required (for block or
+ character device access).
+
+ Usage:
+
+ if compiled as a loadable module:
+ modprobe slram map=<name>,<start>,<end/offset>
+ if statically linked into the kernel, use the following kernel command line:
+ slram=<name>,<start>,<end/offset>
+
+ <name>: name of the device that will be listed in /proc/mtd
+ <start>: start of the memory region, decimal or hex (0xabcdef)
+ <end/offset>: end of the memory region. It's possible to use +0x1234
+ to specify the offset instead of the absolute address
+
+ NOTE:
+ With slram it's only possible to map a contiguous memory region. Therefore,
+ if there's a device mapped somewhere in the specified region, slram will
+ fail to load (see the kernel log if modprobe fails).
+
+ -
+
+ Jochen Schaeuble <psionic@psionic.de>
======================================================================*/
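
To make the <end/offset> convention above concrete, here is a small user-space sketch of the arithmetic that parse_cmdline()/handle_unit() perform further down (illustrative only: the demo_* names are invented and strtoul() replaces simple_strtoul()). A leading '+' gives a length relative to <start>, anything else is an absolute end address, and 'K'/'M' suffixes scale the value:

```c
#include <stdio.h>
#include <stdlib.h>

static unsigned long demo_handle_unit(unsigned long value, const char *unit)
{
	if (*unit == 'M' || *unit == 'm')
		return value * 1024 * 1024;
	if (*unit == 'K' || *unit == 'k')
		return value * 1024;
	return value;
}

static unsigned long demo_length(unsigned long start, const char *sz)
{
	char *end;
	unsigned long v;

	if (*sz != '+') {			/* absolute end address */
		v = strtoul(sz, &end, 0);
		return demo_handle_unit(v, end) - start;
	}
	v = strtoul(sz + 1, &end, 0);		/* offset from start */
	return demo_handle_unit(v, end);
}

int main(void)
{
	/* both forms describe the same 2 MiB region */
	printf("0x%lx\n", demo_length(0x1000000, "+0x200000"));
	printf("0x%lx\n", demo_length(0x1000000, "0x1200000"));
	return 0;
}
```
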
@@ -20,12 +46,11 @@
#include <linux/init.h>
#include <asm/io.h>
#include <asm/system.h>
-#include <asm/segment.h>
-#include <stdarg.h>
#include <linux/mtd/mtd.h>
#define SLRAM_MAX_DEVICES_PARAMS 6 /* 3 parameters / device */
+#define SLRAM_BLK_SZ 0x4000
#define T(fmt, args...) printk(KERN_DEBUG fmt, ## args)
#define E(fmt, args...) printk(KERN_NOTICE fmt, ## args)
@@ -42,77 +67,84 @@ typedef struct slram_mtd_list {
#ifdef MODULE
static char *map[SLRAM_MAX_DEVICES_PARAMS];
+
+module_param_array(map, charp, NULL, 0);
+MODULE_PARM_DESC(map, "List of memory regions to map. \"map=<name>, <start>, <length / end>\"");
#else
static char *map;
#endif
-MODULE_PARM(map, "3-" __MODULE_STRING(SLRAM_MAX_DEVICES_PARAMS) "s");
-MODULE_PARM_DESC(map, "List of memory regions to map. \"map=<name>, <start>, <length / end>\"");
-
static slram_mtd_list_t *slram_mtdlist = NULL;
-int slram_erase(struct mtd_info *, struct erase_info *);
-int slram_point(struct mtd_info *, loff_t, size_t, size_t *, u_char **);
-void slram_unpoint(struct mtd_info *, u_char *);
-int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
-int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int slram_erase(struct mtd_info *, struct erase_info *);
+static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, u_char **);
+static void slram_unpoint(struct mtd_info *, u_char *, loff_t, size_t);
+static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
-int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
+static int slram_erase(struct mtd_info *mtd, struct erase_info *instr)
{
slram_priv_t *priv = mtd->priv;
if (instr->addr + instr->len > mtd->size) {
return(-EINVAL);
}
-
+
memset(priv->start + instr->addr, 0xff, instr->len);
- /* This'll catch a few races. Free the thing before returning :)
+ /* This'll catch a few races. Free the thing before returning :)
* I don't feel at all ashamed. This kind of thing is possible anyway
* with flash, but unlikely.
*/
instr->state = MTD_ERASE_DONE;
- if (instr->callback) {
- (*(instr->callback))(instr);
- }
- else {
- kfree(instr);
- }
+ mtd_erase_callback(instr);
return(0);
}
-int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
+static int slram_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char **mtdbuf)
{
- slram_priv_t *priv = (slram_priv_t *)mtd->priv;
+ slram_priv_t *priv = mtd->priv;
+
+ if (from + len > mtd->size)
+ return -EINVAL;
*mtdbuf = priv->start + from;
*retlen = len;
return(0);
}
-void slram_unpoint(struct mtd_info *mtd, u_char *addr)
+static void slram_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
}
-int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
+static int slram_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
- slram_priv_t *priv = (slram_priv_t *)mtd->priv;
-
+ slram_priv_t *priv = mtd->priv;
+
+ if (from > mtd->size)
+ return -EINVAL;
+
+ if (from + len > mtd->size)
+ len = mtd->size - from;
+
memcpy(buf, priv->start + from, len);
*retlen = len;
return(0);
}
-int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
+static int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
- slram_priv_t *priv = (slram_priv_t *)mtd->priv;
+ slram_priv_t *priv = mtd->priv;
+
+ if (to + len > mtd->size)
+ return -EINVAL;
memcpy(priv->start + to, buf, len);
@@ -122,7 +154,7 @@ int slram_write(struct mtd_info *mtd, loff_t to, size_t len,
/*====================================================================*/
-int register_device(char *name, unsigned long start, unsigned long length)
+static int register_device(char *name, unsigned long start, unsigned long length)
{
slram_mtd_list_t **curmtd;
@@ -138,12 +170,12 @@ int register_device(char *name, unsigned long start, unsigned long length)
}
(*curmtd)->mtdinfo = kmalloc(sizeof(struct mtd_info), GFP_KERNEL);
(*curmtd)->next = NULL;
-
+
if ((*curmtd)->mtdinfo) {
memset((char *)(*curmtd)->mtdinfo, 0, sizeof(struct mtd_info));
(*curmtd)->mtdinfo->priv =
- (void *)kmalloc(sizeof(slram_priv_t), GFP_KERNEL);
-
+ kmalloc(sizeof(slram_priv_t), GFP_KERNEL);
+
if (!(*curmtd)->mtdinfo->priv) {
kfree((*curmtd)->mtdinfo);
(*curmtd)->mtdinfo = NULL;
@@ -156,7 +188,7 @@ int register_device(char *name, unsigned long start, unsigned long length)
E("slram: Cannot allocate new MTD device.\n");
return(-ENOMEM);
}
-
+
if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start =
ioremap(start, length))) {
E("slram: ioremap failed\n");
@@ -169,15 +201,15 @@ int register_device(char *name, unsigned long start, unsigned long length)
(*curmtd)->mtdinfo->name = name;
(*curmtd)->mtdinfo->size = length;
(*curmtd)->mtdinfo->flags = MTD_CLEAR_BITS | MTD_SET_BITS |
- MTD_WRITEB_WRITEABLE | MTD_VOLATILE;
+ MTD_WRITEB_WRITEABLE | MTD_VOLATILE | MTD_CAP_RAM;
(*curmtd)->mtdinfo->erase = slram_erase;
(*curmtd)->mtdinfo->point = slram_point;
(*curmtd)->mtdinfo->unpoint = slram_unpoint;
(*curmtd)->mtdinfo->read = slram_read;
(*curmtd)->mtdinfo->write = slram_write;
- (*curmtd)->mtdinfo->module = THIS_MODULE;
+ (*curmtd)->mtdinfo->owner = THIS_MODULE;
(*curmtd)->mtdinfo->type = MTD_RAM;
- (*curmtd)->mtdinfo->erasesize = 0x10000;
+ (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
if (add_mtd_device((*curmtd)->mtdinfo)) {
E("slram: Failed to register new device\n");
@@ -191,10 +223,10 @@ int register_device(char *name, unsigned long start, unsigned long length)
T("slram: Mapped from 0x%p to 0x%p\n",
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start,
((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end);
- return(0);
+ return(0);
}
-void unregister_devices(void)
+static void unregister_devices(void)
{
slram_mtd_list_t *nextitem;
@@ -209,7 +241,7 @@ void unregister_devices(void)
}
}
-unsigned long handle_unit(unsigned long value, char *unit)
+static unsigned long handle_unit(unsigned long value, char *unit)
{
if ((*unit == 'M') || (*unit == 'm')) {
return(value * 1024 * 1024);
@@ -219,12 +251,12 @@ unsigned long handle_unit(unsigned long value, char *unit)
return(value);
}
-int parse_cmdline(char *devname, char *szstart, char *szlength)
+static int parse_cmdline(char *devname, char *szstart, char *szlength)
{
char *buffer;
unsigned long devstart;
unsigned long devlength;
-
+
if ((!devname) || (!szstart) || (!szlength)) {
unregister_devices();
return(-EINVAL);
@@ -232,7 +264,7 @@ int parse_cmdline(char *devname, char *szstart, char *szlength)
devstart = simple_strtoul(szstart, &buffer, 0);
devstart = handle_unit(devstart, buffer);
-
+
if (*(szlength) != '+') {
devlength = simple_strtoul(szlength, &buffer, 0);
devlength = handle_unit(devlength, buffer) - devstart;
@@ -242,11 +274,11 @@ int parse_cmdline(char *devname, char *szstart, char *szlength)
}
T("slram: devname=%s, devstart=0x%lx, devlength=0x%lx\n",
devname, devstart, devlength);
- if ((devstart < 0) || (devlength < 0)) {
+ if ((devstart < 0) || (devlength < 0) || (devlength % SLRAM_BLK_SZ != 0)) {
E("slram: Illegal start / length parameter.\n");
return(-EINVAL);
}
-
+
if ((devstart = register_device(devname, devstart, devlength))){
unregister_devices();
return((int)devstart);
@@ -266,7 +298,7 @@ __setup("slram=", mtd_slram_setup);
#endif
-int init_slram(void)
+static int init_slram(void)
{
char *devname;
int i;
@@ -303,7 +335,7 @@ int init_slram(void)
}
#else
int count;
-
+
for (count = 0; (map[count]) && (count < SLRAM_MAX_DEVICES_PARAMS);
count++) {
}
@@ -318,10 +350,10 @@ int init_slram(void)
if (parse_cmdline(devname, map[i * 3 + 1], map[i * 3 + 2])!=0) {
return(-EINVAL);
}
-
+
}
#endif /* !MODULE */
-
+
return(0);
}
diff --git a/linux-2.4.x/drivers/mtd/ftl.c b/linux-2.4.x/drivers/mtd/ftl.c
index de429d8..8a878b3 100644
--- a/linux-2.4.x/drivers/mtd/ftl.c
+++ b/linux-2.4.x/drivers/mtd/ftl.c
@@ -1,5 +1,5 @@
/* This version ported to the Linux-MTD system by dwmw2@infradead.org
- * $Id: ftl.c,v 1.43 2002/02/13 15:31:37 dwmw2 Exp $
+ * $Id: ftl.c,v 1.59 2005/11/29 14:48:31 gleixner Exp $
*
* Fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
* - fixes some leaks on failure in build_maps and ftl_notify_add, cleanups
@@ -53,10 +53,10 @@
Use of the FTL format for non-PCMCIA applications may be an
infringement of these patents. For additional information,
contact M-Systems (http://www.m-sys.com) directly.
-
+
======================================================================*/
+#include <linux/mtd/blktrans.h>
#include <linux/module.h>
-#include <linux/mtd/compatmac.h>
#include <linux/mtd/mtd.h>
/*#define PSYCHO_DEBUG */
@@ -68,50 +68,19 @@
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
-#include <linux/ioctl.h>
+#include <linux/init.h>
#include <linux/hdreg.h>
-#include <stdarg.h>
-
-#if (LINUX_VERSION_CODE >= 0x20100)
#include <linux/vmalloc.h>
-#endif
-#if (LINUX_VERSION_CODE >= 0x20303)
#include <linux/blkpg.h>
-#endif
+#include <asm/uaccess.h>
#include <linux/mtd/ftl.h>
-/*====================================================================*/
-/* Stuff which really ought to be in compatmac.h */
-
-#if (LINUX_VERSION_CODE < 0x20328)
-#define register_disk(dev, drive, minors, ops, size) \
- do { (dev)->part[(drive)*(minors)].nr_sects = size; \
- if (size == 0) (dev)->part[(drive)*(minors)].start_sect = -1; \
- resetup_one_dev(dev, drive); } while (0)
-#endif
-
-#if (LINUX_VERSION_CODE < 0x20320)
-#define BLK_DEFAULT_QUEUE(n) blk_dev[n].request_fn
-#define blk_init_queue(q, req) q = (req)
-#define blk_cleanup_queue(q) q = NULL
-#define request_arg_t void
-#else
-#define request_arg_t request_queue_t *q
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,14)
-#define BLK_INC_USE_COUNT MOD_INC_USE_COUNT
-#define BLK_DEC_USE_COUNT MOD_DEC_USE_COUNT
-#else
-#define BLK_INC_USE_COUNT do {} while(0)
-#define BLK_DEC_USE_COUNT do {} while(0)
-#endif
/*====================================================================*/
/* Parameters that can be set with 'insmod' */
static int shuffle_freq = 50;
-MODULE_PARM(shuffle_freq, "i");
+module_param(shuffle_freq, int, 0);
/*====================================================================*/
@@ -120,19 +89,6 @@ MODULE_PARM(shuffle_freq, "i");
#define FTL_MAJOR 44
#endif
-/* Funky stuff for setting up a block device */
-#define MAJOR_NR FTL_MAJOR
-#define DEVICE_NAME "ftl"
-#define DEVICE_REQUEST do_ftl_request
-#define DEVICE_ON(device)
-#define DEVICE_OFF(device)
-
-#define DEVICE_NR(minor) ((minor)>>5)
-#define REGION_NR(minor) (((minor)>>3)&3)
-#define PART_NR(minor) ((minor)&7)
-#define MINOR_NR(dev,reg,part) (((dev)<<5)+((reg)<<3)+(part))
-
-#include <linux/blk.h>
/*====================================================================*/
@@ -143,8 +99,7 @@ MODULE_PARM(shuffle_freq, "i");
#define MAX_REGION 4
/* Maximum number of partitions in an FTL region */
-#define PART_BITS 3
-#define MAX_PART 8
+#define PART_BITS 4
/* Maximum number of outstanding erase requests per socket */
#define MAX_ERASE 8
@@ -155,7 +110,7 @@ MODULE_PARM(shuffle_freq, "i");
/* Each memory region corresponds to a minor device */
typedef struct partition_t {
- struct mtd_info *mtd;
+ struct mtd_blktrans_dev mbd;
u_int32_t state;
u_int32_t *VirtualBlockMap;
u_int32_t *VirtualPageMap;
@@ -180,21 +135,10 @@ typedef struct partition_t {
region_info_t region;
memory_handle_t handle;
#endif
- atomic_t open;
} partition_t;
-partition_t *myparts[MAX_MTD_DEVICES];
-
-static void ftl_notify_add(struct mtd_info *mtd);
-static void ftl_notify_remove(struct mtd_info *mtd);
-
void ftl_freepart(partition_t *part);
-static struct mtd_notifier ftl_notifier = {
- add: ftl_notify_add,
- remove: ftl_notify_remove,
-};
-
/* Partition state flags */
#define FTL_FORMATTED 0x01
@@ -205,77 +149,38 @@ static struct mtd_notifier ftl_notifier = {
#define XFER_PREPARED 0x03
#define XFER_FAILED 0x04
-static struct hd_struct ftl_hd[MINOR_NR(MAX_DEV, 0, 0)];
-static int ftl_sizes[MINOR_NR(MAX_DEV, 0, 0)];
-static int ftl_blocksizes[MINOR_NR(MAX_DEV, 0, 0)];
-
-static struct gendisk ftl_gendisk = {
- major: FTL_MAJOR,
- major_name: "ftl",
- minor_shift: PART_BITS,
- max_p: MAX_PART,
-#if (LINUX_VERSION_CODE < 0x20328)
- max_nr: MAX_DEV*MAX_PART,
-#endif
- part: ftl_hd,
- sizes: ftl_sizes,
-};
-
/*====================================================================*/
-static int ftl_ioctl(struct inode *inode, struct file *file,
- u_int cmd, u_long arg);
-static int ftl_open(struct inode *inode, struct file *file);
-static release_t ftl_close(struct inode *inode, struct file *file);
-static int ftl_reread_partitions(int minor);
static void ftl_erase_callback(struct erase_info *done);
-#if LINUX_VERSION_CODE < 0x20326
-static struct file_operations ftl_blk_fops = {
- open: ftl_open,
- release: ftl_close,
- ioctl: ftl_ioctl,
- read: block_read,
- write: block_write,
- fsync: block_fsync
-};
-#else
-static struct block_device_operations ftl_blk_fops = {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,14)
- owner: THIS_MODULE,
-#endif
- open: ftl_open,
- release: ftl_close,
- ioctl: ftl_ioctl,
-};
-#endif
/*======================================================================
Scan_header() checks to see if a memory region contains an FTL
partition. build_maps() reads all the erase unit headers, builds
the erase unit map, and then builds the virtual page map.
-
+
======================================================================*/
static int scan_header(partition_t *part)
{
erase_unit_header_t header;
loff_t offset, max_offset;
- int ret;
+ size_t ret;
+ int err;
part->header.FormattedSize = 0;
- max_offset = (0x100000<part->mtd->size)?0x100000:part->mtd->size;
+ max_offset = (0x100000<part->mbd.mtd->size)?0x100000:part->mbd.mtd->size;
/* Search first megabyte for a valid FTL header */
for (offset = 0;
(offset + sizeof(header)) < max_offset;
- offset += part->mtd->erasesize ? : 0x2000) {
+ offset += part->mbd.mtd->erasesize ? : 0x2000) {
- ret = part->mtd->read(part->mtd, offset, sizeof(header), &ret,
+ err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
(unsigned char *)&header);
-
- if (ret)
- return ret;
+
+ if (err)
+ return err;
if (strcmp(header.DataOrgTuple+3, "FTL100") == 0) break;
}
@@ -284,15 +189,15 @@ static int scan_header(partition_t *part)
printk(KERN_NOTICE "ftl_cs: FTL header not found.\n");
return -ENOENT;
}
- if ((le16_to_cpu(header.NumEraseUnits) > 65536) || header.BlockSize != 9 ||
+ if (header.BlockSize != 9 ||
(header.EraseUnitSize < 10) || (header.EraseUnitSize > 31) ||
(header.NumTransferUnits >= le16_to_cpu(header.NumEraseUnits))) {
printk(KERN_NOTICE "ftl_cs: FTL header corrupt!\n");
return -1;
}
- if ((1 << header.EraseUnitSize) != part->mtd->erasesize) {
+ if ((1 << header.EraseUnitSize) != part->mbd.mtd->erasesize) {
printk(KERN_NOTICE "ftl: FTL EraseUnitSize %x != MTD erasesize %x\n",
- 1 << header.EraseUnitSize,part->mtd->erasesize);
+ 1 << header.EraseUnitSize,part->mbd.mtd->erasesize);
return -1;
}
part->header = header;
@@ -327,10 +232,10 @@ static int build_maps(partition_t *part)
for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
<< part->header.EraseUnitSize);
- ret = part->mtd->read(part->mtd, offset, sizeof(header), &retval,
+ ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
(unsigned char *)&header);
-
- if (ret)
+
+ if (ret)
goto out_XferInfo;
ret = -1;
@@ -369,7 +274,7 @@ static int build_maps(partition_t *part)
"don't add up!\n");
goto out_XferInfo;
}
-
+
/* Set up virtual page map */
blocks = le32_to_cpu(header.FormattedSize) >> header.BlockSize;
part->VirtualBlockMap = vmalloc(blocks * sizeof(u_int32_t));
@@ -391,12 +296,12 @@ static int build_maps(partition_t *part)
part->EUNInfo[i].Free = 0;
part->EUNInfo[i].Deleted = 0;
offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
-
- ret = part->mtd->read(part->mtd, offset,
- part->BlocksPerUnit * sizeof(u_int32_t), &retval,
+
+ ret = part->mbd.mtd->read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(u_int32_t), &retval,
(unsigned char *)part->bam_cache);
-
- if (ret)
+
+ if (ret)
goto out_bam_cache;
for (j = 0; j < part->BlocksPerUnit; j++) {
@@ -411,7 +316,7 @@ static int build_maps(partition_t *part)
part->EUNInfo[i].Deleted++;
}
}
-
+
ret = 0;
goto out;
@@ -431,7 +336,7 @@ out:
Erase_xfer() schedules an asynchronous erase operation for a
transfer unit.
-
+
======================================================================*/
static int erase_xfer(partition_t *part,
@@ -446,18 +351,19 @@ static int erase_xfer(partition_t *part,
xfer->state = XFER_ERASING;
/* Is there a free erase slot? Always in MTD. */
-
-
+
+
erase=kmalloc(sizeof(struct erase_info), GFP_KERNEL);
- if (!erase)
+ if (!erase)
return -ENOMEM;
+ erase->mtd = part->mbd.mtd;
erase->callback = ftl_erase_callback;
erase->addr = xfer->Offset;
erase->len = 1 << part->header.EraseUnitSize;
erase->priv = (u_long)part;
-
- ret = part->mtd->erase(part->mtd, erase);
+
+ ret = part->mbd.mtd->erase(part->mbd.mtd, erase);
if (!ret)
xfer->EraseCount++;
@@ -471,7 +377,7 @@ static int erase_xfer(partition_t *part,
Prepare_xfer() takes a freshly erased transfer unit and gives
it an appropriate header.
-
+
======================================================================*/
static void ftl_erase_callback(struct erase_info *erase)
@@ -479,7 +385,7 @@ static void ftl_erase_callback(struct erase_info *erase)
partition_t *part;
struct xfer_info_t *xfer;
int i;
-
+
/* Look up the transfer unit */
part = (partition_t *)(erase->priv);
@@ -516,7 +422,7 @@ static int prepare_xfer(partition_t *part, int i)
xfer = &part->XferInfo[i];
xfer->state = XFER_FAILED;
-
+
DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
/* Write the transfer unit header */
@@ -524,7 +430,7 @@ static int prepare_xfer(partition_t *part, int i)
header.LogicalEUN = cpu_to_le16(0xffff);
header.EraseCount = cpu_to_le32(xfer->EraseCount);
- ret = part->mtd->write(part->mtd, xfer->Offset, sizeof(header),
+ ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
&retlen, (u_char *)&header);
if (ret) {
@@ -540,7 +446,7 @@ static int prepare_xfer(partition_t *part, int i)
for (i = 0; i < nbam; i++, offset += sizeof(u_int32_t)) {
- ret = part->mtd->write(part->mtd, offset, sizeof(u_int32_t),
+ ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
&retlen, (u_char *)&ctl);
if (ret)
@@ -548,7 +454,7 @@ static int prepare_xfer(partition_t *part, int i)
}
xfer->state = XFER_PREPARED;
return 0;
-
+
} /* prepare_xfer */
/*======================================================================
@@ -560,7 +466,7 @@ static int prepare_xfer(partition_t *part, int i)
All data blocks are copied to the corresponding blocks in the
target unit, so the virtual block map does not need to be
updated.
-
+
======================================================================*/
static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
@@ -580,14 +486,14 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
xfer = &part->XferInfo[xferunit];
DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
eun->Offset, xfer->Offset);
-
-
+
+
/* Read current BAM */
if (part->bam_index != srcunit) {
offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
- ret = part->mtd->read(part->mtd, offset,
+ ret = part->mbd.mtd->read(part->mbd.mtd, offset,
part->BlocksPerUnit * sizeof(u_int32_t),
&retlen, (u_char *) (part->bam_cache));
@@ -595,24 +501,24 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
part->bam_index = 0xffff;
if (ret) {
- printk( KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
+ printk( KERN_WARNING "ftl: Failed to read BAM cache in copy_erase_unit()!\n");
return ret;
}
}
-
+
/* Write the LogicalEUN for the transfer unit */
xfer->state = XFER_UNKNOWN;
offset = xfer->Offset + 20; /* Bad! */
unit = cpu_to_le16(0x7fff);
- ret = part->mtd->write(part->mtd, offset, sizeof(u_int16_t),
+ ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int16_t),
&retlen, (u_char *) &unit);
-
+
if (ret) {
printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
return ret;
}
-
+
/* Copy all data blocks from source unit to transfer unit */
src = eun->Offset; dest = xfer->Offset;
@@ -625,7 +531,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
break;
case BLOCK_DATA:
case BLOCK_REPLACEMENT:
- ret = part->mtd->read(part->mtd, src, SECTOR_SIZE,
+ ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
&retlen, (u_char *) buf);
if (ret) {
printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
@@ -633,7 +539,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
}
- ret = part->mtd->write(part->mtd, dest, SECTOR_SIZE,
+ ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
&retlen, (u_char *) buf);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
@@ -652,25 +558,25 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
}
/* Write the BAM to the transfer unit */
- ret = part->mtd->write(part->mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(int32_t), &retlen,
+ ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(int32_t), &retlen,
(u_char *)part->bam_cache);
if (ret) {
printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
return ret;
}
-
+
/* All clear? Then update the LogicalEUN again */
- ret = part->mtd->write(part->mtd, xfer->Offset + 20, sizeof(u_int16_t),
+ ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(u_int16_t),
&retlen, (u_char *)&srcunitswap);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
return ret;
- }
-
-
+ }
+
+
/* Update the maps and usage stats*/
i = xfer->EraseCount;
xfer->EraseCount = eun->EraseCount;
@@ -682,10 +588,10 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
part->FreeTotal += free;
eun->Free = free;
eun->Deleted = 0;
-
+
/* Now, the cache should be valid for the new block */
part->bam_index = srcunit;
-
+
return 0;
} /* copy_erase_unit */
@@ -702,7 +608,7 @@ static int copy_erase_unit(partition_t *part, u_int16_t srcunit,
oldest data unit instead. This means that we generally postpone
the next reclaimation as long as possible, but shuffle static
stuff around a bit for wear leveling.
-
+
======================================================================*/
static int reclaim_block(partition_t *part)
@@ -750,8 +656,8 @@ static int reclaim_block(partition_t *part)
if (queued) {
DEBUG(1, "ftl_cs: waiting for transfer "
"unit to be prepared...\n");
- if (part->mtd->sync)
- part->mtd->sync(part->mtd);
+ if (part->mbd.mtd->sync)
+ part->mbd.mtd->sync(part->mbd.mtd);
} else {
static int ne = 0;
if (++ne < 5)
@@ -760,7 +666,7 @@ static int reclaim_block(partition_t *part)
else
DEBUG(1, "ftl_cs: reclaim failed: no "
"suitable transfer units!\n");
-
+
return -EIO;
}
}
@@ -809,7 +715,7 @@ static int reclaim_block(partition_t *part)
returns the block index -- the erase unit is just the currently
cached unit. If there are no free blocks, it returns 0 -- this
is never a valid data block because it contains the header.
-
+
======================================================================*/
#ifdef PSYCHO_DEBUG
@@ -831,7 +737,7 @@ static u_int32_t find_free(partition_t *part)
u_int32_t blk;
size_t retlen;
int ret;
-
+
/* Find an erase unit with some free space */
stop = (part->bam_index == 0xffff) ? 0 : part->bam_index;
eun = stop;
@@ -843,17 +749,17 @@ static u_int32_t find_free(partition_t *part)
if (part->EUNInfo[eun].Free == 0)
return 0;
-
+
/* Is this unit's BAM cached? */
if (eun != part->bam_index) {
/* Invalidate cache */
part->bam_index = 0xffff;
- ret = part->mtd->read(part->mtd,
+ ret = part->mbd.mtd->read(part->mbd.mtd,
part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
part->BlocksPerUnit * sizeof(u_int32_t),
&retlen, (u_char *) (part->bam_cache));
-
+
if (ret) {
printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
return 0;
@@ -875,86 +781,14 @@ static u_int32_t find_free(partition_t *part)
}
DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun);
return blk;
-
-} /* find_free */
-
-/*======================================================================
-
- This gets a memory handle for the region corresponding to the
- minor device number.
-
-======================================================================*/
-
-static int ftl_open(struct inode *inode, struct file *file)
-{
- int minor = MINOR(inode->i_rdev);
- partition_t *partition;
-
- if (minor>>4 >= MAX_MTD_DEVICES)
- return -ENODEV;
-
- partition = myparts[minor>>4];
-
- if (!partition)
- return -ENODEV;
-
- if (partition->state != FTL_FORMATTED)
- return -ENXIO;
-
- if (ftl_gendisk.part[minor].nr_sects == 0)
- return -ENXIO;
-
- BLK_INC_USE_COUNT;
-
- if (!get_mtd_device(partition->mtd, -1)) {
- BLK_DEC_USE_COUNT;
- return -ENXIO;
- }
-
- if ((file->f_mode & 2) && !(partition->mtd->flags & MTD_CLEAR_BITS) ) {
- put_mtd_device(partition->mtd);
- BLK_DEC_USE_COUNT;
- return -EROFS;
- }
-
- DEBUG(0, "ftl_cs: ftl_open(%d)\n", minor);
-
- atomic_inc(&partition->open);
-
- return 0;
-}
-
-/*====================================================================*/
-
-static release_t ftl_close(struct inode *inode, struct file *file)
-{
- int minor = MINOR(inode->i_rdev);
- partition_t *part = myparts[minor >> 4];
- int i;
-
- DEBUG(0, "ftl_cs: ftl_close(%d)\n", minor);
-
- /* Wait for any pending erase operations to complete */
- if (part->mtd->sync)
- part->mtd->sync(part->mtd);
-
- for (i = 0; i < part->header.NumTransferUnits; i++) {
- if (part->XferInfo[i].state == XFER_ERASED)
- prepare_xfer(part, i);
- }
-
- atomic_dec(&part->open);
- put_mtd_device(part->mtd);
- BLK_DEC_USE_COUNT;
- release_return(0);
-} /* ftl_close */
+} /* find_free */
/*======================================================================
Read a series of sectors from an FTL partition.
-
+
======================================================================*/
static int ftl_read(partition_t *part, caddr_t buffer,
@@ -964,7 +798,7 @@ static int ftl_read(partition_t *part, caddr_t buffer,
u_long i;
int ret;
size_t offset, retlen;
-
+
DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
part, sector, nblocks);
if (!(part->state & FTL_FORMATTED)) {
@@ -984,7 +818,7 @@ static int ftl_read(partition_t *part, caddr_t buffer,
else {
offset = (part->EUNInfo[log_addr / bsize].Offset
+ (log_addr % bsize));
- ret = part->mtd->read(part->mtd, offset, SECTOR_SIZE,
+ ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE,
&retlen, (u_char *) buffer);
if (ret) {
@@ -1000,7 +834,7 @@ static int ftl_read(partition_t *part, caddr_t buffer,
/*======================================================================
Write a series of sectors to an FTL partition
-
+
======================================================================*/
static int set_bam_entry(partition_t *part, u_int32_t log_addr,
@@ -1021,9 +855,9 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
blk = (log_addr % bsize) / SECTOR_SIZE;
offset = (part->EUNInfo[eun].Offset + blk * sizeof(u_int32_t) +
le32_to_cpu(part->header.BAMOffset));
-
+
#ifdef PSYCHO_DEBUG
- ret = part->mtd->read(part->mtd, offset, sizeof(u_int32_t),
+ ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(u_int32_t),
&retlen, (u_char *)&old_addr);
if (ret) {
printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
@@ -1060,7 +894,7 @@ static int set_bam_entry(partition_t *part, u_int32_t log_addr,
#endif
part->bam_cache[blk] = le_virt_addr;
}
- ret = part->mtd->write(part->mtd, offset, sizeof(u_int32_t),
+ ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(u_int32_t),
&retlen, (u_char *)&le_virt_addr);
if (ret) {
@@ -1091,7 +925,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
if (ret)
return ret;
}
-
+
bsize = 1 << part->header.EraseUnitSize;
virt_addr = sector * SECTOR_SIZE | BLOCK_DATA;
@@ -1115,22 +949,22 @@ static int ftl_write(partition_t *part, caddr_t buffer,
log_addr = part->bam_index * bsize + blk * SECTOR_SIZE;
part->EUNInfo[part->bam_index].Free--;
part->FreeTotal--;
- if (set_bam_entry(part, log_addr, 0xfffffffe))
+ if (set_bam_entry(part, log_addr, 0xfffffffe))
return -EIO;
part->EUNInfo[part->bam_index].Deleted++;
offset = (part->EUNInfo[part->bam_index].Offset +
blk * SECTOR_SIZE);
- ret = part->mtd->write(part->mtd, offset, SECTOR_SIZE, &retlen,
+ ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
buffer);
if (ret) {
printk(KERN_NOTICE "ftl_cs: block write failed!\n");
printk(KERN_NOTICE "ftl_cs: log_addr = 0x%x, virt_addr"
- " = 0x%x, Offset = 0x%x\n", log_addr, virt_addr,
+ " = 0x%x, Offset = 0x%zx\n", log_addr, virt_addr,
offset);
return -EIO;
}
-
+
/* Only delete the old entry when the new entry is ready */
old_addr = part->VirtualBlockMap[sector+i];
if (old_addr != 0xffffffff) {
@@ -1145,309 +979,121 @@ static int ftl_write(partition_t *part, caddr_t buffer,
return -EIO;
part->VirtualBlockMap[sector+i] = log_addr;
part->EUNInfo[part->bam_index].Deleted--;
-
+
buffer += SECTOR_SIZE;
virt_addr += SECTOR_SIZE;
}
return 0;
} /* ftl_write */
-/*======================================================================
-
- IOCTL calls for getting device parameters.
-
-======================================================================*/
-
-static int ftl_ioctl(struct inode *inode, struct file *file,
- u_int cmd, u_long arg)
+static int ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
- struct hd_geometry *geo = (struct hd_geometry *)arg;
- int ret = 0, minor = MINOR(inode->i_rdev);
- partition_t *part= myparts[minor >> 4];
- u_long sect;
-
- if (!part)
- return -ENODEV; /* How? */
-
- switch (cmd) {
- case HDIO_GETGEO:
- ret = verify_area(VERIFY_WRITE, (long *)arg, sizeof(*geo));
- if (ret) return ret;
- /* Sort of arbitrary: round size down to 4K boundary */
- sect = le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE;
- put_user(1, (char *)&geo->heads);
- put_user(8, (char *)&geo->sectors);
- put_user((sect>>3), (short *)&geo->cylinders);
- put_user(ftl_hd[minor].start_sect, (u_long *)&geo->start);
- break;
- case BLKGETSIZE:
- ret = put_user(ftl_hd[minor].nr_sects, (unsigned long *)arg);
- break;
-#ifdef BLKGETSIZE64
- case BLKGETSIZE64:
- ret = put_user((u64)ftl_hd[minor].nr_sects << 9, (u64 *)arg);
- break;
-#endif
- case BLKRRPART:
- ret = ftl_reread_partitions(minor);
- break;
-#if (LINUX_VERSION_CODE < 0x20303)
- case BLKFLSBUF:
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-#endif
- fsync_dev(inode->i_rdev);
- invalidate_buffers(inode->i_rdev);
- break;
- RO_IOCTLS(inode->i_rdev, arg);
-#else
- case BLKROSET:
- case BLKROGET:
- case BLKFLSBUF:
- ret = blk_ioctl(inode->i_rdev, cmd, arg);
- break;
-#endif
- default:
- ret = -EINVAL;
- }
-
- return ret;
-} /* ftl_ioctl */
+ partition_t *part = (void *)dev;
+ u_long sect;
-/*======================================================================
+ /* Sort of arbitrary: round size down to 4KiB boundary */
+ sect = le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE;
- Handler for block device requests
+ geo->heads = 1;
+ geo->sectors = 8;
+ geo->cylinders = sect >> 3;
-======================================================================*/
+ return 0;
+}
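
A worked example of the synthesized geometry returned above: with 1 head and 8 sectors per track, the cylinder count is the 512-byte sector count shifted right by 3, which silently rounds the reported capacity down to a 4 KiB boundary (the FormattedSize value below is an arbitrary example):

```c
#include <stdio.h>

int main(void)
{
	unsigned long formatted_size = 1000 * 1024 + 2048;	/* bytes, example */
	unsigned long sect = formatted_size / 512;		/* SECTOR_SIZE */
	unsigned long cylinders = sect >> 3;

	printf("heads=1 sectors=8 cylinders=%lu -> %lu bytes reported\n",
	       cylinders, cylinders * 8 * 512);
	return 0;
}
```
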
-static int ftl_reread_partitions(int minor)
+static int ftl_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
{
- partition_t *part = myparts[minor >> 4];
- int i, whole;
-
- DEBUG(0, "ftl_cs: ftl_reread_partition(%d)\n", minor);
- if ((atomic_read(&part->open) > 1)) {
- return -EBUSY;
- }
- whole = minor & ~(MAX_PART-1);
-
- i = MAX_PART - 1;
- while (i-- > 0) {
- if (ftl_hd[whole+i].nr_sects > 0) {
- kdev_t rdev = MKDEV(FTL_MAJOR, whole+i);
-
- invalidate_device(rdev, 1);
- }
- ftl_hd[whole+i].start_sect = 0;
- ftl_hd[whole+i].nr_sects = 0;
- }
-
- scan_header(part);
-
- register_disk(&ftl_gendisk, whole >> PART_BITS, MAX_PART,
- &ftl_blk_fops, le32_to_cpu(part->header.FormattedSize)/SECTOR_SIZE);
-
-#ifdef PCMCIA_DEBUG
- for (i = 0; i < MAX_PART; i++) {
- if (ftl_hd[whole+i].nr_sects > 0)
- printk(KERN_INFO " %d: start %ld size %ld\n", i,
- ftl_hd[whole+i].start_sect,
- ftl_hd[whole+i].nr_sects);
- }
-#endif
- return 0;
+ return ftl_read((void *)dev, buf, block, 1);
}
-/*======================================================================
-
- Handler for block device requests
-
-======================================================================*/
-
-static void do_ftl_request(request_arg_t)
+static int ftl_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
{
- int ret, minor;
- partition_t *part;
-
- do {
- // sti();
- INIT_REQUEST;
-
- minor = MINOR(CURRENT->rq_dev);
-
- part = myparts[minor >> 4];
- if (part) {
- ret = 0;
-
- switch (CURRENT->cmd) {
- case READ:
- ret = ftl_read(part, CURRENT->buffer,
- CURRENT->sector+ftl_hd[minor].start_sect,
- CURRENT->current_nr_sectors);
- if (ret) printk("ftl_read returned %d\n", ret);
- break;
-
- case WRITE:
- ret = ftl_write(part, CURRENT->buffer,
- CURRENT->sector+ftl_hd[minor].start_sect,
- CURRENT->current_nr_sectors);
- if (ret) printk("ftl_write returned %d\n", ret);
- break;
-
- default:
- panic("ftl_cs: unknown block command!\n");
-
- }
- } else {
- ret = 1;
- printk("NULL part in ftl_request\n");
- }
-
- if (!ret) {
- CURRENT->sector += CURRENT->current_nr_sectors;
- }
-
- end_request((ret == 0) ? 1 : 0);
- } while (1);
-} /* do_ftl_request */
+ return ftl_write((void *)dev, buf, block, 1);
+}
/*====================================================================*/
void ftl_freepart(partition_t *part)
{
- if (part->VirtualBlockMap) {
vfree(part->VirtualBlockMap);
part->VirtualBlockMap = NULL;
- }
- if (part->VirtualPageMap) {
kfree(part->VirtualPageMap);
part->VirtualPageMap = NULL;
- }
- if (part->EUNInfo) {
kfree(part->EUNInfo);
part->EUNInfo = NULL;
- }
- if (part->XferInfo) {
kfree(part->XferInfo);
part->XferInfo = NULL;
- }
- if (part->bam_cache) {
kfree(part->bam_cache);
part->bam_cache = NULL;
- }
-
} /* ftl_freepart */
-static void ftl_notify_add(struct mtd_info *mtd)
+static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
partition_t *partition;
- int device;
-
- for (device=0; device < MAX_MTD_DEVICES && myparts[device]; device++)
- ;
-
- if (device == MAX_MTD_DEVICES) {
- printk(KERN_NOTICE "Maximum number of FTL partitions reached\n"
- "Not scanning <%s>\n", mtd->name);
- return;
- }
partition = kmalloc(sizeof(partition_t), GFP_KERNEL);
-
+
if (!partition) {
printk(KERN_WARNING "No memory to scan for FTL on %s\n",
mtd->name);
return;
- }
+ }
memset(partition, 0, sizeof(partition_t));
- partition->mtd = mtd;
+ partition->mbd.mtd = mtd;
- if ((scan_header(partition) == 0) &&
+ if ((scan_header(partition) == 0) &&
(build_maps(partition) == 0)) {
-
+
partition->state = FTL_FORMATTED;
- atomic_set(&partition->open, 0);
- myparts[device] = partition;
- ftl_reread_partitions(device << 4);
#ifdef PCMCIA_DEBUG
- printk(KERN_INFO "ftl_cs: opening %d kb FTL partition\n",
+ printk(KERN_INFO "ftl_cs: opening %d KiB FTL partition\n",
le32_to_cpu(partition->header.FormattedSize) >> 10);
#endif
- } else
- kfree(partition);
+ partition->mbd.size = le32_to_cpu(partition->header.FormattedSize) >> 9;
+ partition->mbd.blksize = SECTOR_SIZE;
+ partition->mbd.tr = tr;
+ partition->mbd.devnum = -1;
+ if (!add_mtd_blktrans_dev((void *)partition))
+ return;
+ }
+
+ ftl_freepart(partition);
+ kfree(partition);
}
-static void ftl_notify_remove(struct mtd_info *mtd)
+static void ftl_remove_dev(struct mtd_blktrans_dev *dev)
{
- int i,j;
-
- /* Q: What happens if you try to remove a device which has
- * a currently-open FTL partition on it?
- *
- * A: You don't. The ftl_open routine is responsible for
- * increasing the use count of the driver module which
- * it uses.
- */
-
- /* That's the theory, anyway :) */
-
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (myparts[i] && myparts[i]->mtd == mtd) {
-
- if (myparts[i]->state == FTL_FORMATTED)
- ftl_freepart(myparts[i]);
-
- myparts[i]->state = 0;
- for (j=0; j<16; j++) {
- ftl_gendisk.part[j].nr_sects=0;
- ftl_gendisk.part[j].start_sect=0;
- }
- kfree(myparts[i]);
- myparts[i] = NULL;
- }
+ del_mtd_blktrans_dev(dev);
+ ftl_freepart((partition_t *)dev);
+ kfree(dev);
}
-int init_ftl(void)
+struct mtd_blktrans_ops ftl_tr = {
+ .name = "ftl",
+ .major = FTL_MAJOR,
+ .part_bits = PART_BITS,
+ .readsect = ftl_readsect,
+ .writesect = ftl_writesect,
+ .getgeo = ftl_getgeo,
+ .add_mtd = ftl_add_mtd,
+ .remove_dev = ftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int init_ftl(void)
{
- int i;
+ DEBUG(0, "$Id: ftl.c,v 1.59 2005/11/29 14:48:31 gleixner Exp $\n");
- memset(myparts, 0, sizeof(myparts));
-
- DEBUG(0, "$Id: ftl.c,v 1.43 2002/02/13 15:31:37 dwmw2 Exp $\n");
-
- if (register_blkdev(FTL_MAJOR, "ftl", &ftl_blk_fops)) {
- printk(KERN_NOTICE "ftl_cs: unable to grab major "
- "device number!\n");
- return -EAGAIN;
- }
-
- for (i = 0; i < MINOR_NR(MAX_DEV, 0, 0); i++)
- ftl_blocksizes[i] = 1024;
- for (i = 0; i < MAX_DEV*MAX_PART; i++) {
- ftl_hd[i].nr_sects = 0;
- ftl_hd[i].start_sect = 0;
- }
- blksize_size[FTL_MAJOR] = ftl_blocksizes;
- ftl_gendisk.major = FTL_MAJOR;
- blk_init_queue(BLK_DEFAULT_QUEUE(FTL_MAJOR), &do_ftl_request);
- add_gendisk(&ftl_gendisk);
-
- register_mtd_user(&ftl_notifier);
-
- return 0;
+ return register_mtd_blktrans(&ftl_tr);
}
static void __exit cleanup_ftl(void)
{
- unregister_mtd_user(&ftl_notifier);
-
- unregister_blkdev(FTL_MAJOR, "ftl");
- blk_cleanup_queue(BLK_DEFAULT_QUEUE(FTL_MAJOR));
- blksize_size[FTL_MAJOR] = NULL;
-
- del_gendisk(&ftl_gendisk);
+ deregister_mtd_blktrans(&ftl_tr);
}
module_init(init_ftl);
@@ -1456,4 +1102,4 @@ module_exit(cleanup_ftl);
MODULE_LICENSE("Dual MPL/GPL");
MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
-MODULE_DESCRIPTION("Support code for Flash Translation Layer, used on PCMCIA devices and M-Systems DiskOnChip 1000");
+MODULE_DESCRIPTION("Support code for Flash Translation Layer, used on PCMCIA devices");
diff --git a/linux-2.4.x/drivers/mtd/inftlcore.c b/linux-2.4.x/drivers/mtd/inftlcore.c
new file mode 100644
index 0000000..a4674fa
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/inftlcore.c
@@ -0,0 +1,903 @@
+/*
+ * inftlcore.c -- Linux driver for Inverse Flash Translation Layer (INFTL)
+ *
+ * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ *
+ * Based heavily on the nftlcore.c code which is:
+ * (c) 1999 Machine Vision Holdings, Inc.
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ *
+ * $Id: inftlcore.c,v 1.22 2006/03/29 08:35:59 dwmw2 Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/hdreg.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nftl.h>
+#include <linux/mtd/inftl.h>
+#include <asm/uaccess.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+
+/*
+ * Maximum number of loops while examining next block, to have a
+ * chance to detect consistency problems (they should never happen
+ * because of the checks done in the mounting.
+ */
+#define MAX_LOOPS 10000
+
+static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct INFTLrecord *inftl;
+ unsigned long temp;
+
+ if (mtd->type != MTD_NANDFLASH)
+ return;
+ /* OK, this is moderately ugly. But probably safe. Alternatives? */
+ if (memcmp(mtd->name, "DiskOnChip", 10))
+ return;
+
+ if (!mtd->block_isbad) {
+ printk(KERN_ERR
+"INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
+"Please use the new diskonchip driver under the NAND subsystem.\n");
+ return;
+ }
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name);
+
+ inftl = kmalloc(sizeof(*inftl), GFP_KERNEL);
+
+ if (!inftl) {
+ printk(KERN_WARNING "INFTL: Out of memory for data structures\n");
+ return;
+ }
+ memset(inftl, 0, sizeof(*inftl));
+
+ inftl->mbd.mtd = mtd;
+ inftl->mbd.devnum = -1;
+ inftl->mbd.blksize = 512;
+ inftl->mbd.tr = tr;
+ memcpy(&inftl->oobinfo, &mtd->oobinfo, sizeof(struct nand_oobinfo));
+ inftl->oobinfo.useecc = MTD_NANDECC_PLACEONLY;
+
+ if (INFTL_mount(inftl) < 0) {
+ printk(KERN_WARNING "INFTL: could not mount device\n");
+ kfree(inftl);
+ return;
+ }
+
+ /* OK, it's a new one. Set up all the data structures. */
+
+ /* Calculate geometry */
+ inftl->cylinders = 1024;
+ inftl->heads = 16;
+
+ temp = inftl->cylinders * inftl->heads;
+ inftl->sectors = inftl->mbd.size / temp;
+ if (inftl->mbd.size % temp) {
+ inftl->sectors++;
+ temp = inftl->cylinders * inftl->sectors;
+ inftl->heads = inftl->mbd.size / temp;
+
+ if (inftl->mbd.size % temp) {
+ inftl->heads++;
+ temp = inftl->heads * inftl->sectors;
+ inftl->cylinders = inftl->mbd.size / temp;
+ }
+ }
+
+ if (inftl->mbd.size != inftl->heads * inftl->cylinders * inftl->sectors) {
+ /*
+		 * We could not find an exact fit:
+		 * mbd.size != heads * cylinders * sectors.
+		 */
+ printk(KERN_WARNING "INFTL: cannot calculate a geometry to "
+ "match size of 0x%lx.\n", inftl->mbd.size);
+ printk(KERN_WARNING "INFTL: using C:%d H:%d S:%d "
+ "(== 0x%lx sects)\n",
+ inftl->cylinders, inftl->heads , inftl->sectors,
+ (long)inftl->cylinders * (long)inftl->heads *
+ (long)inftl->sectors );
+ }
+
+ if (add_mtd_blktrans_dev(&inftl->mbd)) {
+ kfree(inftl->PUtable);
+ kfree(inftl->VUtable);
+ kfree(inftl);
+ return;
+ }
+#ifdef PSYCHO_DEBUG
+ printk(KERN_INFO "INFTL: Found new inftl%c\n", inftl->mbd.devnum + 'a');
+#endif
+ return;
+}
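The geometry loop in inftl_add_mtd() above simply grows S, then H, and finally recomputes C until the fixed 1024x16 starting grid covers mbd.size. A stand-alone sketch of the same arithmetic (hypothetical helper, not part of the patch), with a made-up size to show why the mismatch warning can still fire:

static void inftl_fit_chs(unsigned long size, int *cyl, int *head, int *sect)
{
	unsigned long temp;

	*cyl = 1024;			/* start from a fixed 1024x16 grid */
	*head = 16;

	temp = *cyl * *head;
	*sect = size / temp;
	if (size % temp) {
		(*sect)++;		/* round sectors up first ...       */
		temp = *cyl * *sect;
		*head = size / temp;
		if (size % temp) {
			(*head)++;	/* ... then heads ...               */
			temp = *head * *sect;
			*cyl = size / temp;	/* ... and recompute cylinders */
		}
	}
	/* e.g. size = 1000000 sectors gives C/H/S = 1008/16/62 = 999936,
	   which is why the driver still warns when C*H*S != mbd.size. */
}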
+
+static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct INFTLrecord *inftl = (void *)dev;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: remove_dev (i=%d)\n", dev->devnum);
+
+ del_mtd_blktrans_dev(dev);
+
+ kfree(inftl->PUtable);
+ kfree(inftl->VUtable);
+ kfree(inftl);
+}
+
+/*
+ * Actual INFTL access routines.
+ */
+
+/*
+ * INFTL_findfreeblock: Find a free Erase Unit on the INFTL partition.
+ *	This function is used when a new Erase Unit is needed for the
+ *	given Virtual Unit Chain.
+ */
+static u16 INFTL_findfreeblock(struct INFTLrecord *inftl, int desperate)
+{
+ u16 pot = inftl->LastFreeEUN;
+ int silly = inftl->nb_blocks;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p,"
+ "desperate=%d)\n", inftl, desperate);
+
+ /*
+ * Normally, we force a fold to happen before we run out of free
+ * blocks completely.
+ */
+ if (!desperate && inftl->numfreeEUNs < 2) {
+ DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
+ "EUNs (%d)\n", inftl->numfreeEUNs);
+ return 0xffff;
+ }
+
+ /* Scan for a free block */
+ do {
+ if (inftl->PUtable[pot] == BLOCK_FREE) {
+ inftl->LastFreeEUN = pot;
+ return pot;
+ }
+
+ if (++pot > inftl->lastEUN)
+ pot = 0;
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: no free blocks found! "
+ "EUN range = %d - %d\n", 0, inftl->LastFreeEUN);
+ return BLOCK_NIL;
+ }
+ } while (pot != inftl->LastFreeEUN);
+
+ return BLOCK_NIL;
+}
+
+static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned pendingblock)
+{
+ u16 BlockMap[MAX_SECTORS_PER_UNIT];
+ unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
+ unsigned int thisEUN, prevEUN, status;
+ int block, silly;
+ unsigned int targetEUN;
+ struct inftl_oob oob;
+ size_t retlen;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
+ "pending=%d)\n", inftl, thisVUC, pendingblock);
+
+ memset(BlockMap, 0xff, sizeof(BlockMap));
+ memset(BlockDeleted, 0, sizeof(BlockDeleted));
+
+ thisEUN = targetEUN = inftl->VUtable[thisVUC];
+
+ if (thisEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "INFTL: trying to fold non-existent "
+ "Virtual Unit Chain %d!\n", thisVUC);
+ return BLOCK_NIL;
+ }
+
+ /*
+ * Scan to find the Erase Unit which holds the actual data for each
+ * 512-byte block within the Chain.
+ */
+ silly = MAX_LOOPS;
+ while (thisEUN < inftl->nb_blocks) {
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
+ if ((BlockMap[block] != 0xffff) || BlockDeleted[block])
+ continue;
+
+ if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize)
+ + (block * SECTORSIZE), 16 , &retlen,
+ (char *)&oob) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = oob.b.Status | oob.b.Status1;
+
+ switch(status) {
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ case SECTOR_USED:
+ BlockMap[block] = thisEUN;
+ continue;
+ case SECTOR_DELETED:
+ BlockDeleted[block] = 1;
+ continue;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status "
+ "for block %d in EUN %d: %x\n",
+ block, thisEUN, status);
+ break;
+ }
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in Virtual "
+ "Unit Chain 0x%x\n", thisVUC);
+ return BLOCK_NIL;
+ }
+
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+ /*
+ * OK. We now know the location of every block in the Virtual Unit
+ * Chain, and the Erase Unit into which we are supposed to be copying.
+ * Go for it.
+ */
+ DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n",
+ thisVUC, targetEUN);
+
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
+ unsigned char movebuf[SECTORSIZE];
+ int ret;
+
+ /*
+ * If it's in the target EUN already, or if it's pending write,
+ * do nothing.
+ */
+ if (BlockMap[block] == targetEUN || (pendingblock ==
+ (thisVUC * (inftl->EraseSize / SECTORSIZE) + block))) {
+ continue;
+ }
+
+ /*
+		 * Copy only non-free blocks (free blocks can only
+		 * happen in case of media errors or deleted blocks).
+ */
+ if (BlockMap[block] == BLOCK_NIL)
+ continue;
+
+ ret = MTD_READ(inftl->mbd.mtd, (inftl->EraseSize *
+ BlockMap[block]) + (block * SECTORSIZE), SECTORSIZE,
+ &retlen, movebuf);
+ if (ret < 0) {
+ ret = MTD_READ(inftl->mbd.mtd, (inftl->EraseSize *
+ BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE, &retlen, movebuf);
+ if (ret != -EIO)
+ DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
+ "away on retry?\n");
+ }
+ memset(&oob, 0xff, sizeof(struct inftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+ MTD_WRITEECC(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) +
+ (block * SECTORSIZE), SECTORSIZE, &retlen,
+ movebuf, (char *)&oob, &inftl->oobinfo);
+ }
+
+ /*
+ * Newest unit in chain now contains data from _all_ older units.
+ * So go through and erase each unit in chain, oldest first. (This
+	 * is important: by erasing the oldest first, if we crash/reboot
+	 * it is relatively simple to clean up the mess).
+ */
+ DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n",
+ thisVUC);
+
+ for (;;) {
+ /* Find oldest unit in chain. */
+ thisEUN = inftl->VUtable[thisVUC];
+ prevEUN = BLOCK_NIL;
+ while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
+ prevEUN = thisEUN;
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+ /* Check if we are all done */
+ if (thisEUN == targetEUN)
+ break;
+
+ if (INFTL_formatblock(inftl, thisEUN) < 0) {
+ /*
+ * Could not erase : mark block as reserved.
+ */
+ inftl->PUtable[thisEUN] = BLOCK_RESERVED;
+ } else {
+ /* Correctly erased : mark it as free */
+ inftl->PUtable[thisEUN] = BLOCK_FREE;
+ inftl->PUtable[prevEUN] = BLOCK_NIL;
+ inftl->numfreeEUNs++;
+ }
+ }
+
+ return targetEUN;
+}
+
+static u16 INFTL_makefreeblock(struct INFTLrecord *inftl, unsigned pendingblock)
+{
+ /*
+ * This is the part that needs some cleverness applied.
+ * For now, I'm doing the minimum applicable to actually
+ * get the thing to work.
+ * Wear-levelling and other clever stuff needs to be implemented
+ * and we also need to do some assessment of the results when
+ * the system loses power half-way through the routine.
+ */
+ u16 LongestChain = 0;
+ u16 ChainLength = 0, thislen;
+ u16 chain, EUN;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p,"
+ "pending=%d)\n", inftl, pendingblock);
+
+ for (chain = 0; chain < inftl->nb_blocks; chain++) {
+ EUN = inftl->VUtable[chain];
+ thislen = 0;
+
+ while (EUN <= inftl->lastEUN) {
+ thislen++;
+ EUN = inftl->PUtable[EUN];
+ if (thislen > 0xff00) {
+ printk(KERN_WARNING "INFTL: endless loop in "
+ "Virtual Chain %d: Unit %x\n",
+ chain, EUN);
+ /*
+ * Actually, don't return failure.
+ * Just ignore this chain and get on with it.
+ */
+ thislen = 0;
+ break;
+ }
+ }
+
+ if (thislen > ChainLength) {
+ ChainLength = thislen;
+ LongestChain = chain;
+ }
+ }
+
+ if (ChainLength < 2) {
+ printk(KERN_WARNING "INFTL: no Virtual Unit Chains available "
+ "for folding. Failing request\n");
+ return BLOCK_NIL;
+ }
+
+ return INFTL_foldchain(inftl, LongestChain, pendingblock);
+}
+
+static int nrbits(unsigned int val, int bitcount)
+{
+ int i, total = 0;
+
+ for (i = 0; (i < bitcount); i++)
+ total += (((0x1 << i) & val) ? 1 : 0);
+ return total;
+}
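nrbits() is a plain population count. INFTL_findwriteunit() below uses it to build the parityPerField byte, one odd-parity bit per header field; a small self-contained sketch with made-up field values (illustration only, not part of the driver):

static u8 inftl_parity_example(void)
{
	u16 vuc = 0x0005;	/* 2 bits set -> even -> bit 0 stays clear  */
	u16 prev = 0x0001;	/* 1 bit set  -> odd  -> bit 1 (0x2) is set */
	u8 anac = 0x03;		/* 2 bits set -> even -> bit 2 stays clear  */
	u8 nacs = 0x07;		/* 3 bits set -> odd  -> bit 3 (0x8) is set */
	u8 parity;

	parity  = (nrbits(vuc, 16) & 0x1) ? 0x1 : 0;
	parity |= (nrbits(prev, 16) & 0x1) ? 0x2 : 0;
	parity |= (nrbits(anac, 8) & 0x1) ? 0x4 : 0;
	parity |= (nrbits(nacs, 8) & 0x1) ? 0x8 : 0;

	return parity;		/* 0x0a for these made-up values */
}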
+
+/*
+ * INFTL_findwriteunit: Return the unit number into which we can write
+ * for this block. Make it available if it isn't already.
+ */
+static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
+{
+ unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE);
+ unsigned int thisEUN, writeEUN, prev_block, status;
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1);
+ struct inftl_oob oob;
+ struct inftl_bci bci;
+ unsigned char anac, nacs, parity;
+ size_t retlen;
+ int silly, silly2 = 3;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p,"
+ "block=%d)\n", inftl, block);
+
+ do {
+ /*
+ * Scan the media to find a unit in the VUC which has
+ * a free space for the block in question.
+ */
+ writeEUN = BLOCK_NIL;
+ thisEUN = inftl->VUtable[thisVUC];
+ silly = MAX_LOOPS;
+
+ while (thisEUN <= inftl->lastEUN) {
+ MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci);
+
+ status = bci.Status | bci.Status1;
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in "
+				"EUN %d is %x\n", block, thisEUN, status);
+
+ switch(status) {
+ case SECTOR_FREE:
+ writeEUN = thisEUN;
+ break;
+ case SECTOR_DELETED:
+ case SECTOR_USED:
+ /* Can't go any further */
+ goto hitused;
+ case SECTOR_IGNORE:
+ break;
+ default:
+ /*
+ * Invalid block. Don't use it any more.
+ * Must implement.
+ */
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in "
+ "Virtual Unit Chain 0x%x\n", thisVUC);
+ return 0xffff;
+ }
+
+ /* Skip to next block in chain */
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+hitused:
+ if (writeEUN != BLOCK_NIL)
+ return writeEUN;
+
+
+ /*
+ * OK. We didn't find one in the existing chain, or there
+ * is no existing chain. Allocate a new one.
+ */
+ writeEUN = INFTL_findfreeblock(inftl, 0);
+
+ if (writeEUN == BLOCK_NIL) {
+ /*
+ * That didn't work - there were no free blocks just
+ * waiting to be picked up. We're going to have to fold
+ * a chain to make room.
+ */
+ thisEUN = INFTL_makefreeblock(inftl, 0xffff);
+
+ /*
+			 * Hopefully we freed something; let's try again.
+ * This time we are desperate...
+ */
+ DEBUG(MTD_DEBUG_LEVEL1, "INFTL: using desperate==1 "
+ "to find free EUN to accommodate write to "
+ "VUC %d\n", thisVUC);
+ writeEUN = INFTL_findfreeblock(inftl, 1);
+ if (writeEUN == BLOCK_NIL) {
+ /*
+ * Ouch. This should never happen - we should
+ * always be able to make some room somehow.
+ * If we get here, we've allocated more storage
+ * space than actual media, or our makefreeblock
+ * routine is missing something.
+ */
+ printk(KERN_WARNING "INFTL: cannot make free "
+ "space.\n");
+#ifdef DEBUG
+ INFTL_dumptables(inftl);
+ INFTL_dumpVUchains(inftl);
+#endif
+ return BLOCK_NIL;
+ }
+ }
+
+ /*
+ * Insert new block into virtual chain. Firstly update the
+ * block headers in flash...
+ */
+ anac = 0;
+ nacs = 0;
+ thisEUN = inftl->VUtable[thisVUC];
+ if (thisEUN != BLOCK_NIL) {
+ MTD_READOOB(inftl->mbd.mtd, thisEUN * inftl->EraseSize
+ + 8, 8, &retlen, (char *)&oob.u);
+ anac = oob.u.a.ANAC + 1;
+ nacs = oob.u.a.NACs + 1;
+ }
+
+ prev_block = inftl->VUtable[thisVUC];
+ if (prev_block < inftl->nb_blocks)
+ prev_block -= inftl->firstEUN;
+
+ parity = (nrbits(thisVUC, 16) & 0x1) ? 0x1 : 0;
+ parity |= (nrbits(prev_block, 16) & 0x1) ? 0x2 : 0;
+ parity |= (nrbits(anac, 8) & 0x1) ? 0x4 : 0;
+ parity |= (nrbits(nacs, 8) & 0x1) ? 0x8 : 0;
+
+ oob.u.a.virtualUnitNo = cpu_to_le16(thisVUC);
+ oob.u.a.prevUnitNo = cpu_to_le16(prev_block);
+ oob.u.a.ANAC = anac;
+ oob.u.a.NACs = nacs;
+ oob.u.a.parityPerField = parity;
+ oob.u.a.discarded = 0xaa;
+
+ MTD_WRITEOOB(inftl->mbd.mtd, writeEUN * inftl->EraseSize + 8, 8,
+ &retlen, (char *)&oob.u);
+
+ /* Also back up header... */
+ oob.u.b.virtualUnitNo = cpu_to_le16(thisVUC);
+ oob.u.b.prevUnitNo = cpu_to_le16(prev_block);
+ oob.u.b.ANAC = anac;
+ oob.u.b.NACs = nacs;
+ oob.u.b.parityPerField = parity;
+ oob.u.b.discarded = 0xaa;
+
+ MTD_WRITEOOB(inftl->mbd.mtd, writeEUN * inftl->EraseSize +
+ SECTORSIZE * 4 + 8, 8, &retlen, (char *)&oob.u);
+
+ inftl->PUtable[writeEUN] = inftl->VUtable[thisVUC];
+ inftl->VUtable[thisVUC] = writeEUN;
+
+ inftl->numfreeEUNs--;
+ return writeEUN;
+
+ } while (silly2--);
+
+ printk(KERN_WARNING "INFTL: error folding to make room for Virtual "
+ "Unit Chain 0x%x\n", thisVUC);
+ return 0xffff;
+}
+
+/*
+ * Given a Virtual Unit Chain, see if it can be deleted, and if so do it.
+ */
+static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
+{
+ unsigned char BlockUsed[MAX_SECTORS_PER_UNIT];
+ unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
+ unsigned int thisEUN, status;
+ int block, silly;
+ struct inftl_bci bci;
+ size_t retlen;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p,"
+ "thisVUC=%d)\n", inftl, thisVUC);
+
+ memset(BlockUsed, 0, sizeof(BlockUsed));
+ memset(BlockDeleted, 0, sizeof(BlockDeleted));
+
+ thisEUN = inftl->VUtable[thisVUC];
+ if (thisEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "INFTL: trying to delete non-existent "
+ "Virtual Unit Chain %d!\n", thisVUC);
+ return;
+ }
+
+ /*
+ * Scan through the Erase Units to determine whether any data is in
+ * each of the 512-byte blocks within the Chain.
+ */
+ silly = MAX_LOOPS;
+ while (thisEUN < inftl->nb_blocks) {
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++) {
+ if (BlockUsed[block] || BlockDeleted[block])
+ continue;
+
+ if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize)
+ + (block * SECTORSIZE), 8 , &retlen,
+ (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch(status) {
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ case SECTOR_USED:
+ BlockUsed[block] = 1;
+ continue;
+ case SECTOR_DELETED:
+ BlockDeleted[block] = 1;
+ continue;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status "
+ "for block %d in EUN %d: 0x%x\n",
+ block, thisEUN, status);
+ }
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in Virtual "
+ "Unit Chain 0x%x\n", thisVUC);
+ return;
+ }
+
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+ for (block = 0; block < inftl->EraseSize/SECTORSIZE; block++)
+ if (BlockUsed[block])
+ return;
+
+ /*
+ * For each block in the chain free it and make it available
+ * for future use. Erase from the oldest unit first.
+ */
+ DEBUG(MTD_DEBUG_LEVEL1, "INFTL: deleting empty VUC %d\n", thisVUC);
+
+ for (;;) {
+ u16 *prevEUN = &inftl->VUtable[thisVUC];
+ thisEUN = *prevEUN;
+
+ /* If the chain is all gone already, we're done */
+ if (thisEUN == BLOCK_NIL) {
+			DEBUG(MTD_DEBUG_LEVEL2, "INFTL: Empty VUC %d for deletion was already absent\n", thisVUC);
+ return;
+ }
+
+ /* Find oldest unit in chain. */
+ while (inftl->PUtable[thisEUN] != BLOCK_NIL) {
+ BUG_ON(thisEUN >= inftl->nb_blocks);
+
+ prevEUN = &inftl->PUtable[thisEUN];
+ thisEUN = *prevEUN;
+ }
+
+ DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n",
+ thisEUN, thisVUC);
+
+ if (INFTL_formatblock(inftl, thisEUN) < 0) {
+ /*
+ * Could not erase : mark block as reserved.
+ */
+ inftl->PUtable[thisEUN] = BLOCK_RESERVED;
+ } else {
+ /* Correctly erased : mark it as free */
+ inftl->PUtable[thisEUN] = BLOCK_FREE;
+ inftl->numfreeEUNs++;
+ }
+
+ /* Now sort out whatever was pointing to it... */
+ *prevEUN = BLOCK_NIL;
+
+ /* Ideally we'd actually be responsive to new
+ requests while we're doing this -- if there's
+ free space why should others be made to wait? */
+ cond_resched();
+ }
+
+ inftl->VUtable[thisVUC] = BLOCK_NIL;
+}
+
+static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
+{
+ unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
+ unsigned int status;
+ int silly = MAX_LOOPS;
+ size_t retlen;
+ struct inftl_bci bci;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p,"
+ "block=%d)\n", inftl, block);
+
+ while (thisEUN < inftl->nb_blocks) {
+ if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch (status) {
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ case SECTOR_DELETED:
+ thisEUN = BLOCK_NIL;
+ goto foundit;
+ case SECTOR_USED:
+ goto foundit;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status for "
+ "block %d in EUN %d: 0x%x\n",
+ block, thisEUN, status);
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in Virtual "
+ "Unit Chain 0x%x\n",
+ block / (inftl->EraseSize / SECTORSIZE));
+ return 1;
+ }
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+foundit:
+ if (thisEUN != BLOCK_NIL) {
+ loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
+
+ if (MTD_READOOB(inftl->mbd.mtd, ptr, 8, &retlen, (char *)&bci) < 0)
+ return -EIO;
+ bci.Status = bci.Status1 = SECTOR_DELETED;
+ if (MTD_WRITEOOB(inftl->mbd.mtd, ptr, 8, &retlen, (char *)&bci) < 0)
+ return -EIO;
+ INFTL_trydeletechain(inftl, block / (inftl->EraseSize / SECTORSIZE));
+ }
+ return 0;
+}
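The read, write and delete paths all map a 512-byte sector number onto a Virtual Unit Chain and a byte offset the same way, relying on EraseSize being a power of two (it is derived from the NAND erasesize). A sketch with made-up sizes, for illustration only:

static void inftl_index_example(void)
{
	unsigned long erasesize = 16384;	/* hypothetical: 32 sectors per Erase Unit */
	unsigned long block = 100;		/* 512-byte sector number */
	unsigned long vuc = block / (erasesize / 512);			/* = 3    */
	unsigned long blockofs = (block * 512) & (erasesize - 1);	/* = 2048 */

	/* Sector 100 lives in Virtual Unit Chain 3, at byte offset 2048
	   (the fifth sector) inside whichever Erase Unit holds it. */
	(void)vuc;
	(void)blockofs;
}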
+
+static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
+{
+ struct INFTLrecord *inftl = (void *)mbd;
+ unsigned int writeEUN;
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
+ size_t retlen;
+ struct inftl_oob oob;
+ char *p, *pend;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld,"
+ "buffer=%p)\n", inftl, block, buffer);
+
+ /* Is block all zero? */
+ pend = buffer + SECTORSIZE;
+ for (p = buffer; p < pend && !*p; p++)
+ ;
+
+ if (p < pend) {
+ writeEUN = INFTL_findwriteunit(inftl, block);
+
+ if (writeEUN == BLOCK_NIL) {
+ printk(KERN_WARNING "inftl_writeblock(): cannot find "
+ "block to write to\n");
+ /*
+ * If we _still_ haven't got a block to use,
+ * we're screwed.
+ */
+ return 1;
+ }
+
+ memset(&oob, 0xff, sizeof(struct inftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+ MTD_WRITEECC(inftl->mbd.mtd, (writeEUN * inftl->EraseSize) +
+ blockofs, SECTORSIZE, &retlen, (char *)buffer,
+ (char *)&oob, &inftl->oobinfo);
+ /*
+ * need to write SECTOR_USED flags since they are not written
+ * in mtd_writeecc
+ */
+ } else {
+ INFTL_deleteblock(inftl, block);
+ }
+
+ return 0;
+}
+
+static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
+{
+ struct INFTLrecord *inftl = (void *)mbd;
+ unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
+ unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
+ unsigned int status;
+ int silly = MAX_LOOPS;
+ struct inftl_bci bci;
+ size_t retlen;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
+ "buffer=%p)\n", inftl, block, buffer);
+
+ while (thisEUN < inftl->nb_blocks) {
+ if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) +
+ blockofs, 8, &retlen, (char *)&bci) < 0)
+ status = SECTOR_IGNORE;
+ else
+ status = bci.Status | bci.Status1;
+
+ switch (status) {
+ case SECTOR_DELETED:
+ thisEUN = BLOCK_NIL;
+ goto foundit;
+ case SECTOR_USED:
+ goto foundit;
+ case SECTOR_FREE:
+ case SECTOR_IGNORE:
+ break;
+ default:
+ printk(KERN_WARNING "INFTL: unknown status for "
+ "block %ld in EUN %d: 0x%04x\n",
+ block, thisEUN, status);
+ break;
+ }
+
+ if (!silly--) {
+ printk(KERN_WARNING "INFTL: infinite loop in "
+ "Virtual Unit Chain 0x%lx\n",
+ block / (inftl->EraseSize / SECTORSIZE));
+ return 1;
+ }
+
+ thisEUN = inftl->PUtable[thisEUN];
+ }
+
+foundit:
+ if (thisEUN == BLOCK_NIL) {
+ /* The requested block is not on the media, return all 0x00 */
+ memset(buffer, 0, SECTORSIZE);
+ } else {
+ size_t retlen;
+ loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
+ if (MTD_READ(inftl->mbd.mtd, ptr, SECTORSIZE, &retlen,
+ buffer))
+ return -EIO;
+ }
+ return 0;
+}
+
+static int inftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct INFTLrecord *inftl = (void *)dev;
+
+ geo->heads = inftl->heads;
+ geo->sectors = inftl->sectors;
+ geo->cylinders = inftl->cylinders;
+
+ return 0;
+}
+
+static struct mtd_blktrans_ops inftl_tr = {
+ .name = "inftl",
+ .major = INFTL_MAJOR,
+ .part_bits = INFTL_PARTN_BITS,
+ .getgeo = inftl_getgeo,
+ .readsect = inftl_readblock,
+ .writesect = inftl_writeblock,
+ .add_mtd = inftl_add_mtd,
+ .remove_dev = inftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_inftl(void)
+{
+ printk(KERN_INFO "INFTL: inftlcore.c $Revision: 1.22 $, "
+ "inftlmount.c %s\n", inftlmountrev);
+
+ return register_mtd_blktrans(&inftl_tr);
+}
+
+static void __exit cleanup_inftl(void)
+{
+ deregister_mtd_blktrans(&inftl_tr);
+}
+
+module_init(init_inftl);
+module_exit(cleanup_inftl);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>, David Woodhouse <dwmw2@infradead.org>, Fabrice Bellard <fabrice.bellard@netgem.com> et al.");
+MODULE_DESCRIPTION("Support code for Inverse Flash Translation Layer, used on M-Systems DiskOnChip 2000, Millennium and Millennium Plus");
diff --git a/linux-2.4.x/drivers/mtd/inftlmount.c b/linux-2.4.x/drivers/mtd/inftlmount.c
new file mode 100644
index 0000000..b26aea1
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/inftlmount.c
@@ -0,0 +1,809 @@
+/*
+ * inftlmount.c -- INFTL mount code with extensive checks.
+ *
+ * Author: Greg Ungerer (gerg@snapgear.com)
+ * (C) Copyright 2002-2003, Greg Ungerer (gerg@snapgear.com)
+ *
+ * Based heavily on the nftlmount.c code which is:
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright (C) 2000 Netgem S.A.
+ *
+ * $Id: inftlmount.c,v 1.18 2005/11/07 11:14:20 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nftl.h>
+#include <linux/mtd/inftl.h>
+#include <linux/mtd/compatmac.h>
+
+char inftlmountrev[]="$Revision: 1.18 $";
+
+/*
+ * find_boot_record: Find the INFTL Media Header and its Spare copy which
+ * contains the various device information of the INFTL partition and
+ * Bad Unit Table. Update the PUtable[] table according to the Bad
+ * Unit Table. PUtable[] is used for management of Erase Unit in
+ * other routines in inftlcore.c and inftlmount.c.
+ */
+static int find_boot_record(struct INFTLrecord *inftl)
+{
+ struct inftl_unittail h1;
+ //struct inftl_oob oob;
+ unsigned int i, block;
+ u8 buf[SECTORSIZE];
+ struct INFTLMediaHeader *mh = &inftl->MediaHdr;
+ struct INFTLPartition *ip;
+ size_t retlen;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
+
+ /*
+ * Assume logical EraseSize == physical erasesize for starting the
+ * scan. We'll sort it out later if we find a MediaHeader which says
+ * otherwise.
+ */
+ inftl->EraseSize = inftl->mbd.mtd->erasesize;
+ inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
+
+ inftl->MediaUnit = BLOCK_NIL;
+
+ /* Search for a valid boot record */
+ for (block = 0; block < inftl->nb_blocks; block++) {
+ int ret;
+
+ /*
+ * Check for BNAND header first. Then whinge if it's found
+ * but later checks fail.
+ */
+ ret = MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize,
+ SECTORSIZE, &retlen, buf);
+ /* We ignore ret in case the ECC of the MediaHeader is invalid
+ (which is apparently acceptable) */
+ if (retlen != SECTORSIZE) {
+ static int warncount = 5;
+
+ if (warncount) {
+ printk(KERN_WARNING "INFTL: block read at 0x%x "
+ "of mtd%d failed: %d\n",
+ block * inftl->EraseSize,
+ inftl->mbd.mtd->index, ret);
+ if (!--warncount)
+ printk(KERN_WARNING "INFTL: further "
+ "failures for this block will "
+ "not be printed\n");
+ }
+ continue;
+ }
+
+ if (retlen < 6 || memcmp(buf, "BNAND", 6)) {
+ /* BNAND\0 not found. Continue */
+ continue;
+ }
+
+ /* To be safer with BIOS, also use erase mark as discriminant */
+		if ((ret = MTD_READOOB(inftl->mbd.mtd, block * inftl->EraseSize +
+		    SECTORSIZE + 8, 8, &retlen, (char *)&h1)) < 0) {
+ printk(KERN_WARNING "INFTL: ANAND header found at "
+ "0x%x in mtd%d, but OOB data read failed "
+ "(err %d)\n", block * inftl->EraseSize,
+ inftl->mbd.mtd->index, ret);
+ continue;
+ }
+
+
+ /*
+		 * This is the first one we've seen.
+ * Copy the media header structure into place.
+ */
+ memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
+
+ /* Read the spare media header at offset 4096 */
+ MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize + 4096,
+ SECTORSIZE, &retlen, buf);
+ if (retlen != SECTORSIZE) {
+ printk(KERN_WARNING "INFTL: Unable to read spare "
+ "Media Header\n");
+ return -1;
+ }
+ /* Check if this one is the same as the first one we found. */
+ if (memcmp(mh, buf, sizeof(struct INFTLMediaHeader))) {
+ printk(KERN_WARNING "INFTL: Primary and spare Media "
+ "Headers disagree.\n");
+ return -1;
+ }
+
+ mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
+ mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
+ mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions);
+ mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits);
+ mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
+ mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
+
+#ifdef CONFIG_MTD_DEBUG_VERBOSE
+ if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
+ printk("INFTL: Media Header ->\n"
+ " bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+ " BlockMultiplerBits = %d\n"
+ " FormatFlgs = %d\n"
+ " OsakVersion = 0x%x\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ mh->OsakVersion, mh->PercentUsed);
+ }
+#endif
+
+ if (mh->NoOfBDTLPartitions == 0) {
+ printk(KERN_WARNING "INFTL: Media Header sanity check "
+ "failed: NoOfBDTLPartitions (%d) == 0, "
+ "must be at least 1\n", mh->NoOfBDTLPartitions);
+ return -1;
+ }
+
+ if ((mh->NoOfBDTLPartitions + mh->NoOfBinaryPartitions) > 4) {
+ printk(KERN_WARNING "INFTL: Media Header sanity check "
+ "failed: Total Partitions (%d) > 4, "
+ "BDTL=%d Binary=%d\n", mh->NoOfBDTLPartitions +
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->NoOfBinaryPartitions);
+ return -1;
+ }
+
+ if (mh->BlockMultiplierBits > 1) {
+ printk(KERN_WARNING "INFTL: sorry, we don't support "
+ "UnitSizeFactor 0x%02x\n",
+ mh->BlockMultiplierBits);
+ return -1;
+ } else if (mh->BlockMultiplierBits == 1) {
+ printk(KERN_WARNING "INFTL: support for INFTL with "
+ "UnitSizeFactor 0x%02x is experimental\n",
+ mh->BlockMultiplierBits);
+ inftl->EraseSize = inftl->mbd.mtd->erasesize <<
+ mh->BlockMultiplierBits;
+ inftl->nb_blocks = inftl->mbd.mtd->size / inftl->EraseSize;
+ block >>= mh->BlockMultiplierBits;
+ }
+
+ /* Scan the partitions */
+ for (i = 0; (i < 4); i++) {
+ ip = &mh->Partitions[i];
+ ip->virtualUnits = le32_to_cpu(ip->virtualUnits);
+ ip->firstUnit = le32_to_cpu(ip->firstUnit);
+ ip->lastUnit = le32_to_cpu(ip->lastUnit);
+ ip->flags = le32_to_cpu(ip->flags);
+ ip->spareUnits = le32_to_cpu(ip->spareUnits);
+ ip->Reserved0 = le32_to_cpu(ip->Reserved0);
+
+#ifdef CONFIG_MTD_DEBUG_VERBOSE
+ if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
+ printk(" PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
+ }
+#endif
+
+ if (ip->Reserved0 != ip->firstUnit) {
+ struct erase_info *instr = &inftl->instr;
+
+ instr->mtd = inftl->mbd.mtd;
+
+ /*
+ * Most likely this is using the
+				 * undocumented quick mount feature.
+				 * We don't support that; we will need
+ * to erase the hidden block for full
+ * compatibility.
+ */
+ instr->addr = ip->Reserved0 * inftl->EraseSize;
+ instr->len = inftl->EraseSize;
+ MTD_ERASE(inftl->mbd.mtd, instr);
+ }
+ if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
+ printk(KERN_WARNING "INFTL: Media Header "
+ "Partition %d sanity check failed\n"
+ " firstUnit %d : lastUnit %d > "
+					"virtualUnits %d\n", i, ip->firstUnit,
+					ip->lastUnit, ip->virtualUnits);
+ return -1;
+ }
+ if (ip->Reserved1 != 0) {
+ printk(KERN_WARNING "INFTL: Media Header "
+ "Partition %d sanity check failed: "
+ "Reserved1 %d != 0\n",
+ i, ip->Reserved1);
+ return -1;
+ }
+
+ if (ip->flags & INFTL_BDTL)
+ break;
+ }
+
+ if (i >= 4) {
+ printk(KERN_WARNING "INFTL: Media Header Partition "
+ "sanity check failed:\n No partition "
+ "marked as Disk Partition\n");
+ return -1;
+ }
+
+ inftl->nb_boot_blocks = ip->firstUnit;
+ inftl->numvunits = ip->virtualUnits;
+ if (inftl->numvunits > (inftl->nb_blocks -
+ inftl->nb_boot_blocks - 2)) {
+ printk(KERN_WARNING "INFTL: Media Header sanity check "
+ "failed:\n numvunits (%d) > nb_blocks "
+ "(%d) - nb_boot_blocks(%d) - 2\n",
+ inftl->numvunits, inftl->nb_blocks,
+ inftl->nb_boot_blocks);
+ return -1;
+ }
+
+ inftl->mbd.size = inftl->numvunits *
+ (inftl->EraseSize / SECTORSIZE);
+
+ /*
+ * Block count is set to last used EUN (we won't need to keep
+ * any meta-data past that point).
+ */
+ inftl->firstEUN = ip->firstUnit;
+ inftl->lastEUN = ip->lastUnit;
+ inftl->nb_blocks = ip->lastUnit + 1;
+
+ /* Memory alloc */
+ inftl->PUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ if (!inftl->PUtable) {
+ printk(KERN_WARNING "INFTL: allocation of PUtable "
+ "failed (%zd bytes)\n",
+ inftl->nb_blocks * sizeof(u16));
+ return -ENOMEM;
+ }
+
+ inftl->VUtable = kmalloc(inftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ if (!inftl->VUtable) {
+ kfree(inftl->PUtable);
+ printk(KERN_WARNING "INFTL: allocation of VUtable "
+ "failed (%zd bytes)\n",
+ inftl->nb_blocks * sizeof(u16));
+ return -ENOMEM;
+ }
+
+ /* Mark the blocks before INFTL MediaHeader as reserved */
+ for (i = 0; i < inftl->nb_boot_blocks; i++)
+ inftl->PUtable[i] = BLOCK_RESERVED;
+ /* Mark all remaining blocks as potentially containing data */
+ for (; i < inftl->nb_blocks; i++)
+ inftl->PUtable[i] = BLOCK_NOTEXPLORED;
+
+ /* Mark this boot record (NFTL MediaHeader) block as reserved */
+ inftl->PUtable[block] = BLOCK_RESERVED;
+
+ /* Read Bad Erase Unit Table and modify PUtable[] accordingly */
+ for (i = 0; i < inftl->nb_blocks; i++) {
+ int physblock;
+ /* If any of the physical eraseblocks are bad, don't
+ use the unit. */
+ for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
+ if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock))
+ inftl->PUtable[i] = BLOCK_RESERVED;
+ }
+ }
+
+ inftl->MediaUnit = block;
+ return 0;
+ }
+
+ /* Not found. */
+ return -1;
+}
+
+static int memcmpb(void *a, int c, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ if (c != ((unsigned char *)a)[i])
+ return 1;
+ }
+ return 0;
+}
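memcmpb() just asks whether every byte of a buffer equals a constant; the mount code only ever calls it with 0xff to decide whether a region still looks erased. A minimal usage sketch (the helper name and buffer are made up):

static int looks_erased(const u8 *buf, int len)
{
	/* memcmpb() returns 0 only if every byte equals the constant,
	   so this asks "is the whole region still 0xff?" */
	return memcmpb((void *)buf, 0xff, len) == 0;
}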
+
+/*
+ * check_free_sectors: check if a free sector is actually FREE,
+ *	i.e. all 0xff in both the data and OOB areas.
+ */
+static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
+ int len, int check_oob)
+{
+ u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize];
+ size_t retlen;
+ int i;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: check_free_sectors(inftl=%p,"
+ "address=0x%x,len=%d,check_oob=%d)\n", inftl,
+ address, len, check_oob);
+
+ for (i = 0; i < len; i += SECTORSIZE) {
+ if (MTD_READECC(inftl->mbd.mtd, address, SECTORSIZE, &retlen, buf, &buf[SECTORSIZE], &inftl->oobinfo) < 0)
+ return -1;
+ if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
+ return -1;
+
+ if (check_oob) {
+ if (memcmpb(buf + SECTORSIZE, 0xff, inftl->mbd.mtd->oobsize) != 0)
+ return -1;
+ }
+ address += SECTORSIZE;
+ }
+
+ return 0;
+}
+
+/*
+ * INFTL_formatblock: format an Erase Unit by erasing ALL Erase Zones in the
+ *		 Erase Unit and update the INFTL metadata. Each erase
+ *		 operation is checked with check_free_sectors.
+ *
+ * Return: 0 on success, -1 on error.
+ *
+ * ToDo: 1. Is it necessary to call check_free_sectors after erasing?
+ */
+int INFTL_formatblock(struct INFTLrecord *inftl, int block)
+{
+ size_t retlen;
+ struct inftl_unittail uci;
+ struct erase_info *instr = &inftl->instr;
+ int physblock;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p,"
+ "block=%d)\n", inftl, block);
+
+ memset(instr, 0, sizeof(struct erase_info));
+
+ /* FIXME: Shouldn't we be setting the 'discarded' flag to zero
+ _first_? */
+
+ /* Use async erase interface, test return code */
+ instr->mtd = inftl->mbd.mtd;
+ instr->addr = block * inftl->EraseSize;
+ instr->len = inftl->mbd.mtd->erasesize;
+ /* Erase one physical eraseblock at a time, even though the NAND api
+	   allows us to group them.  This way, if we have a failure, we can
+ mark only the failed block in the bbt. */
+ for (physblock = 0; physblock < inftl->EraseSize; physblock += instr->len, instr->addr += instr->len) {
+ MTD_ERASE(inftl->mbd.mtd, instr);
+
+ if (instr->state == MTD_ERASE_FAILED) {
+ printk(KERN_WARNING "INFTL: error while formatting block %d\n",
+ block);
+ goto fail;
+ }
+
+ /*
+ * Check the "freeness" of Erase Unit before updating metadata.
+		 * FixMe: is this check really necessary, since we already
+		 * check the return code of the erase operation?
+ */
+ if (check_free_sectors(inftl, instr->addr, instr->len, 1) != 0)
+ goto fail;
+ }
+
+ uci.EraseMark = cpu_to_le16(ERASE_MARK);
+ uci.EraseMark1 = cpu_to_le16(ERASE_MARK);
+ uci.Reserved[0] = 0;
+ uci.Reserved[1] = 0;
+ uci.Reserved[2] = 0;
+ uci.Reserved[3] = 0;
+ instr->addr = block * inftl->EraseSize + SECTORSIZE * 2;
+ if (MTD_WRITEOOB(inftl->mbd.mtd, instr->addr +
+ 8, 8, &retlen, (char *)&uci) < 0)
+ goto fail;
+ return 0;
+fail:
+ /* could not format, update the bad block table (caller is responsible
+ for setting the PUtable to BLOCK_RESERVED on failure) */
+ inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr);
+ return -1;
+}
+
+/*
+ * format_chain: Format an invalid Virtual Unit chain. It frees all the Erase
+ * Units in a Virtual Unit Chain, i.e. all the units are disconnected.
+ *
+ * Since the chain is invalid, we will have to erase it from its
+ * head (normally for INFTL we go from the oldest). But if it has a
+ * loop then there is no oldest...
+ */
+static void format_chain(struct INFTLrecord *inftl, unsigned int first_block)
+{
+ unsigned int block = first_block, block1;
+
+ printk(KERN_WARNING "INFTL: formatting chain at block %d\n",
+ first_block);
+
+ for (;;) {
+ block1 = inftl->PUtable[block];
+
+ printk(KERN_WARNING "INFTL: formatting block %d\n", block);
+ if (INFTL_formatblock(inftl, block) < 0) {
+ /*
+			 * Cannot format. Mark it as a Bad Unit.
+ */
+ inftl->PUtable[block] = BLOCK_RESERVED;
+ } else {
+ inftl->PUtable[block] = BLOCK_FREE;
+ }
+
+ /* Goto next block on the chain */
+ block = block1;
+
+ if (block == BLOCK_NIL || block >= inftl->lastEUN)
+ break;
+ }
+}
+
+void INFTL_dumptables(struct INFTLrecord *s)
+{
+ int i;
+
+ printk("-------------------------------------------"
+ "----------------------------------\n");
+
+ printk("VUtable[%d] ->", s->nb_blocks);
+ for (i = 0; i < s->nb_blocks; i++) {
+ if ((i % 8) == 0)
+ printk("\n%04x: ", i);
+ printk("%04x ", s->VUtable[i]);
+ }
+
+ printk("\n-------------------------------------------"
+ "----------------------------------\n");
+
+ printk("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
+ for (i = 0; i <= s->lastEUN; i++) {
+ if ((i % 8) == 0)
+ printk("\n%04x: ", i);
+ printk("%04x ", s->PUtable[i]);
+ }
+
+ printk("\n-------------------------------------------"
+ "----------------------------------\n");
+
+ printk("INFTL ->\n"
+ " EraseSize = %d\n"
+ " h/s/c = %d/%d/%d\n"
+ " numvunits = %d\n"
+ " firstEUN = %d\n"
+ " lastEUN = %d\n"
+ " numfreeEUNs = %d\n"
+ " LastFreeEUN = %d\n"
+ " nb_blocks = %d\n"
+ " nb_boot_blocks = %d",
+ s->EraseSize, s->heads, s->sectors, s->cylinders,
+ s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs,
+ s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks);
+
+ printk("\n-------------------------------------------"
+ "----------------------------------\n");
+}
+
+void INFTL_dumpVUchains(struct INFTLrecord *s)
+{
+ int logical, block, i;
+
+ printk("-------------------------------------------"
+ "----------------------------------\n");
+
+ printk("INFTL Virtual Unit Chains:\n");
+ for (logical = 0; logical < s->nb_blocks; logical++) {
+ block = s->VUtable[logical];
+ if (block > s->nb_blocks)
+ continue;
+ printk(" LOGICAL %d --> %d ", logical, block);
+ for (i = 0; i < s->nb_blocks; i++) {
+ if (s->PUtable[block] == BLOCK_NIL)
+ break;
+ block = s->PUtable[block];
+ printk("%d ", block);
+ }
+ printk("\n");
+ }
+
+ printk("-------------------------------------------"
+ "----------------------------------\n");
+}
+
+int INFTL_mount(struct INFTLrecord *s)
+{
+ unsigned int block, first_block, prev_block, last_block;
+ unsigned int first_logical_block, logical_block, erase_mark;
+ int chain_length, do_format_chain;
+ struct inftl_unithead1 h0;
+ struct inftl_unittail h1;
+ size_t retlen;
+ int i;
+ u8 *ANACtable, ANAC;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s);
+
+ /* Search for INFTL MediaHeader and Spare INFTL Media Header */
+ if (find_boot_record(s) < 0) {
+ printk(KERN_WARNING "INFTL: could not find valid boot record?\n");
+ return -ENXIO;
+ }
+
+ /* Init the logical to physical table */
+ for (i = 0; i < s->nb_blocks; i++)
+ s->VUtable[i] = BLOCK_NIL;
+
+ logical_block = block = BLOCK_NIL;
+
+ /* Temporary buffer to store ANAC numbers. */
+ ANACtable = kmalloc(s->nb_blocks * sizeof(u8), GFP_KERNEL);
+ if (!ANACtable) {
+ printk(KERN_ERR "INFTL: Out of memory.\n");
+ return -ENOMEM;
+ }
+
+ memset(ANACtable, 0, s->nb_blocks);
+
+ /*
+ * First pass is to explore each physical unit, and construct the
+ * virtual chains that exist (newest physical unit goes into VUtable).
+ * Any block that is in any way invalid will be left in the
+ * NOTEXPLORED state. Then at the end we will try to format it and
+ * mark it as free.
+ */
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 1, explore each unit\n");
+ for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) {
+ if (s->PUtable[first_block] != BLOCK_NOTEXPLORED)
+ continue;
+
+ do_format_chain = 0;
+ first_logical_block = BLOCK_NIL;
+ last_block = BLOCK_NIL;
+ block = first_block;
+
+ for (chain_length = 0; ; chain_length++) {
+
+ if ((chain_length == 0) &&
+ (s->PUtable[block] != BLOCK_NOTEXPLORED)) {
+ /* Nothing to do here, onto next block */
+ break;
+ }
+
+ if (MTD_READOOB(s->mbd.mtd, block * s->EraseSize + 8,
+ 8, &retlen, (char *)&h0) < 0 ||
+ MTD_READOOB(s->mbd.mtd, block * s->EraseSize +
+ 2 * SECTORSIZE + 8, 8, &retlen, (char *)&h1) < 0) {
+ /* Should never happen? */
+ do_format_chain++;
+ break;
+ }
+
+ logical_block = le16_to_cpu(h0.virtualUnitNo);
+ prev_block = le16_to_cpu(h0.prevUnitNo);
+ erase_mark = le16_to_cpu((h1.EraseMark | h1.EraseMark1));
+ ANACtable[block] = h0.ANAC;
+
+ /* Previous block is relative to start of Partition */
+ if (prev_block < s->nb_blocks)
+ prev_block += s->firstEUN;
+
+ /* Already explored partial chain? */
+ if (s->PUtable[block] != BLOCK_NOTEXPLORED) {
+				/* Check if this is the chain for this logical block */
+ if (logical_block == first_logical_block) {
+ if (last_block != BLOCK_NIL)
+ s->PUtable[last_block] = block;
+ }
+ break;
+ }
+
+ /* Check for invalid block */
+ if (erase_mark != ERASE_MARK) {
+ printk(KERN_WARNING "INFTL: corrupt block %d "
+ "in chain %d, chain length %d, erase "
+ "mark 0x%x?\n", block, first_block,
+ chain_length, erase_mark);
+ /*
+ * Assume end of chain, probably incomplete
+ * fold/erase...
+ */
+ if (chain_length == 0)
+ do_format_chain++;
+ break;
+ }
+
+ /* Check for it being free already then... */
+ if ((logical_block == BLOCK_FREE) ||
+ (logical_block == BLOCK_NIL)) {
+ s->PUtable[block] = BLOCK_FREE;
+ break;
+ }
+
+ /* Sanity checks on block numbers */
+ if ((logical_block >= s->nb_blocks) ||
+ ((prev_block >= s->nb_blocks) &&
+ (prev_block != BLOCK_NIL))) {
+ if (chain_length > 0) {
+ printk(KERN_WARNING "INFTL: corrupt "
+ "block %d in chain %d?\n",
+ block, first_block);
+ do_format_chain++;
+ }
+ break;
+ }
+
+ if (first_logical_block == BLOCK_NIL) {
+ first_logical_block = logical_block;
+ } else {
+ if (first_logical_block != logical_block) {
+ /* Normal for folded chain... */
+ break;
+ }
+ }
+
+ /*
+ * Current block is valid, so if we followed a virtual
+ * chain to get here then we can set the previous
+ * block pointer in our PUtable now. Then move onto
+ * the previous block in the chain.
+ */
+ s->PUtable[block] = BLOCK_NIL;
+ if (last_block != BLOCK_NIL)
+ s->PUtable[last_block] = block;
+ last_block = block;
+ block = prev_block;
+
+ /* Check for end of chain */
+ if (block == BLOCK_NIL)
+ break;
+
+ /* Validate next block before following it... */
+ if (block > s->lastEUN) {
+ printk(KERN_WARNING "INFTL: invalid previous "
+ "block %d in chain %d?\n", block,
+ first_block);
+ do_format_chain++;
+ break;
+ }
+ }
+
+ if (do_format_chain) {
+ format_chain(s, first_block);
+ continue;
+ }
+
+ /*
+ * Looks like a valid chain then. It may not really be the
+ * newest block in the chain, but it is the newest we have
+ * found so far. We might update it in later iterations of
+ * this loop if we find something newer.
+ */
+ s->VUtable[first_logical_block] = first_block;
+ logical_block = BLOCK_NIL;
+ }
+
+#ifdef CONFIG_MTD_DEBUG_VERBOSE
+ if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
+ INFTL_dumptables(s);
+#endif
+
+ /*
+ * Second pass, check for infinite loops in chains. These are
+ * possible because we don't update the previous pointers when
+ * we fold chains. No big deal, just fix them up in PUtable.
+ */
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 2, validate virtual chains\n");
+ for (logical_block = 0; logical_block < s->numvunits; logical_block++) {
+ block = s->VUtable[logical_block];
+ last_block = BLOCK_NIL;
+
+ /* Check for free/reserved/nil */
+ if (block >= BLOCK_RESERVED)
+ continue;
+
+ ANAC = ANACtable[block];
+ for (i = 0; i < s->numvunits; i++) {
+ if (s->PUtable[block] == BLOCK_NIL)
+ break;
+ if (s->PUtable[block] > s->lastEUN) {
+ printk(KERN_WARNING "INFTL: invalid prev %d, "
+ "in virtual chain %d\n",
+ s->PUtable[block], logical_block);
+ s->PUtable[block] = BLOCK_NIL;
+
+ }
+ if (ANACtable[block] != ANAC) {
+ /*
+ * Chain must point back to itself. This is ok,
+				 * but we will need to adjust the tables with this
+ * newest block and oldest block.
+ */
+ s->VUtable[logical_block] = block;
+ s->PUtable[last_block] = BLOCK_NIL;
+ break;
+ }
+
+ ANAC--;
+ last_block = block;
+ block = s->PUtable[block];
+ }
+
+ if (i >= s->nb_blocks) {
+ /*
+			 * Uh-oh, infinite chain with valid ANACs!
+ * Format whole chain...
+ */
+ format_chain(s, first_block);
+ }
+ }
+
+#ifdef CONFIG_MTD_DEBUG_VERBOSE
+ if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
+ INFTL_dumptables(s);
+ if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
+ INFTL_dumpVUchains(s);
+#endif
+
+ /*
+ * Third pass, format unreferenced blocks and init free block count.
+ */
+ s->numfreeEUNs = 0;
+ s->LastFreeEUN = BLOCK_NIL;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 3, format unused blocks\n");
+ for (block = s->firstEUN; block <= s->lastEUN; block++) {
+ if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
+ printk("INFTL: unreferenced block %d, formatting it\n",
+ block);
+ if (INFTL_formatblock(s, block) < 0)
+ s->PUtable[block] = BLOCK_RESERVED;
+ else
+ s->PUtable[block] = BLOCK_FREE;
+ }
+ if (s->PUtable[block] == BLOCK_FREE) {
+ s->numfreeEUNs++;
+ if (s->LastFreeEUN == BLOCK_NIL)
+ s->LastFreeEUN = block;
+ }
+ }
+
+ kfree(ANACtable);
+ return 0;
+}
diff --git a/linux-2.4.x/drivers/mtd/maps/Config.in b/linux-2.4.x/drivers/mtd/maps/Config.in
index 4ff8796..4a1d69b 100644
--- a/linux-2.4.x/drivers/mtd/maps/Config.in
+++ b/linux-2.4.x/drivers/mtd/maps/Config.in
@@ -1,34 +1,20 @@
# drivers/mtd/maps/Config.in
-# $Id: Config.in,v 1.28 2002/03/08 16:34:35 rkaiser Exp $
+# $Id: Config.in,v 1.72 2005/02/27 21:50:21 ppopov Exp $
mainmenu_option next_comment
comment 'Mapping drivers for chip access'
-dep_tristate ' CFI Flash device in physical memory map' CONFIG_MTD_PHYSMAP $CONFIG_MTD_GEN_PROBE
-if [ "$CONFIG_MTD_PHYSMAP" = "y" -o "$CONFIG_MTD_PHYSMAP" = "m" ]; then
+bool ' Support for non-linear mappings of flash chips' CONFIG_MTD_COMPLEX_MAPPINGS
+
+bool ' CFI Flash device in physical memory map' CONFIG_MTD_PHYSMAP $CONFIG_MTD_GEN_PROBE
+if [ "$CONFIG_MTD_PHYSMAP" = "y" ]; then
hex ' Physical start address of flash mapping' CONFIG_MTD_PHYSMAP_START 0x8000000
hex ' Physical length of flash mapping' CONFIG_MTD_PHYSMAP_LEN 0x4000000
- int ' Bus width in octets' CONFIG_MTD_PHYSMAP_BUSWIDTH 2
+ int ' Bank width in octets' CONFIG_MTD_PHYSMAP_BANKWIDTH 2
fi
-dep_tristate ' Generic uClinux RAM/ROM filesystem support' CONFIG_MTD_UCLINUX $CONFIG_MTD_PARTITIONS
-dep_tristate ' CFI Flash device mapped on Dragonix VZ' CONFIG_MTD_DRAGONIX $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_CFI_AMDSTD $CONFIG_MTD_PARTITIONS
-dep_tristate ' CFI Flash device mapped on SnapGear/SecureEdge' CONFIG_MTD_NETtel $CONFIG_MTD_PARTITIONS
-dep_tristate ' CFI Flash device mapped on SnapGear/SecureEdge (uClinux)' CONFIG_MTD_NETteluC $CONFIG_UCLINUX $CONFIG_MTD_PARTITIONS
-if [ "$CONFIG_MTD_NETteluC" = "y" ]; then
- choice ' FLASH size' \
- " AUTO CONFIG_FLASHAUTO \
- 128KB CONFIG_FLASH128KB \
- 1MB CONFIG_FLASH1MB \
- 2MB CONFIG_FLASH2MB \
- 4MB CONFIG_FLASH4MB \
- 6MB CONFIG_FLASH6MB \
- 8MB CONFIG_FLASH8MB \
- 16MB CONFIG_FLASH16MB" AUTO
-fi
-dep_tristate ' CFI Flash device mapped on Key Technology devices' CONFIG_MTD_KeyTechnology $CONFIG_UCLINUX $CONFIG_MTD_PARTITIONS
if [ "$CONFIG_SPARC" = "y" -o "$CONFIG_SPARC64" = "y" ]; then
dep_tristate ' Sun Microsystems userflash support' CONFIG_MTD_SUN_UFLASH $CONFIG_MTD_CFI
fi
@@ -37,37 +23,58 @@ if [ "$CONFIG_X86" = "y" ]; then
dep_tristate ' CFI Flash device mapped on Photron PNC-2000' CONFIG_MTD_PNC2000 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS
dep_tristate ' CFI Flash device mapped on AMD SC520 CDP' CONFIG_MTD_SC520CDP $CONFIG_MTD_CFI
dep_tristate ' CFI Flash device mapped on AMD NetSc520' CONFIG_MTD_NETSC520 $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS
- dep_tristate ' CFI Flash device mapped on Arcom SBC-GXx boards' CONFIG_MTD_SBC_GXX $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS
- dep_tristate ' CFI Flash device mapped on Arcom ELAN-104NC' CONFIG_MTD_ELAN_104NC $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS
+ dep_tristate ' CFI Flash device mapped on Arcom SBC-GXx boards' CONFIG_MTD_SBC_GXX $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS $CONFIG_MTD_COMPLEX_MAPPINGS
+ dep_tristate ' CFI Flash device mapped on Arcom ELAN-104NC' CONFIG_MTD_ELAN_104NC $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS $CONFIG_MTD_COMPLEX_MAPPINGS
dep_tristate ' CFI Flash device mapped on DIL/Net PC' CONFIG_MTD_DILNETPC $CONFIG_MTD_CFI_INTELEXT $CONFIG_MTD_PARTITIONS $CONFIG_MTD_CONCAT
if [ "$CONFIG_MTD_DILNETPC" = "y" -o "$CONFIG_MTD_DILNETPC" = "m" ]; then
hex ' Size of boot partition' CONFIG_MTD_DILNETPC_BOOTSIZE 0x80000
fi
- dep_tristate ' JEDEC Flash device mapped on Mixcom piggyback card' CONFIG_MTD_MIXMEM $CONFIG_MTD_JEDEC
- dep_tristate ' JEDEC Flash device mapped on Octagon 5066 SBC' CONFIG_MTD_OCTAGON $CONFIG_MTD_JEDEC
- dep_tristate ' JEDEC Flash device mapped on Tempustech VMAX SBC301' CONFIG_MTD_VMAX $CONFIG_MTD_JEDEC
+ dep_tristate ' JEDEC Flash device mapped on Octagon 5066 SBC' CONFIG_MTD_OCTAGON $CONFIG_MTD_JEDEC $CONFIG_MTD_COMPLEX_MAPPINGS
+ dep_tristate ' JEDEC Flash device mapped on Tempustech VMAX SBC301' CONFIG_MTD_VMAX $CONFIG_MTD_JEDEC $CONFIG_MTD_COMPLEX_MAPPINGS
+ dep_tristate ' Flash device mapped with DOCCS on NatSemi SCx200' CONFIG_MTD_SCx200_DOCFLASH $CONFIG_MTD_CFI
dep_tristate ' BIOS flash chip on Intel L440GX boards' CONFIG_MTD_L440GX $CONFIG_MTD_JEDECPROBE
- dep_tristate ' ROM connected to AMD766 southbridge' CONFIG_MTD_AMD766ROM $CONFIG_MTD_GEN_PROBE
- dep_tristate ' ROM connected to Intel Hub Controller 2' CONFIG_MTD_ICH2ROM $CONFIG_MTD_JEDECPROBE
+ dep_tristate ' ROM connected to AMD76X southbridge' CONFIG_MTD_AMD76XROM $CONFIG_MTD_GEN_PROBE
+ dep_tristate ' ROM connected to Intel Hub Controller 2/3/4/5' CONFIG_MTD_ICHXROM $CONFIG_MTD_JEDECPROBE
+ dep_tristate ' CFI Flash device mapped on SnapGear/SecureEdge' CONFIG_MTD_NETtel $CONFIG_MTD_PARTITIONS
+ dep_tristate ' BIOS flash chip on Intel SCB2 boards' CONFIG_MTD_SCB2_FLASH $CONFIG_MTD_GEN_PROBE
fi
-if [ "$CONFIG_PPC" = "y" ]; then
- dep_tristate ' CFI Flash device mapped on TQM8XXL' CONFIG_MTD_TQM8XXL $CONFIG_MTD_CFI $CONFIG_TQM8xxL
- dep_tristate ' CFI Flash device mapped on RPX Lite or CLLF' CONFIG_MTD_RPXLITE $CONFIG_MTD_CFI
- dep_tristate ' System flash on MBX860 board' CONFIG_MTD_MBX860 $CONFIG_MTD_CFI
- dep_tristate ' CFI Flash device mapped on D-Box2' CONFIG_MTD_DBOX2 $CONFIG_MTD_CFI
- dep_tristate ' CFI Flash device mapping on FlagaDM' CONFIG_MTD_CFI_FLAGADM $CONFIG_MTD_CFI
+if [ "$CONFIG_PPC32" = "y" ]; then
+ if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "y" ]; then
+ dep_tristate ' Flash device on SBC8240' CONFIG_MTD_SBC8240 $CONFIG_MTD_JEDECPROBE
+ fi
+ if [ "$CONFIG_8xx" = "y" ]; then
+ if [ "$CONFIG_TQM8xxL" = "y" ]; then
+ dep_tristate ' CFI Flash device mapped on TQM8XXL' CONFIG_MTD_TQM8XXL $CONFIG_MTD_CFI
+ fi
+ if [ "$CONFIG_RPXLITE" = "y" -o "$CONFIG_RPXCLASSIC" = "y" ]; then
+ dep_tristate ' CFI Flash device mapped on RPX Lite or CLLF' CONFIG_MTD_RPXLITE $CONFIG_MTD_CFI
+ fi
+ if [ "$CONFIG_MBX" = "y" ]; then
+ dep_tristate ' System flash on MBX860 board' CONFIG_MTD_MBX860 $CONFIG_MTD_CFI
+ fi
+ if [ "$CONFIG_DBOX2" = "y" ]; then
+ dep_tristate ' CFI Flash device mapped on D-Box2' CONFIG_MTD_DBOX2 $CONFIG_MTD_CFI
+ fi
+ dep_tristate ' CFI Flash device mapping on FlagaDM' CONFIG_MTD_CFI_FLAGADM $CONFIG_MTD_CFI
+ fi
+ if [ "$CONFIG_4xx" = "y" ]; then
+ if [ "$CONFIG_40x" = "y" ]; then
+ if [ "$CONFIG_REDWOOD_4" = "y" -o "$CONFIG_REDWOOD_5" = "y" -o "$CONFIG_REDWOOD_6" = "y" ]; then
+ dep_tristate ' CFI Flash device mapped on IBM Redwood' CONFIG_MTD_REDWOOD $CONFIG_MTD_CFI
+ fi
+ dep_tristate ' CFI Flash device mapped on IBM Beech' CONFIG_MTD_BEECH $CONFIG_MTD_CFI $CONFIG_BEECH
+ dep_tristate ' CFI Flash device mapped on IBM Arctic' CONFIG_MTD_ARCTIC $CONFIG_MTD_CFI $CONFIG_ARCTIC2
+ dep_tristate ' Flash device mapped on IBM Walnut' CONFIG_MTD_WALNUT $CONFIG_MTD_JEDECPROBE $CONFIG_WALNUT
+ fi
+ if [ "$CONFIG_440" = "y" ]; then
+ dep_tristate ' Flash devices mapped on IBM Ebony' CONFIG_MTD_EBONY $CONFIG_MTD_JEDECPROBE $CONFIG_EBONY
+ fi
+ fi
fi
-if [ "$CONFIG_MIPS" = "y" ]; then
- dep_tristate ' Pb1000 boot flash device' CONFIG_MTD_PB1000 $CONFIG_MIPS_PB1000
- dep_tristate ' Pb1500 MTD support' CONFIG_MTD_PB1500 $CONFIG_MIPS_PB1500
- dep_tristate ' Pb1100 MTD support' CONFIG_MTD_PB1100 $CONFIG_MIPS_PB1100
- if [ "$CONFIG_MTD_PB1500" = "y" -o "$CONFIG_MTD_PB1500" = "m" \
- -o "$CONFIG_MTD_PB1100" = "y" -o "$CONFIG_MTD_PB1100" = "m" ]; then
- bool ' Pb1[015]00 boot flash device' CONFIG_MTD_PB1500_BOOT
- bool ' Pb1[015]00 user flash device (2nd 32MB bank)' CONFIG_MTD_PB1500_USER
- fi
+if [ "$CONFIG_MIPS" = "y" -o "$CONFIG_MIPS64" = "y" ]; then
+ dep_tristate ' AMD Alchemy Pb1xxx/Db1xxx/RDK MTD support' CONFIG_MTD_ALCHEMY $CONFIG_SOC_AU1X00
dep_tristate ' Flash chip mapping on ITE QED-4N-S01B, Globespan IVR or custom board' CONFIG_MTD_CSTM_MIPS_IXX $CONFIG_MTD_CFI $CONFIG_MTD_JEDEC $CONFIG_MTD_PARTITIONS
if [ "$CONFIG_MTD_CSTM_MIPS_IXX" = "y" -o "$CONFIG_MTD_CSTM_MIPS_IXX" = "m" ]; then
hex ' Physical start address of flash mapping' CONFIG_MTD_CSTM_MIPS_IXX_START 0x8000000
@@ -75,6 +82,7 @@ if [ "$CONFIG_MIPS" = "y" ]; then
int ' Bus width in octets' CONFIG_MTD_CSTM_MIPS_IXX_BUSWIDTH 2
fi
dep_tristate ' Momenco Ocelot boot flash device' CONFIG_MTD_OCELOT $CONFIG_MOMENCO_OCELOT
+ dep_tristate ' LASAT flash device' CONFIG_MTD_LASAT $CONFIG_LASAT
fi
if [ "$CONFIG_SUPERH" = "y" ]; then
@@ -86,30 +94,32 @@ if [ "$CONFIG_SUPERH" = "y" ]; then
fi
if [ "$CONFIG_ARM" = "y" ]; then
- dep_tristate ' CFI Flash device mapped on Nora' CONFIG_MTD_NORA $CONFIG_MTD_CFI
dep_tristate ' CFI Flash device mapped on ARM Integrator/P720T' CONFIG_MTD_ARM_INTEGRATOR $CONFIG_MTD_CFI
dep_tristate ' Cirrus CDB89712 evaluation board mappings' CONFIG_MTD_CDB89712 $CONFIG_MTD_CFI $CONFIG_ARCH_CDB89712
dep_tristate ' CFI Flash device mapped on StrongARM SA11x0' CONFIG_MTD_SA1100 $CONFIG_MTD_CFI $CONFIG_ARCH_SA1100 $CONFIG_MTD_PARTITIONS
- dep_tristate ' CFI Flash device mapped on DC21285 Footbridge' CONFIG_MTD_DC21285 $CONFIG_MTD_CFI $CONFIG_ARCH_FOOTBRIDGE $CONFIG_MTD_PARTITIONS
+ dep_tristate ' CFI Flash device mapped on DC21285 Footbridge' CONFIG_MTD_DC21285 $CONFIG_MTD_CFI $CONFIG_ARCH_FOOTBRIDGE $CONFIG_MTD_COMPLEX_MAPPINGS
dep_tristate ' CFI Flash device mapped on the XScale IQ80310 board' CONFIG_MTD_IQ80310 $CONFIG_MTD_CFI $CONFIG_ARCH_IQ80310
+ dep_tristate ' CFI Flash device mapped on the XScale Lubbock board' CONFIG_MTD_LUBBOCK $CONFIG_MTD_CFI $CONFIG_ARCH_LUBBOCK
+ dep_tristate ' CFI Flash device mapped on XScale IXP425 systems' CONFIG_MTD_IXP425 $CONFIG_MTD_CFI $CONFIG_MTD_COMPLEX_MAPPINGS
dep_tristate ' CFI Flash device mapped on Epxa10db' CONFIG_MTD_EPXA10DB $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS $CONFIG_ARCH_CAMELOT
+ dep_tristate ' CFI Flash device mapped on the FortuNet board' CONFIG_MTD_FORTUNET $CONFIG_MTD_CFI $CONFIG_MTD_PARTITIONS $CONFIG_SA1100_FORTUNET
dep_tristate ' NV-RAM mapping AUTCPU12 board' CONFIG_MTD_AUTCPU12 $CONFIG_ARCH_AUTCPU12
+ dep_tristate ' CFI Flash device mapped on EDB7312' CONFIG_MTD_EDB7312 $CONFIG_MTD_CFI
+ dep_tristate ' CFI Flash device mapped on Hynix evaluation boards' CONFIG_MTD_H720X $CONFIG_MTD_CFI
+ dep_tristate ' JEDEC Flash device mapped on impA7' CONFIG_MTD_IMPA7 $CONFIG_MTD_JEDECPROBE
+ dep_tristate ' JEDEC Flash device mapped on Ceiva/Polaroid PhotoMax Digital Picture Frame' CONFIG_MTD_CEIVA $CONFIG_MTD_JEDECPROBE $CONFIG_ARCH_CEIVA
+ dep_tristate ' NOR Flash device on TOTO board' CONFIG_MTD_NOR_TOTO $CONFIG_MTD $CONFIG_OMAP_TOTO
fi
-
-if [ "$CONFIG_M5272" = "y" -a "$CONFIG_senTec" = "y" ]; then
- dep_tristate ' CFI Flash device mapped on COBRA5272' CONFIG_MTD_COBRA5272 $CONFIG_MTD_CFI
-fi
-
if [ "$CONFIG_ALPHA" = "y" ]; then
- dep_tristate ' Flash chip mapping on TSUNAMI' CONFIG_MTD_TSUNAMI $CONFIG_MTD_GENPROBE
+ dep_tristate ' Flash chip mapping on TSUNAMI' CONFIG_MTD_TSUNAMI $CONFIG_MTD_GENPROBE $CONFIG_MTD_COMPLEX_MAPPINGS
fi
-# This needs CFI or JEDEC, depending on the cards found.
-dep_tristate ' PCI MTD driver' CONFIG_MTD_PCI $CONFIG_MTD $CONFIG_PCI
-
-if [ "$CONFIG_M5272C3" ]; then
- dep_tristate ' CFI Flash device mapped on Motorola M5272C3' CONFIG_MTD_M5272C3 $CONFIG_MTD_CFI
+if [ "$CONFIG_UCLINUX" = "y" ]; then
+ dep_tristate ' Generic uClinux RAM/ROM filesystem support' CONFIG_MTD_UCLINUX $CONFIG_MTD_PARTITIONS
fi
+# This needs CFI or JEDEC, depending on the cards found.
+dep_tristate ' PCI MTD driver' CONFIG_MTD_PCI $CONFIG_MTD $CONFIG_PCI $CONFIG_MTD_COMPLEX_MAPPINGS
+dep_tristate ' PCMCIA MTD driver' CONFIG_MTD_PCMCIA $CONFIG_MTD $CONFIG_PCMCIA $CONFIG_MTD_COMPLEX_MAPPINGS
endmenu
diff --git a/linux-2.4.x/drivers/mtd/maps/Makefile b/linux-2.4.x/drivers/mtd/maps/Makefile
index 43258d3..d778681 100644
--- a/linux-2.4.x/drivers/mtd/maps/Makefile
+++ b/linux-2.4.x/drivers/mtd/maps/Makefile
@@ -1,52 +1,11 @@
#
-# linux/drivers/maps/Makefile
+# linux/drivers/maps/Makefile.24
+# Makefile for obsolete kernels
#
-# $Id: Makefile,v 1.21 2002/02/22 09:34:44 gleixner Exp $
+# $Id: Makefile.24,v 1.1 2004/07/12 16:08:16 dwmw2 Exp $
O_TARGET := mapslink.o
+export-objs := map_funcs.o
-# Chip mappings
-obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
-obj-$(CONFIG_MTD_DRAGONIX) += dragonix_map.o
-obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o
-obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
-obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o
-obj-$(CONFIG_MTD_DC21285) += dc21285.o
-obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
-obj-$(CONFIG_MTD_ELAN_104NC) += elan-104nc.o
-obj-$(CONFIG_MTD_EPXA10DB) += epxa10db-flash.o
-obj-$(CONFIG_MTD_IQ80310) += iq80310.o
-obj-$(CONFIG_MTD_L440GX) += l440gx.o
-obj-$(CONFIG_MTD_AMD766ROM) += amd766rom.o
-obj-$(CONFIG_MTD_ICH2ROM) += ich2rom.o
-obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
-obj-$(CONFIG_MTD_MBX860) += mbx860.o
-obj-$(CONFIG_MTD_NORA) += nora.o
-obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
-obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
-obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
-obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
-obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
-obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
-obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
-obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
-obj-$(CONFIG_MTD_NETSC520) += netsc520.o
-obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
-obj-$(CONFIG_MTD_VMAX) += vmax301.o
-obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
-obj-$(CONFIG_MTD_OCELOT) += ocelot.o
-obj-$(CONFIG_MTD_NETtel) += nettel.o
-obj-$(CONFIG_MTD_NETteluC) += nettel-uc.o
-obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
-obj-$(CONFIG_MTD_KeyTechnology) += keyTechnology.o
-obj-$(CONFIG_MTD_COBRA5272) += cobra5272.o
-obj-$(CONFIG_MTD_M5272C3) += m5272c3.o
-obj-$(CONFIG_MTD_PCI) += pci.o
-obj-$(CONFIG_MTD_PB1000) += pb1xxx-flash.o
-obj-$(CONFIG_MTD_PB1100) += pb1xxx-flash.o
-obj-$(CONFIG_MTD_PB1500) += pb1xxx-flash.o
-obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
-obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
-
+include Makefile.common
include $(TOPDIR)/Rules.make
-
diff --git a/linux-2.4.x/drivers/mtd/maps/Makefile.common b/linux-2.4.x/drivers/mtd/maps/Makefile.common
new file mode 100644
index 0000000..0ac832c
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/Makefile.common
@@ -0,0 +1,75 @@
+#
+# linux/drivers/maps/Makefile
+#
+# $Id: Makefile.common,v 1.39 2005/12/02 21:07:01 tpoynor Exp $
+
+ifeq ($(CONFIG_MTD_COMPLEX_MAPPINGS),y)
+obj-$(CONFIG_MTD) += map_funcs.o
+endif
+
+# Chip mappings
+obj-$(CONFIG_MTD_CDB89712) += cdb89712.o
+obj-$(CONFIG_MTD_ARM_INTEGRATOR)+= integrator-flash.o
+obj-$(CONFIG_MTD_BAST) += bast-flash.o
+obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o
+obj-$(CONFIG_MTD_CSTM_MIPS_IXX) += cstm_mips_ixx.o
+obj-$(CONFIG_MTD_DC21285) += dc21285.o
+obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o
+obj-$(CONFIG_MTD_EPXA10DB) += epxa10db-flash.o
+obj-$(CONFIG_MTD_IQ80310) += iq80310.o
+obj-$(CONFIG_MTD_L440GX) += l440gx.o
+obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o
+obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
+obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
+obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
+obj-$(CONFIG_MTD_MBX860) += mbx860.o
+obj-$(CONFIG_MTD_CEIVA) += ceiva.o
+obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
+obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
+obj-$(CONFIG_MTD_MULTI_PHYSMAP) += mphysmap.o
+obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
+obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
+obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
+obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
+obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o
+obj-$(CONFIG_MTD_IPAQ) += ipaq-flash.o
+obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o
+obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o
+obj-$(CONFIG_MTD_NETSC520) += netsc520.o
+obj-$(CONFIG_MTD_TS5500) += ts5500_flash.o
+obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o
+obj-$(CONFIG_MTD_VMAX) += vmax301.o
+obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
+obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
+obj-$(CONFIG_MTD_OCELOT) += ocelot.o
+obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
+obj-$(CONFIG_MTD_PCI) += pci.o
+obj-$(CONFIG_MTD_ALCHEMY) += alchemy-flash.o
+obj-$(CONFIG_MTD_LASAT) += lasat.o
+obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
+obj-$(CONFIG_MTD_EDB7312) += edb7312.o
+obj-$(CONFIG_MTD_IMPA7) += impa7.o
+obj-$(CONFIG_MTD_FORTUNET) += fortunet.o
+obj-$(CONFIG_MTD_REDWOOD) += redwood.o
+obj-$(CONFIG_MTD_UCLINUX) += uclinux.o
+obj-$(CONFIG_MTD_NETtel) += nettel.o
+obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
+obj-$(CONFIG_MTD_EBONY) += ebony.o
+obj-$(CONFIG_MTD_OCOTEA) += ocotea.o
+obj-$(CONFIG_MTD_BEECH) += beech-mtd.o
+obj-$(CONFIG_MTD_ARCTIC) += arctic-mtd.o
+obj-$(CONFIG_MTD_WALNUT) += walnut.o
+obj-$(CONFIG_MTD_H720X) += h720x-flash.o
+obj-$(CONFIG_MTD_SBC8240) += sbc8240.o
+obj-$(CONFIG_MTD_NOR_TOTO) += omap-toto-flash.o
+obj-$(CONFIG_MTD_MPC1211) += mpc1211.o
+obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
+obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
+obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
+obj-$(CONFIG_MTD_DMV182) += dmv182.o
+obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o
+obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
+obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
+obj-$(CONFIG_MTD_MTX1) += mtx-1_flash.o
+obj-$(CONFIG_MTD_TQM834x) += tqm834x.o
+obj-$(CONFIG_MTD_P3P440) += p3p440.o
diff --git a/linux-2.4.x/drivers/mtd/maps/alchemy-flash.c b/linux-2.4.x/drivers/mtd/maps/alchemy-flash.c
new file mode 100644
index 0000000..96395cb
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/alchemy-flash.c
@@ -0,0 +1,190 @@
+/*
+ * Flash memory access on AMD Alchemy evaluation boards
+ *
+ * $Id: alchemy-flash.c,v 1.3 2006/03/29 08:31:11 dwmw2 Exp $
+ *
+ * (C) 2003, 2004 Pete Popov <ppopov@embeddedalley.com>
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+
+#ifdef DEBUG_RW
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+#ifdef CONFIG_MIPS_PB1000
+#define BOARD_MAP_NAME "Pb1000 Flash"
+#define BOARD_FLASH_SIZE 0x00800000 /* 8MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#endif
+
+#ifdef CONFIG_MIPS_PB1500
+#define BOARD_MAP_NAME "Pb1500 Flash"
+#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#endif
+
+#ifdef CONFIG_MIPS_PB1100
+#define BOARD_MAP_NAME "Pb1100 Flash"
+#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#endif
+
+#ifdef CONFIG_MIPS_PB1550
+#define BOARD_MAP_NAME "Pb1550 Flash"
+#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#endif
+
+#ifdef CONFIG_MIPS_PB1200
+#define BOARD_MAP_NAME "Pb1200 Flash"
+#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
+#define BOARD_FLASH_WIDTH 2 /* 16-bits */
+#endif
+
+#ifdef CONFIG_MIPS_DB1000
+#define BOARD_MAP_NAME "Db1000 Flash"
+#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#endif
+
+#ifdef CONFIG_MIPS_DB1500
+#define BOARD_MAP_NAME "Db1500 Flash"
+#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#endif
+
+#ifdef CONFIG_MIPS_DB1100
+#define BOARD_MAP_NAME "Db1100 Flash"
+#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#endif
+
+#ifdef CONFIG_MIPS_DB1550
+#define BOARD_MAP_NAME "Db1550 Flash"
+#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#endif
+
+#ifdef CONFIG_MIPS_DB1200
+#define BOARD_MAP_NAME "Db1200 Flash"
+#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
+#define BOARD_FLASH_WIDTH 2 /* 16-bits */
+#endif
+
+#ifdef CONFIG_MIPS_HYDROGEN3
+#define BOARD_MAP_NAME "Hydrogen3 Flash"
+#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#define USE_LOCAL_ACCESSORS /* why? */
+#endif
+
+#ifdef CONFIG_MIPS_BOSPORUS
+#define BOARD_MAP_NAME "Bosporus Flash"
+#define BOARD_FLASH_SIZE 0x01000000 /* 16MB */
+#define BOARD_FLASH_WIDTH 2 /* 16-bits */
+#endif
+
+#ifdef CONFIG_MIPS_MIRAGE
+#define BOARD_MAP_NAME "Mirage Flash"
+#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#define USE_LOCAL_ACCESSORS /* why? */
+#endif
+
+static struct map_info alchemy_map = {
+ .name = BOARD_MAP_NAME,
+};
+
+static struct mtd_partition alchemy_partitions[] = {
+ {
+ .name = "User FS",
+ .size = BOARD_FLASH_SIZE - 0x00400000,
+ .offset = 0x0000000
+ },{
+ .name = "YAMON",
+ .size = 0x0100000,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = MTD_WRITEABLE
+ },{
+ .name = "raw kernel",
+ .size = (0x300000 - 0x40000), /* last 256KB is yamon env */
+ .offset = MTDPART_OFS_APPEND,
+ }
+};
+
+static struct mtd_info *mymtd;
+
+int __init alchemy_mtd_init(void)
+{
+ struct mtd_partition *parts;
+ int nb_parts = 0;
+ unsigned long window_addr;
+ unsigned long window_size;
+
+ /* Default flash buswidth */
+ alchemy_map.bankwidth = BOARD_FLASH_WIDTH;
+
+ window_addr = 0x20000000 - BOARD_FLASH_SIZE;
+ window_size = BOARD_FLASH_SIZE;
+#ifdef CONFIG_MIPS_MIRAGE_WHY
+ /* Boot ROM flash bank only; no user bank */
+ window_addr = 0x1C000000;
+ window_size = 0x04000000;
+ /* USERFS from 0x1C00 0000 to 0x1FC00000 */
+ alchemy_partitions[0].size = 0x03C00000;
+#endif
+
+ /*
+ * Static partition definition selection
+ */
+ parts = alchemy_partitions;
+ nb_parts = ARRAY_SIZE(alchemy_partitions);
+ alchemy_map.size = window_size;
+
+ /*
+ * Now let's probe for the actual flash. Do it here since
+ * specific machine settings might have been set above.
+ */
+ printk(KERN_NOTICE BOARD_MAP_NAME ": probing %d-bit flash bus\n",
+ alchemy_map.bankwidth*8);
+ alchemy_map.virt = ioremap(window_addr, window_size);
+ mymtd = do_map_probe("cfi_probe", &alchemy_map);
+ if (!mymtd) {
+ iounmap(alchemy_map.virt);
+ return -ENXIO;
+ }
+ mymtd->owner = THIS_MODULE;
+
+ add_mtd_partitions(mymtd, parts, nb_parts);
+ return 0;
+}
+
+static void __exit alchemy_mtd_cleanup(void)
+{
+ if (mymtd) {
+ del_mtd_partitions(mymtd);
+ map_destroy(mymtd);
+ iounmap(alchemy_map.virt);
+ }
+}
+
+module_init(alchemy_mtd_init);
+module_exit(alchemy_mtd_cleanup);
+
+MODULE_AUTHOR("Embedded Alley Solutions, Inc");
+MODULE_DESCRIPTION(BOARD_MAP_NAME " MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/linux-2.4.x/drivers/mtd/maps/amd76xrom.c b/linux-2.4.x/drivers/mtd/maps/amd76xrom.c
new file mode 100644
index 0000000..e80583b
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/amd76xrom.c
@@ -0,0 +1,330 @@
+/*
+ * amd76xrom.c
+ *
+ * Normal mappings of chips in physical memory
+ * $Id: amd76xrom.c,v 1.22 2005/11/29 20:01:28 gleixner Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/list.h>
+
+
+#define xstr(s) str(s)
+#define str(s) #s
+#define MOD_NAME xstr(KBUILD_BASENAME)
+
+#define ADDRESS_NAME_LEN 18
+
+#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
+
+struct amd76xrom_window {
+ void __iomem *virt;
+ unsigned long phys;
+ unsigned long size;
+ struct list_head maps;
+ struct resource rsrc;
+ struct pci_dev *pdev;
+};
+
+struct amd76xrom_map_info {
+ struct list_head list;
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct resource rsrc;
+ char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
+};
+
+static struct amd76xrom_window amd76xrom_window = {
+ .maps = LIST_HEAD_INIT(amd76xrom_window.maps),
+};
+
+static void amd76xrom_cleanup(struct amd76xrom_window *window)
+{
+ struct amd76xrom_map_info *map, *scratch;
+ u8 byte;
+
+ if (window->pdev) {
+ /* Disable writes through the rom window */
+ pci_read_config_byte(window->pdev, 0x40, &byte);
+ pci_write_config_byte(window->pdev, 0x40, byte & ~1);
+ }
+
+ /* Free all of the mtd devices */
+ list_for_each_entry_safe(map, scratch, &window->maps, list) {
+ if (map->rsrc.parent) {
+ release_resource(&map->rsrc);
+ }
+ del_mtd_device(map->mtd);
+ map_destroy(map->mtd);
+ list_del(&map->list);
+ kfree(map);
+ }
+ if (window->rsrc.parent)
+ release_resource(&window->rsrc);
+
+ if (window->virt) {
+ iounmap(window->virt);
+ window->virt = NULL;
+ window->phys = 0;
+ window->size = 0;
+ window->pdev = NULL;
+ }
+}
+
+
+static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ u8 byte;
+ struct amd76xrom_window *window = &amd76xrom_window;
+ struct amd76xrom_map_info *map = NULL;
+ unsigned long map_top;
+
+ /* Remember the pci dev I find the window in */
+ window->pdev = pdev;
+
+	/* Assume the rom window is properly set up, and find its size */
+ pci_read_config_byte(pdev, 0x43, &byte);
+ if ((byte & ((1<<7)|(1<<6))) == ((1<<7)|(1<<6))) {
+ window->phys = 0xffb00000; /* 5MiB */
+ }
+ else if ((byte & (1<<7)) == (1<<7)) {
+ window->phys = 0xffc00000; /* 4MiB */
+ }
+ else {
+ window->phys = 0xffff0000; /* 64KiB */
+ }
+ window->size = 0xffffffffUL - window->phys + 1UL;
+
+ /*
+ * Try to reserve the window mem region. If this fails then
+ * it is likely due to a fragment of the window being
+ * "reseved" by the BIOS. In the case that the
+ * request_mem_region() fails then once the rom size is
+ * discovered we will try to reserve the unreserved fragment.
+ */
+ window->rsrc.name = MOD_NAME;
+ window->rsrc.start = window->phys;
+ window->rsrc.end = window->phys + window->size - 1;
+ window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, &window->rsrc)) {
+ window->rsrc.parent = NULL;
+ printk(KERN_ERR MOD_NAME
+ " %s(): Unable to register resource"
+ " 0x%.08lx-0x%.08lx - kernel bug?\n",
+ __func__,
+ window->rsrc.start, window->rsrc.end);
+ }
+
+#if 0
+
+ /* Enable the selected rom window */
+ pci_read_config_byte(pdev, 0x43, &byte);
+ pci_write_config_byte(pdev, 0x43, byte | rwindow->segen_bits);
+#endif
+
+ /* Enable writes through the rom window */
+ pci_read_config_byte(pdev, 0x40, &byte);
+ pci_write_config_byte(pdev, 0x40, byte | 1);
+
+ /* FIXME handle registers 0x80 - 0x8C the bios region locks */
+
+ /* For write accesses caches are useless */
+ window->virt = ioremap_nocache(window->phys, window->size);
+ if (!window->virt) {
+ printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
+ window->phys, window->size);
+ goto out;
+ }
+
+	/* Get the first address to look for a rom chip at */
+ map_top = window->phys;
+#if 1
+	/* Running the probe sequence over the firmware hub lock
+	 * registers sets them to 0x7 (no access), so probe at most
+	 * the last 4M of the address space.
+ */
+ if (map_top < 0xffc00000) {
+ map_top = 0xffc00000;
+ }
+#endif
+ /* Loop through and look for rom chips */
+ while((map_top - 1) < 0xffffffffUL) {
+ struct cfi_private *cfi;
+ unsigned long offset;
+ int i;
+
+ if (!map) {
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ }
+ if (!map) {
+ printk(KERN_ERR MOD_NAME ": kmalloc failed");
+ goto out;
+ }
+ memset(map, 0, sizeof(*map));
+ INIT_LIST_HEAD(&map->list);
+ map->map.name = map->map_name;
+ map->map.phys = map_top;
+ offset = map_top - window->phys;
+ map->map.virt = (void __iomem *)
+ (((unsigned long)(window->virt)) + offset);
+ map->map.size = 0xffffffffUL - map_top + 1UL;
+ /* Set the name of the map to the address I am trying */
+ sprintf(map->map_name, "%s @%08lx",
+ MOD_NAME, map->map.phys);
+
+ /* There is no generic VPP support */
+ for(map->map.bankwidth = 32; map->map.bankwidth;
+ map->map.bankwidth >>= 1)
+ {
+ char **probe_type;
+ /* Skip bankwidths that are not supported */
+ if (!map_bankwidth_supported(map->map.bankwidth))
+ continue;
+
+ /* Setup the map methods */
+ simple_map_init(&map->map);
+
+ /* Try all of the probe methods */
+ probe_type = rom_probe_types;
+ for(; *probe_type; probe_type++) {
+ map->mtd = do_map_probe(*probe_type, &map->map);
+ if (map->mtd)
+ goto found;
+ }
+ }
+ map_top += ROM_PROBE_STEP_SIZE;
+ continue;
+ found:
+ /* Trim the size if we are larger than the map */
+ if (map->mtd->size > map->map.size) {
+ printk(KERN_WARNING MOD_NAME
+ " rom(%u) larger than window(%lu). fixing...\n",
+ map->mtd->size, map->map.size);
+ map->mtd->size = map->map.size;
+ }
+ if (window->rsrc.parent) {
+ /*
+ * Registering the MTD device in iomem may not be possible
+ * if there is a BIOS "reserved" and BUSY range. If this
+ * fails then continue anyway.
+ */
+ map->rsrc.name = map->map_name;
+ map->rsrc.start = map->map.phys;
+ map->rsrc.end = map->map.phys + map->mtd->size - 1;
+ map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&window->rsrc, &map->rsrc)) {
+ printk(KERN_ERR MOD_NAME
+ ": cannot reserve MTD resource\n");
+ map->rsrc.parent = NULL;
+ }
+ }
+
+ /* Make the whole region visible in the map */
+ map->map.virt = window->virt;
+ map->map.phys = window->phys;
+ cfi = map->map.fldrv_priv;
+ for(i = 0; i < cfi->numchips; i++) {
+ cfi->chips[i].start += offset;
+ }
+
+		/* Now that the mtd device is complete, claim and export it */
+ map->mtd->owner = THIS_MODULE;
+ if (add_mtd_device(map->mtd)) {
+ map_destroy(map->mtd);
+ map->mtd = NULL;
+ goto out;
+ }
+
+
+ /* Calculate the new value of map_top */
+ map_top += map->mtd->size;
+
+ /* File away the map structure */
+ list_add(&map->list, &window->maps);
+ map = NULL;
+ }
+
+ out:
+ /* Free any left over map structures */
+ kfree(map);
+ /* See if I have any map structures */
+ if (list_empty(&window->maps)) {
+ amd76xrom_cleanup(window);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+static void __devexit amd76xrom_remove_one (struct pci_dev *pdev)
+{
+ struct amd76xrom_window *window = &amd76xrom_window;
+
+ amd76xrom_cleanup(window);
+}
+
+static struct pci_device_id amd76xrom_pci_tbl[] = {
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7440,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_AMD, 0x7468 }, /* amd8111 support */
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, amd76xrom_pci_tbl);
+
+#if 0
+static struct pci_driver amd76xrom_driver = {
+ .name = MOD_NAME,
+ .id_table = amd76xrom_pci_tbl,
+ .probe = amd76xrom_init_one,
+ .remove = amd76xrom_remove_one,
+};
+#endif
+
+static int __init init_amd76xrom(void)
+{
+ struct pci_dev *pdev;
+ struct pci_device_id *id;
+ pdev = NULL;
+ for(id = amd76xrom_pci_tbl; id->vendor; id++) {
+ pdev = pci_find_device(id->vendor, id->device, NULL);
+ if (pdev) {
+ break;
+ }
+ }
+ if (pdev) {
+ return amd76xrom_init_one(pdev, &amd76xrom_pci_tbl[0]);
+ }
+ return -ENXIO;
+#if 0
+ return pci_register_driver(&amd76xrom_driver);
+#endif
+}
+
+static void __exit cleanup_amd76xrom(void)
+{
+ amd76xrom_remove_one(amd76xrom_window.pdev);
+}
+
+module_init(init_amd76xrom);
+module_exit(cleanup_amd76xrom);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on the AMD76X southbridge");
+
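
All of the window arithmetic in amd76xrom_init_one() is "distance from the top of the 32-bit address space": config register 0x43 only selects how far below 0xFFFFFFFF the BIOS ROM window opens. A standalone sketch of that decode follows; the helper name is invented, while the constants are the ones read out of config space above.

/*
 * Sketch of the top-of-memory window math used above.  The helper name
 * is made up; the constants are those amd76xrom_init_one() derives from
 * config register 0x43.
 */
#include <linux/types.h>

static unsigned long rom_window_size(u8 decode_byte)
{
	unsigned long phys;

	if ((decode_byte & 0xc0) == 0xc0)
		phys = 0xffb00000;	/* bits 7:6 set -> 5MiB window   */
	else if (decode_byte & 0x80)
		phys = 0xffc00000;	/* bit 7 set    -> 4MiB window   */
	else
		phys = 0xffff0000;	/* otherwise    -> 64KiB window  */

	/* The window always ends at 0xFFFFFFFF, so its size is simply the
	 * distance from phys to the top of the 32-bit space. */
	return 0xffffffffUL - phys + 1UL;	/* e.g. 0x00500000 for 5MiB */
}

The same subtraction, 0xffffffffUL - map_top + 1UL, reappears inside the probe loop to size each candidate map as the loop walks upward in ROM_PROBE_STEP_SIZE (64KiB) steps.
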
diff --git a/linux-2.4.x/drivers/mtd/maps/arctic-mtd.c b/linux-2.4.x/drivers/mtd/maps/arctic-mtd.c
new file mode 100644
index 0000000..d95ae58
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/arctic-mtd.c
@@ -0,0 +1,135 @@
+/*
+ * $Id: arctic-mtd.c,v 1.14 2005/11/07 11:14:26 gleixner Exp $
+ *
+ * drivers/mtd/maps/arctic-mtd.c MTD mappings and partition tables for
+ * IBM 405LP Arctic boards.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2002, International Business Machines Corporation
+ * All Rights Reserved.
+ *
+ * Bishop Brock
+ * IBM Research, Austin Center for Low-Power Computing
+ * bcbrock@us.ibm.com
+ * March 2002
+ *
+ * modified for Arctic by,
+ * David Gibson
+ * IBM OzLabs, Canberra, Australia
+ * <arctic@gibson.dropbear.id.au>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/ibm4xx.h>
+
+/*
+ * 0 : 0xFE00 0000 - 0xFEFF FFFF : Filesystem 1 (16MiB)
+ * 1 : 0xFF00 0000 - 0xFF4F FFFF : kernel (5.12MiB)
+ * 2 : 0xFF50 0000 - 0xFFF5 FFFF : Filesystem 2 (10.624MiB) (if non-XIP)
+ * 3 : 0xFFF6 0000 - 0xFFFF FFFF : PIBS Firmware (640KiB)
+ */
+
+#define FFS1_SIZE 0x01000000 /* 16MiB */
+#define KERNEL_SIZE 0x00500000 /* 5.12MiB */
+#define FFS2_SIZE 0x00a60000 /* 10.624MiB */
+#define FIRMWARE_SIZE 0x000a0000 /* 640KiB */
+
+
+#define NAME "Arctic Linux Flash"
+#define PADDR SUBZERO_BOOTFLASH_PADDR
+#define BUSWIDTH 2
+#define SIZE SUBZERO_BOOTFLASH_SIZE
+#define PARTITIONS 4
+
+/* Flash memories on these boards are memory resources, accessed big-endian. */
+
+
+static struct map_info arctic_mtd_map = {
+ .name = NAME,
+ .size = SIZE,
+ .bankwidth = BUSWIDTH,
+ .phys = PADDR,
+};
+
+static struct mtd_info *arctic_mtd;
+
+static struct mtd_partition arctic_partitions[PARTITIONS] = {
+ { .name = "Filesystem",
+ .size = FFS1_SIZE,
+ .offset = 0,},
+ { .name = "Kernel",
+ .size = KERNEL_SIZE,
+ .offset = FFS1_SIZE,},
+ { .name = "Filesystem",
+ .size = FFS2_SIZE,
+ .offset = FFS1_SIZE + KERNEL_SIZE,},
+ { .name = "Firmware",
+ .size = FIRMWARE_SIZE,
+ .offset = SUBZERO_BOOTFLASH_SIZE - FIRMWARE_SIZE,},
+};
+
+static int __init
+init_arctic_mtd(void)
+{
+ printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
+
+ arctic_mtd_map.virt = ioremap(PADDR, SIZE);
+
+ if (!arctic_mtd_map.virt) {
+ printk("%s: failed to ioremap 0x%x\n", NAME, PADDR);
+ return -EIO;
+ }
+ simple_map_init(&arctic_mtd_map);
+
+ printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
+ arctic_mtd = do_map_probe("cfi_probe", &arctic_mtd_map);
+
+ if (!arctic_mtd)
+ return -ENXIO;
+
+ arctic_mtd->owner = THIS_MODULE;
+
+ return add_mtd_partitions(arctic_mtd, arctic_partitions, PARTITIONS);
+}
+
+static void __exit
+cleanup_arctic_mtd(void)
+{
+ if (arctic_mtd) {
+ del_mtd_partitions(arctic_mtd);
+ map_destroy(arctic_mtd);
+ iounmap((void *) arctic_mtd_map.virt);
+ }
+}
+
+module_init(init_arctic_mtd);
+module_exit(cleanup_arctic_mtd);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Gibson <arctic@gibson.dropbear.id.au>");
+MODULE_DESCRIPTION("MTD map and partitions for IBM 405LP Arctic boards");
diff --git a/linux-2.4.x/drivers/mtd/maps/autcpu12-nvram.c b/linux-2.4.x/drivers/mtd/maps/autcpu12-nvram.c
index db78b01..7ed3424 100644
--- a/linux-2.4.x/drivers/mtd/maps/autcpu12-nvram.c
+++ b/linux-2.4.x/drivers/mtd/maps/autcpu12-nvram.c
@@ -1,8 +1,8 @@
/*
- * NV-RAM memory access on autcpu12
+ * NV-RAM memory access on autcpu12
* (C) 2002 Thomas Gleixner (gleixner@autronix.de)
*
- * $Id: autcpu12-nvram.c,v 1.1 2002/02/22 09:30:24 gleixner Exp $
+ * $Id: autcpu12-nvram.c,v 1.9 2005/11/07 11:14:26 gleixner Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <asm/sizes.h>
#include <asm/hardware.h>
@@ -32,103 +33,50 @@
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
-__u8 autcpu12_read8(struct map_info *map, unsigned long ofs)
-{
- return __raw_readb(map->map_priv_1 + ofs);
-}
-
-__u16 autcpu12_read16(struct map_info *map, unsigned long ofs)
-{
- return __raw_readw(map->map_priv_1 + ofs);
-}
-
-__u32 autcpu12_read32(struct map_info *map, unsigned long ofs)
-{
- return __raw_readl(map->map_priv_1 + ofs);
-}
-
-void autcpu12_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- __raw_writeb(d, map->map_priv_1 + adr);
- mb();
-}
-
-void autcpu12_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- __raw_writew(d, map->map_priv_1 + adr);
- mb();
-}
-
-void autcpu12_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- __raw_writel(d, map->map_priv_1 + adr);
- mb();
-}
-
-void autcpu12_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
-
-void autcpu12_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- while(len) {
- __raw_writeb(*(unsigned char *) from, map->map_priv_1 + to);
- from++;
- to++;
- len--;
- }
-}
static struct mtd_info *sram_mtd;
struct map_info autcpu12_sram_map = {
- name: "SRAM",
- size: 32768,
- buswidth: 8,
- read8: autcpu12_read8,
- read16: autcpu12_read16,
- read32: autcpu12_read32,
- copy_from: autcpu12_copy_from,
- write8: autcpu12_write8,
- write16: autcpu12_write16,
- write32: autcpu12_write32,
- copy_to: autcpu12_copy_to
+ .name = "SRAM",
+ .size = 32768,
+ .bankwidth = 4,
+ .phys = 0x12000000,
};
static int __init init_autcpu12_sram (void)
{
int err, save0, save1;
- autcpu12_sram_map.map_priv_1 = (unsigned long)ioremap(0x12000000, SZ_128K);
- if (!autcpu12_sram_map.map_priv_1) {
+ autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
+ if (!autcpu12_sram_map.virt) {
printk("Failed to ioremap autcpu12 NV-RAM space\n");
err = -EIO;
goto out;
}
-
- /*
- * Check for 32K/128K
- * read ofs 0
- * read ofs 0x10000
+ simple_map_init(&autcpu_sram_map);
+
+ /*
+ * Check for 32K/128K
+ * read ofs 0
+ * read ofs 0x10000
	 * Write complement to ofs 0x10000
* Read and check result on ofs 0x0
* Restore contents
*/
- save0 = autcpu12_read32(&autcpu12_sram_map,0);
- save1 = autcpu12_read32(&autcpu12_sram_map,0x10000);
- autcpu12_write32(&autcpu12_sram_map,~save0,0x10000);
- /* if we find this pattern on 0x0, we have 32K size
+ save0 = map_read32(&autcpu12_sram_map,0);
+ save1 = map_read32(&autcpu12_sram_map,0x10000);
+ map_write32(&autcpu12_sram_map,~save0,0x10000);
+ /* if we find this pattern on 0x0, we have 32K size
* restore contents and exit
*/
- if ( autcpu12_read32(&autcpu12_sram_map,0) != save0) {
- autcpu12_write32(&autcpu12_sram_map,save0,0x0);
+ if ( map_read32(&autcpu12_sram_map,0) != save0) {
+ map_write32(&autcpu12_sram_map,save0,0x0);
goto map;
}
/* We have a 128K found, restore 0x10000 and set size
* to 128K
*/
- autcpu12_write32(&autcpu12_sram_map,save1,0x10000);
+ map_write32(&autcpu12_sram_map,save1,0x10000);
autcpu12_sram_map.size = SZ_128K;
map:
@@ -139,17 +87,17 @@ map:
goto out_ioremap;
}
- sram_mtd->module = THIS_MODULE;
+ sram_mtd->owner = THIS_MODULE;
sram_mtd->erasesize = 16;
-
+
if (add_mtd_device(sram_mtd)) {
printk("NV-RAM device addition failed\n");
err = -ENOMEM;
goto out_probe;
}
- printk("NV-RAM device size %ldK registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K);
-
+ printk("NV-RAM device size %ldKiB registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K);
+
return 0;
out_probe:
@@ -157,7 +105,7 @@ out_probe:
sram_mtd = 0;
out_ioremap:
- iounmap((void *)autcpu12_sram_map.map_priv_1);
+ iounmap((void *)autcpu12_sram_map.virt);
out:
return err;
}
@@ -167,7 +115,7 @@ static void __exit cleanup_autcpu12_maps(void)
if (sram_mtd) {
del_mtd_device(sram_mtd);
map_destroy(sram_mtd);
- iounmap((void *)autcpu12_sram_map.map_priv_1);
+ iounmap((void *)autcpu12_sram_map.virt);
}
}
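
The size probe above is an address-aliasing test: on a 32KiB part the address line selecting offset 0x10000 is not decoded, so a write there lands back on offset 0. The same test pulled out into a self-contained helper is sketched below; the function name is invented, while the offsets, accessors and restore steps mirror the driver.

/*
 * Sketch of the aliasing test used in init_autcpu12_sram(): returns the
 * detected size in bytes.  The function name is invented.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mtd/map.h>

static unsigned long __init sram_detect_size(struct map_info *map)
{
	u32 save0 = map_read32(map, 0);
	u32 save1 = map_read32(map, 0x10000);
	unsigned long size = 0x20000;		/* assume 128KiB */

	map_write32(map, ~save0, 0x10000);	/* poke the high bank */

	if (map_read32(map, 0) != save0) {
		/* Offset 0 changed: 0x10000 aliases 0x0, so only 32KiB is
		 * decoded.  Undo the write through the alias. */
		map_write32(map, save0, 0);
		size = 0x8000;
	} else {
		/* Genuine 128KiB part: restore the word we clobbered. */
		map_write32(map, save1, 0x10000);
	}
	return size;
}
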
diff --git a/linux-2.4.x/drivers/mtd/maps/bast-flash.c b/linux-2.4.x/drivers/mtd/maps/bast-flash.c
new file mode 100644
index 0000000..fd0fae8
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/bast-flash.c
@@ -0,0 +1,225 @@
+/* linux/drivers/mtd/maps/bast_flash.c
+ *
+ * Copyright (c) 2004-2005 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Simtec Bast (EB2410ITX) NOR MTD Mapping driver
+ *
+ * Changelog:
+ * 20-Sep-2004 BJD Initial version
+ * 17-Jan-2005 BJD Add whole device if no partitions found
+ *
+ * $Id: bast-flash.c,v 1.6 2005/11/29 20:01:28 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/mach/flash.h>
+
+#include <asm/arch/map.h>
+#include <asm/arch/bast-map.h>
+#include <asm/arch/bast-cpld.h>
+
+#ifdef CONFIG_MTD_BAST_MAXSIZE
+#define AREA_MAXSIZE (CONFIG_MTD_BAST_MAXSIZE * SZ_1M)
+#else
+#define AREA_MAXSIZE (32 * SZ_1M)
+#endif
+
+#define PFX "bast-flash: "
+
+struct bast_flash_info {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct mtd_partition *partitions;
+ struct resource *area;
+};
+
+static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+static void bast_flash_setrw(int to)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ val = __raw_readb(BAST_VA_CTRL3);
+
+ if (to)
+ val |= BAST_CPLD_CTRL3_ROMWEN;
+ else
+ val &= ~BAST_CPLD_CTRL3_ROMWEN;
+
+ pr_debug("new cpld ctrl3=%02x\n", val);
+
+ __raw_writeb(val, BAST_VA_CTRL3);
+ local_irq_restore(flags);
+}
+
+static int bast_flash_remove(struct platform_device *pdev)
+{
+ struct bast_flash_info *info = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (info == NULL)
+ return 0;
+
+ if (info->map.virt != NULL)
+ iounmap(info->map.virt);
+
+ if (info->mtd) {
+ del_mtd_partitions(info->mtd);
+ map_destroy(info->mtd);
+ }
+
+ kfree(info->partitions);
+
+ if (info->area) {
+ release_resource(info->area);
+ kfree(info->area);
+ }
+
+ kfree(info);
+
+ return 0;
+}
+
+static int bast_flash_probe(struct platform_device *pdev)
+{
+ struct bast_flash_info *info;
+ struct resource *res;
+ int err = 0;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ printk(KERN_ERR PFX "no memory for flash info\n");
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ memzero(info, sizeof(*info));
+ platform_set_drvdata(pdev, info);
+
+ res = pdev->resource; /* assume that the flash has one resource */
+
+ info->map.phys = res->start;
+ info->map.size = res->end - res->start + 1;
+ info->map.name = pdev->dev.bus_id;
+ info->map.bankwidth = 2;
+
+ if (info->map.size > AREA_MAXSIZE)
+ info->map.size = AREA_MAXSIZE;
+
+ pr_debug("%s: area %08lx, size %ld\n", __FUNCTION__,
+ info->map.phys, info->map.size);
+
+ info->area = request_mem_region(res->start, info->map.size,
+ pdev->name);
+ if (info->area == NULL) {
+ printk(KERN_ERR PFX "cannot reserve flash memory region\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ info->map.virt = ioremap(res->start, info->map.size);
+ pr_debug("%s: virt at %08x\n", __FUNCTION__, (int)info->map.virt);
+
+ if (info->map.virt == 0) {
+ printk(KERN_ERR PFX "failed to ioremap() region\n");
+ err = -EIO;
+ goto exit_error;
+ }
+
+ simple_map_init(&info->map);
+
+ /* enable the write to the flash area */
+
+ bast_flash_setrw(1);
+
+ /* probe for the device(s) */
+
+ info->mtd = do_map_probe("jedec_probe", &info->map);
+ if (info->mtd == NULL)
+ info->mtd = do_map_probe("cfi_probe", &info->map);
+
+ if (info->mtd == NULL) {
+ printk(KERN_ERR PFX "map_probe() failed\n");
+ err = -ENXIO;
+ goto exit_error;
+ }
+
+ /* mark ourselves as the owner */
+ info->mtd->owner = THIS_MODULE;
+
+ err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
+ if (err > 0) {
+ err = add_mtd_partitions(info->mtd, info->partitions, err);
+ if (err)
+ printk(KERN_ERR PFX "cannot add/parse partitions\n");
+ } else {
+ err = add_mtd_device(info->mtd);
+ }
+
+ if (err == 0)
+ return 0;
+
+ /* fall through to exit error */
+
+ exit_error:
+ bast_flash_remove(pdev);
+ return err;
+}
+
+static struct platform_driver bast_flash_driver = {
+ .probe = bast_flash_probe,
+ .remove = bast_flash_remove,
+ .driver = {
+ .name = "bast-nor",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bast_flash_init(void)
+{
+ printk("BAST NOR-Flash Driver, (c) 2004 Simtec Electronics\n");
+ return platform_driver_register(&bast_flash_driver);
+}
+
+static void __exit bast_flash_exit(void)
+{
+ platform_driver_unregister(&bast_flash_driver);
+}
+
+module_init(bast_flash_init);
+module_exit(bast_flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("BAST MTD Map driver");
diff --git a/linux-2.4.x/drivers/mtd/maps/beech-mtd.c b/linux-2.4.x/drivers/mtd/maps/beech-mtd.c
new file mode 100644
index 0000000..5df7361
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/beech-mtd.c
@@ -0,0 +1,112 @@
+/*
+ * $Id: beech-mtd.c,v 1.11 2005/11/07 11:14:26 gleixner Exp $
+ *
+ * drivers/mtd/maps/beech-mtd.c MTD mappings and partition tables for
+ * IBM 405LP Beech boards.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (C) 2002, International Business Machines Corporation
+ * All Rights Reserved.
+ *
+ * Bishop Brock
+ * IBM Research, Austin Center for Low-Power Computing
+ * bcbrock@us.ibm.com
+ * March 2002
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/ibm4xx.h>
+
+#define NAME "Beech Linux Flash"
+#define PADDR BEECH_BIGFLASH_PADDR
+#define SIZE BEECH_BIGFLASH_SIZE
+#define BUSWIDTH 1
+
+/* Flash memories on these boards are memory resources, accessed big-endian. */
+
+
+static struct map_info beech_mtd_map = {
+ .name = NAME,
+ .size = SIZE,
+ .bankwidth = BUSWIDTH,
+ .phys = PADDR
+};
+
+static struct mtd_info *beech_mtd;
+
+static struct mtd_partition beech_partitions[2] = {
+ {
+ .name = "Linux Kernel",
+ .size = BEECH_KERNEL_SIZE,
+ .offset = BEECH_KERNEL_OFFSET
+ }, {
+ .name = "Free Area",
+ .size = BEECH_FREE_AREA_SIZE,
+ .offset = BEECH_FREE_AREA_OFFSET
+ }
+};
+
+static int __init
+init_beech_mtd(void)
+{
+ printk("%s: 0x%08x at 0x%08x\n", NAME, SIZE, PADDR);
+
+ beech_mtd_map.virt = ioremap(PADDR, SIZE);
+
+ if (!beech_mtd_map.virt) {
+ printk("%s: failed to ioremap 0x%x\n", NAME, PADDR);
+ return -EIO;
+ }
+
+ simple_map_init(&beech_mtd_map);
+
+ printk("%s: probing %d-bit flash bus\n", NAME, BUSWIDTH * 8);
+ beech_mtd = do_map_probe("cfi_probe", &beech_mtd_map);
+
+ if (!beech_mtd)
+ return -ENXIO;
+
+ beech_mtd->owner = THIS_MODULE;
+
+ return add_mtd_partitions(beech_mtd, beech_partitions, 2);
+}
+
+static void __exit
+cleanup_beech_mtd(void)
+{
+ if (beech_mtd) {
+ del_mtd_partitions(beech_mtd);
+ map_destroy(beech_mtd);
+ iounmap((void *) beech_mtd_map.virt);
+ }
+}
+
+module_init(init_beech_mtd);
+module_exit(cleanup_beech_mtd);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bishop Brock <bcbrock@us.ibm.com>");
+MODULE_DESCRIPTION("MTD map and partitions for IBM 405LP Beech boards");
diff --git a/linux-2.4.x/drivers/mtd/maps/cdb89712.c b/linux-2.4.x/drivers/mtd/maps/cdb89712.c
index 16b5a61..9f17bb6 100644
--- a/linux-2.4.x/drivers/mtd/maps/cdb89712.c
+++ b/linux-2.4.x/drivers/mtd/maps/cdb89712.c
@@ -1,13 +1,14 @@
/*
* Flash on Cirrus CDB89712
*
- * $Id: cdb89712.c,v 1.3 2001/10/02 15:14:43 rmk Exp $
+ * $Id: cdb89712.c,v 1.11 2005/11/07 11:14:26 gleixner Exp $
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <linux/mtd/mtd.h>
@@ -16,96 +17,40 @@
-__u8 cdb89712_read8(struct map_info *map, unsigned long ofs)
-{
- return __raw_readb(map->map_priv_1 + ofs);
-}
-
-__u16 cdb89712_read16(struct map_info *map, unsigned long ofs)
-{
- return __raw_readw(map->map_priv_1 + ofs);
-}
-
-__u32 cdb89712_read32(struct map_info *map, unsigned long ofs)
-{
- return __raw_readl(map->map_priv_1 + ofs);
-}
-
-void cdb89712_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- __raw_writeb(d, map->map_priv_1 + adr);
- mb();
-}
-
-void cdb89712_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- __raw_writew(d, map->map_priv_1 + adr);
- mb();
-}
-
-void cdb89712_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- __raw_writel(d, map->map_priv_1 + adr);
- mb();
-}
-
-void cdb89712_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- // printk ("cdb89712_copy_from: 0x%x@0x%x -> 0x%x\n", len, from, to);
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
-
-void cdb89712_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- while(len) {
- __raw_writeb(*(unsigned char *) from, map->map_priv_1 + to);
- from++;
- to++;
- len--;
- }
-}
-
static struct mtd_info *flash_mtd;
struct map_info cdb89712_flash_map = {
- name: "flash",
- size: FLASH_SIZE,
- buswidth: FLASH_WIDTH,
- read8: cdb89712_read8,
- read16: cdb89712_read16,
- read32: cdb89712_read32,
- copy_from: cdb89712_copy_from,
- write8: cdb89712_write8,
- write16: cdb89712_write16,
- write32: cdb89712_write32,
- copy_to: cdb89712_copy_to
+ .name = "flash",
+ .size = FLASH_SIZE,
+ .bankwidth = FLASH_WIDTH,
+ .phys = FLASH_START,
};
struct resource cdb89712_flash_resource = {
- name: "Flash",
- start: FLASH_START,
- end: FLASH_START + FLASH_SIZE - 1,
- flags: IORESOURCE_IO | IORESOURCE_BUSY,
+ .name = "Flash",
+ .start = FLASH_START,
+ .end = FLASH_START + FLASH_SIZE - 1,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
static int __init init_cdb89712_flash (void)
{
int err;
-
+
if (request_resource (&ioport_resource, &cdb89712_flash_resource)) {
printk(KERN_NOTICE "Failed to reserve Cdb89712 FLASH space\n");
err = -EBUSY;
goto out;
}
-
- cdb89712_flash_map.map_priv_1 = (unsigned long)ioremap(FLASH_START, FLASH_SIZE);
- if (!cdb89712_flash_map.map_priv_1) {
+
+ cdb89712_flash_map.virt = ioremap(FLASH_START, FLASH_SIZE);
+ if (!cdb89712_flash_map.virt) {
printk(KERN_NOTICE "Failed to ioremap Cdb89712 FLASH space\n");
err = -EIO;
goto out_resource;
}
-
+ simple_map_init(&cdb89712_flash_map);
flash_mtd = do_map_probe("cfi_probe", &cdb89712_flash_map);
if (!flash_mtd) {
flash_mtd = do_map_probe("map_rom", &cdb89712_flash_map);
@@ -118,21 +63,21 @@ static int __init init_cdb89712_flash (void)
goto out_ioremap;
}
- flash_mtd->module = THIS_MODULE;
-
+ flash_mtd->owner = THIS_MODULE;
+
if (add_mtd_device(flash_mtd)) {
printk("FLASH device addition failed\n");
err = -ENOMEM;
goto out_probe;
}
-
+
return 0;
out_probe:
map_destroy(flash_mtd);
flash_mtd = 0;
out_ioremap:
- iounmap((void *)cdb89712_flash_map.map_priv_1);
+ iounmap((void *)cdb89712_flash_map.virt);
out_resource:
release_resource (&cdb89712_flash_resource);
out:
@@ -146,43 +91,36 @@ out:
static struct mtd_info *sram_mtd;
struct map_info cdb89712_sram_map = {
- name: "SRAM",
- size: SRAM_SIZE,
- buswidth: SRAM_WIDTH,
- read8: cdb89712_read8,
- read16: cdb89712_read16,
- read32: cdb89712_read32,
- copy_from: cdb89712_copy_from,
- write8: cdb89712_write8,
- write16: cdb89712_write16,
- write32: cdb89712_write32,
- copy_to: cdb89712_copy_to
+ .name = "SRAM",
+ .size = SRAM_SIZE,
+ .bankwidth = SRAM_WIDTH,
+ .phys = SRAM_START,
};
struct resource cdb89712_sram_resource = {
- name: "SRAM",
- start: SRAM_START,
- end: SRAM_START + SRAM_SIZE - 1,
- flags: IORESOURCE_IO | IORESOURCE_BUSY,
+ .name = "SRAM",
+ .start = SRAM_START,
+ .end = SRAM_START + SRAM_SIZE - 1,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
static int __init init_cdb89712_sram (void)
{
int err;
-
+
if (request_resource (&ioport_resource, &cdb89712_sram_resource)) {
printk(KERN_NOTICE "Failed to reserve Cdb89712 SRAM space\n");
err = -EBUSY;
goto out;
}
-
- cdb89712_sram_map.map_priv_1 = (unsigned long)ioremap(SRAM_START, SRAM_SIZE);
- if (!cdb89712_sram_map.map_priv_1) {
+
+ cdb89712_sram_map.virt = ioremap(SRAM_START, SRAM_SIZE);
+ if (!cdb89712_sram_map.virt) {
printk(KERN_NOTICE "Failed to ioremap Cdb89712 SRAM space\n");
err = -EIO;
goto out_resource;
}
-
+ simple_map_init(&cdb89712_sram_map);
sram_mtd = do_map_probe("map_ram", &cdb89712_sram_map);
if (!sram_mtd) {
printk("SRAM probe failed\n");
@@ -190,22 +128,22 @@ static int __init init_cdb89712_sram (void)
goto out_ioremap;
}
- sram_mtd->module = THIS_MODULE;
+ sram_mtd->owner = THIS_MODULE;
sram_mtd->erasesize = 16;
-
+
if (add_mtd_device(sram_mtd)) {
printk("SRAM device addition failed\n");
err = -ENOMEM;
goto out_probe;
}
-
+
return 0;
out_probe:
map_destroy(sram_mtd);
sram_mtd = 0;
out_ioremap:
- iounmap((void *)cdb89712_sram_map.map_priv_1);
+ iounmap((void *)cdb89712_sram_map.virt);
out_resource:
release_resource (&cdb89712_sram_resource);
out:
@@ -221,39 +159,36 @@ out:
static struct mtd_info *bootrom_mtd;
struct map_info cdb89712_bootrom_map = {
- name: "BootROM",
- size: BOOTROM_SIZE,
- buswidth: BOOTROM_WIDTH,
- read8: cdb89712_read8,
- read16: cdb89712_read16,
- read32: cdb89712_read32,
- copy_from: cdb89712_copy_from,
+ .name = "BootROM",
+ .size = BOOTROM_SIZE,
+ .bankwidth = BOOTROM_WIDTH,
+ .phys = BOOTROM_START,
};
struct resource cdb89712_bootrom_resource = {
- name: "BootROM",
- start: BOOTROM_START,
- end: BOOTROM_START + BOOTROM_SIZE - 1,
- flags: IORESOURCE_IO | IORESOURCE_BUSY,
+ .name = "BootROM",
+ .start = BOOTROM_START,
+ .end = BOOTROM_START + BOOTROM_SIZE - 1,
+ .flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
static int __init init_cdb89712_bootrom (void)
{
int err;
-
+
if (request_resource (&ioport_resource, &cdb89712_bootrom_resource)) {
printk(KERN_NOTICE "Failed to reserve Cdb89712 BOOTROM space\n");
err = -EBUSY;
goto out;
}
-
- cdb89712_bootrom_map.map_priv_1 = (unsigned long)ioremap(BOOTROM_START, BOOTROM_SIZE);
- if (!cdb89712_bootrom_map.map_priv_1) {
+
+ cdb89712_bootrom_map.virt = ioremap(BOOTROM_START, BOOTROM_SIZE);
+ if (!cdb89712_bootrom_map.virt) {
printk(KERN_NOTICE "Failed to ioremap Cdb89712 BootROM space\n");
err = -EIO;
goto out_resource;
}
-
+ simple_map_init(&cdb89712_bootrom_map);
bootrom_mtd = do_map_probe("map_rom", &cdb89712_bootrom_map);
if (!bootrom_mtd) {
printk("BootROM probe failed\n");
@@ -261,22 +196,22 @@ static int __init init_cdb89712_bootrom (void)
goto out_ioremap;
}
- bootrom_mtd->module = THIS_MODULE;
+ bootrom_mtd->owner = THIS_MODULE;
bootrom_mtd->erasesize = 0x10000;
-
+
if (add_mtd_device(bootrom_mtd)) {
printk("BootROM device addition failed\n");
err = -ENOMEM;
goto out_probe;
}
-
+
return 0;
out_probe:
map_destroy(bootrom_mtd);
bootrom_mtd = 0;
out_ioremap:
- iounmap((void *)cdb89712_bootrom_map.map_priv_1);
+ iounmap((void *)cdb89712_bootrom_map.virt);
out_resource:
release_resource (&cdb89712_bootrom_resource);
out:
@@ -290,37 +225,37 @@ out:
static int __init init_cdb89712_maps(void)
{
- printk(KERN_INFO "Cirrus CDB89712 MTD mappings:\n Flash 0x%x at 0x%x\n SRAM 0x%x at 0x%x\n BootROM 0x%x at 0x%x\n",
+ printk(KERN_INFO "Cirrus CDB89712 MTD mappings:\n Flash 0x%x at 0x%x\n SRAM 0x%x at 0x%x\n BootROM 0x%x at 0x%x\n",
FLASH_SIZE, FLASH_START, SRAM_SIZE, SRAM_START, BOOTROM_SIZE, BOOTROM_START);
init_cdb89712_flash();
init_cdb89712_sram();
init_cdb89712_bootrom();
-
+
return 0;
}
-
+
static void __exit cleanup_cdb89712_maps(void)
{
if (sram_mtd) {
del_mtd_device(sram_mtd);
map_destroy(sram_mtd);
- iounmap((void *)cdb89712_sram_map.map_priv_1);
+ iounmap((void *)cdb89712_sram_map.virt);
release_resource (&cdb89712_sram_resource);
}
-
+
if (flash_mtd) {
del_mtd_device(flash_mtd);
map_destroy(flash_mtd);
- iounmap((void *)cdb89712_flash_map.map_priv_1);
+ iounmap((void *)cdb89712_flash_map.virt);
release_resource (&cdb89712_flash_resource);
}
if (bootrom_mtd) {
del_mtd_device(bootrom_mtd);
map_destroy(bootrom_mtd);
- iounmap((void *)cdb89712_bootrom_map.map_priv_1);
+ iounmap((void *)cdb89712_bootrom_map.virt);
release_resource (&cdb89712_bootrom_resource);
}
}
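
The strings handed to do_map_probe() name chip drivers from drivers/mtd/chips: "cfi_probe" and "jedec_probe" detect programmable flash, while "map_rom" and "map_ram" wrap windows that only need plain accesses. The fallback order used for the CDB89712 flash bank, as an invented stand-alone helper:

/*
 * Invented helper illustrating the probe fallback used for the CDB89712
 * flash window above: try CFI first, then fall back to treating the
 * window as a plain ROM so it is at least readable.
 */
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>

static struct mtd_info * __init probe_flash_or_rom(struct map_info *map)
{
	struct mtd_info *mtd = do_map_probe("cfi_probe", map);

	if (!mtd)
		mtd = do_map_probe("map_rom", map);	/* read-only fallback */
	return mtd;
}
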
diff --git a/linux-2.4.x/drivers/mtd/maps/ceiva.c b/linux-2.4.x/drivers/mtd/maps/ceiva.c
new file mode 100644
index 0000000..8b5abd2
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/ceiva.c
@@ -0,0 +1,350 @@
+/*
+ * Ceiva flash memory driver.
+ * Copyright (C) 2002 Rob Scott <rscott@mtrob.fdns.net>
+ *
+ * Note: this driver supports jedec compatible devices. Modification
+ * for CFI compatible devices should be straightforward: change
+ * jedec_probe to cfi_probe.
+ *
+ * Based on: sa1100-flash.c, which has the following copyright:
+ * Flash memory access on SA11x0 based devices
+ *
+ * (C) 2000 Nicolas Pitre <nico@cam.org>
+ *
+ * $Id: ceiva.c,v 1.13 2005/11/29 20:01:28 gleixner Exp $
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
+
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+
+/*
+ * This isn't complete yet, so...
+ */
+#define CONFIG_MTD_CEIVA_STATICMAP
+
+#ifdef CONFIG_MTD_CEIVA_STATICMAP
+/*
+ * See include/linux/mtd/partitions.h for definition of the mtd_partition
+ * structure.
+ *
+ * Please note:
+ * 1. The flash size given should be the largest flash size that can
+ * be accommodated.
+ *
+ * 2. The bus width must be defined in clps_setup_flash.
+ *
+ * The MTD layer will detect flash chip aliasing and reduce the size of
+ * the map accordingly.
+ *
+ */
+
+#ifdef CONFIG_ARCH_CEIVA
+/* Flash / Partition sizing */
+/* For the 28F8003, we use the block mapping to calculate the sizes */
+#define MAX_SIZE_KiB (16 + 8 + 8 + 96 + (7*128))
+#define BOOT_PARTITION_SIZE_KiB (16)
+#define PARAMS_PARTITION_SIZE_KiB (8)
+#define KERNEL_PARTITION_SIZE_KiB (4*128)
+/* Use both the remaining portion of the first flash, and all of the second flash */
+#define ROOT_PARTITION_SIZE_KiB (3*128) + (8*128)
+
+static struct mtd_partition ceiva_partitions[] = {
+ {
+ .name = "Ceiva BOOT partition",
+ .size = BOOT_PARTITION_SIZE_KiB*1024,
+ .offset = 0,
+
+ },{
+ .name = "Ceiva parameters partition",
+ .size = PARAMS_PARTITION_SIZE_KiB*1024,
+ .offset = (16 + 8) * 1024,
+ },{
+ .name = "Ceiva kernel partition",
+ .size = (KERNEL_PARTITION_SIZE_KiB)*1024,
+ .offset = 0x20000,
+
+ },{
+ .name = "Ceiva root filesystem partition",
+ .offset = MTDPART_OFS_APPEND,
+ .size = (ROOT_PARTITION_SIZE_KiB)*1024,
+ }
+};
+#endif
+
+static int __init clps_static_partitions(struct mtd_partition **parts)
+{
+ int nb_parts = 0;
+
+#ifdef CONFIG_ARCH_CEIVA
+ if (machine_is_ceiva()) {
+ *parts = ceiva_partitions;
+ nb_parts = ARRAY_SIZE(ceiva_partitions);
+ }
+#endif
+ return nb_parts;
+}
+#endif
+
+struct clps_info {
+ unsigned long base;
+ unsigned long size;
+ int width;
+ void *vbase;
+ struct map_info *map;
+ struct mtd_info *mtd;
+ struct resource *res;
+};
+
+#define NR_SUBMTD 4
+
+static struct clps_info info[NR_SUBMTD];
+
+static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info **rmtd)
+{
+ struct mtd_info *subdev[nr];
+ struct map_info *maps;
+ int i, found = 0, ret = 0;
+
+ /*
+ * Allocate the map_info structs in one go.
+ */
+ maps = kmalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
+ if (!maps)
+ return -ENOMEM;
+ memset(maps, 0, sizeof(struct map_info) * nr);
+ /*
+ * Claim and then map the memory regions.
+ */
+ for (i = 0; i < nr; i++) {
+ if (clps[i].base == (unsigned long)-1)
+ break;
+
+ clps[i].res = request_mem_region(clps[i].base, clps[i].size, "clps flash");
+ if (!clps[i].res) {
+ ret = -EBUSY;
+ break;
+ }
+
+ clps[i].map = maps + i;
+
+ clps[i].map->name = "clps flash";
+ clps[i].map->phys = clps[i].base;
+
+ clps[i].vbase = ioremap(clps[i].base, clps[i].size);
+ if (!clps[i].vbase) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ clps[i].map->virt = (void __iomem *)clps[i].vbase;
+ clps[i].map->bankwidth = clps[i].width;
+ clps[i].map->size = clps[i].size;
+
+ simple_map_init(&clps[i].map);
+
+ clps[i].mtd = do_map_probe("jedec_probe", clps[i].map);
+ if (clps[i].mtd == NULL) {
+ ret = -ENXIO;
+ break;
+ }
+ clps[i].mtd->owner = THIS_MODULE;
+ subdev[i] = clps[i].mtd;
+
+ printk(KERN_INFO "clps flash: JEDEC device at 0x%08lx, %dMiB, "
+ "%d-bit\n", clps[i].base, clps[i].mtd->size >> 20,
+ clps[i].width * 8);
+ found += 1;
+ }
+
+ /*
+ * ENXIO is special. It means we didn't find a chip when
+ * we probed. We need to tear down the mapping, free the
+ * resource and mark it as such.
+ */
+ if (ret == -ENXIO) {
+ iounmap(clps[i].vbase);
+ clps[i].vbase = NULL;
+ release_resource(clps[i].res);
+ clps[i].res = NULL;
+ }
+
+ /*
+ * If we found one device, don't bother with concat support.
+ * If we found multiple devices, use concat if we have it
+ * available, otherwise fail.
+ */
+ if (ret == 0 || ret == -ENXIO) {
+ if (found == 1) {
+ *rmtd = subdev[0];
+ ret = 0;
+ } else if (found > 1) {
+ /*
+ * We detected multiple devices. Concatenate
+ * them together.
+ */
+#ifdef CONFIG_MTD_CONCAT
+ *rmtd = mtd_concat_create(subdev, found,
+ "clps flash");
+ if (*rmtd == NULL)
+ ret = -ENXIO;
+#else
+ printk(KERN_ERR "clps flash: multiple devices "
+ "found but MTD concat support disabled.\n");
+ ret = -ENXIO;
+#endif
+ }
+ }
+
+ /*
+ * If we failed, clean up.
+ */
+ if (ret) {
+ do {
+ if (clps[i].mtd)
+ map_destroy(clps[i].mtd);
+ if (clps[i].vbase)
+ iounmap(clps[i].vbase);
+ if (clps[i].res)
+ release_resource(clps[i].res);
+ } while (i--);
+
+ kfree(maps);
+ }
+
+ return ret;
+}
+
+static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd)
+{
+ int i;
+
+ del_mtd_partitions(mtd);
+
+ if (mtd != clps[0].mtd)
+ mtd_concat_destroy(mtd);
+
+	for (i = NR_SUBMTD - 1; i >= 0; i--) {
+ if (clps[i].mtd)
+ map_destroy(clps[i].mtd);
+ if (clps[i].vbase)
+ iounmap(clps[i].vbase);
+ if (clps[i].res)
+ release_resource(clps[i].res);
+ }
+ kfree(clps[0].map);
+}
+
+/*
+ * We define the memory space, size, and width for the flash memory
+ * space here.
+ */
+
+static int __init clps_setup_flash(void)
+{
+	int nr = 0;
+
+#ifdef CONFIG_ARCH_CEIVA
+ if (machine_is_ceiva()) {
+ info[0].base = CS0_PHYS_BASE;
+ info[0].size = SZ_32M;
+ info[0].width = CEIVA_FLASH_WIDTH;
+ info[1].base = CS1_PHYS_BASE;
+ info[1].size = SZ_32M;
+ info[1].width = CEIVA_FLASH_WIDTH;
+ nr = 2;
+ }
+#endif
+ return nr;
+}
+
+static struct mtd_partition *parsed_parts;
+static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
+
+static void __init clps_locate_partitions(struct mtd_info *mtd)
+{
+ const char *part_type = NULL;
+ int nr_parts = 0;
+ do {
+ /*
+ * Partition selection stuff.
+ */
+ nr_parts = parse_mtd_partitions(mtd, probes, &parsed_parts, 0);
+ if (nr_parts > 0) {
+ part_type = "command line";
+ break;
+ }
+#ifdef CONFIG_MTD_CEIVA_STATICMAP
+ nr_parts = clps_static_partitions(&parsed_parts);
+ if (nr_parts > 0) {
+ part_type = "static";
+ break;
+ }
+ printk("found: %d partitions\n", nr_parts);
+#endif
+ } while (0);
+
+ if (nr_parts == 0) {
+ printk(KERN_NOTICE "clps flash: no partition info "
+ "available, registering whole flash\n");
+ add_mtd_device(mtd);
+ } else {
+ printk(KERN_NOTICE "clps flash: using %s partition "
+ "definition\n", part_type);
+ add_mtd_partitions(mtd, parsed_parts, nr_parts);
+ }
+
+ /* Always succeeds. */
+}
+
+static void __exit clps_destroy_partitions(void)
+{
+ kfree(parsed_parts);
+}
+
+static struct mtd_info *mymtd;
+
+static int __init clps_mtd_init(void)
+{
+ int ret;
+ int nr;
+
+ nr = clps_setup_flash();
+ if (nr < 0)
+ return nr;
+
+ ret = clps_setup_mtd(info, nr, &mymtd);
+ if (ret)
+ return ret;
+
+ clps_locate_partitions(mymtd);
+
+ return 0;
+}
+
+static void __exit clps_mtd_cleanup(void)
+{
+ clps_destroy_mtd(info, mymtd);
+ clps_destroy_partitions();
+}
+
+module_init(clps_mtd_init);
+module_exit(clps_mtd_cleanup);
+
+MODULE_AUTHOR("Rob Scott");
+MODULE_DESCRIPTION("Cirrus Logic JEDEC map driver");
+MODULE_LICENSE("GPL");
diff --git a/linux-2.4.x/drivers/mtd/maps/cfi_flagadm.c b/linux-2.4.x/drivers/mtd/maps/cfi_flagadm.c
index 6c74ab9..35bd597 100644
--- a/linux-2.4.x/drivers/mtd/maps/cfi_flagadm.c
+++ b/linux-2.4.x/drivers/mtd/maps/cfi_flagadm.c
@@ -1,8 +1,8 @@
/*
* Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is>
*
- * $Id: cfi_flagadm.c,v 1.7 2001/10/02 15:05:13 dwmw2 Exp $
- *
+ * $Id: cfi_flagadm.c,v 1.16 2006/03/29 08:31:11 dwmw2 Exp $
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -41,7 +42,7 @@
*/
#define FLASH_PHYS_ADDR 0x40000000
-#define FLASH_SIZE 0x400000
+#define FLASH_SIZE 0x400000
#define FLASH_PARTITION0_ADDR 0x00000000
#define FLASH_PARTITION0_SIZE 0x00020000
@@ -55,111 +56,65 @@
#define FLASH_PARTITION3_ADDR 0x00240000
#define FLASH_PARTITION3_SIZE 0x001C0000
-__u8 flagadm_read8(struct map_info *map, unsigned long ofs)
-{
- return __raw_readb(map->map_priv_1 + ofs);
-}
-
-__u16 flagadm_read16(struct map_info *map, unsigned long ofs)
-{
- return __raw_readw(map->map_priv_1 + ofs);
-}
-
-__u32 flagadm_read32(struct map_info *map, unsigned long ofs)
-{
- return __raw_readl(map->map_priv_1 + ofs);
-}
-
-void flagadm_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
-
-void flagadm_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- __raw_writeb(d, map->map_priv_1 + adr);
- mb();
-}
-
-void flagadm_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- __raw_writew(d, map->map_priv_1 + adr);
- mb();
-}
-
-void flagadm_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- __raw_writel(d, map->map_priv_1 + adr);
- mb();
-}
-
-void flagadm_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio(map->map_priv_1 + to, from, len);
-}
struct map_info flagadm_map = {
- name: "FlagaDM flash device",
- size: FLASH_SIZE,
- buswidth: 2,
- read8: flagadm_read8,
- read16: flagadm_read16,
- read32: flagadm_read32,
- copy_from: flagadm_copy_from,
- write8: flagadm_write8,
- write16: flagadm_write16,
- write32: flagadm_write32,
- copy_to: flagadm_copy_to
+ .name = "FlagaDM flash device",
+ .size = FLASH_SIZE,
+ .bankwidth = 2,
};
struct mtd_partition flagadm_parts[] = {
{
- name : "Bootloader",
- offset : FLASH_PARTITION0_ADDR,
- size : FLASH_PARTITION0_SIZE
+ .name = "Bootloader",
+ .offset = FLASH_PARTITION0_ADDR,
+ .size = FLASH_PARTITION0_SIZE
},
{
- name : "Kernel image",
- offset : FLASH_PARTITION1_ADDR,
- size : FLASH_PARTITION1_SIZE
+ .name = "Kernel image",
+ .offset = FLASH_PARTITION1_ADDR,
+ .size = FLASH_PARTITION1_SIZE
},
{
- name : "Initial ramdisk image",
- offset : FLASH_PARTITION2_ADDR,
- size : FLASH_PARTITION2_SIZE
+ .name = "Initial ramdisk image",
+ .offset = FLASH_PARTITION2_ADDR,
+ .size = FLASH_PARTITION2_SIZE
},
- {
- name : "Persistant storage",
- offset : FLASH_PARTITION3_ADDR,
- size : FLASH_PARTITION3_SIZE
+ {
+ .name = "Persistant storage",
+ .offset = FLASH_PARTITION3_ADDR,
+ .size = FLASH_PARTITION3_SIZE
}
};
-#define PARTITION_COUNT (sizeof(flagadm_parts)/sizeof(struct mtd_partition))
+#define PARTITION_COUNT ARRAY_SIZE(flagadm_parts)
static struct mtd_info *mymtd;
int __init init_flagadm(void)
-{
+{
printk(KERN_NOTICE "FlagaDM flash device: %x at %x\n",
FLASH_SIZE, FLASH_PHYS_ADDR);
-
- flagadm_map.map_priv_1 = (unsigned long)ioremap(FLASH_PHYS_ADDR,
+
+ flagadm_map.phys = FLASH_PHYS_ADDR;
+ flagadm_map.virt = ioremap(FLASH_PHYS_ADDR,
FLASH_SIZE);
- if (!flagadm_map.map_priv_1) {
+ if (!flagadm_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
+
+ simple_map_init(&flagadm_map);
+
mymtd = do_map_probe("cfi_probe", &flagadm_map);
if (mymtd) {
- mymtd->module = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
add_mtd_partitions(mymtd, flagadm_parts, PARTITION_COUNT);
printk(KERN_NOTICE "FlagaDM flash device initialized\n");
return 0;
}
- iounmap((void *)flagadm_map.map_priv_1);
+ iounmap((void *)flagadm_map.virt);
return -ENXIO;
}
@@ -169,9 +124,9 @@ static void __exit cleanup_flagadm(void)
del_mtd_partitions(mymtd);
map_destroy(mymtd);
}
- if (flagadm_map.map_priv_1) {
- iounmap((void *)flagadm_map.map_priv_1);
- flagadm_map.map_priv_1 = 0;
+ if (flagadm_map.virt) {
+ iounmap((void *)flagadm_map.virt);
+ flagadm_map.virt = 0;
}
}
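
The cfi_flagadm.c hunk above shows the conversion that repeats throughout this patch: the hand-rolled read8/write32/copy_* accessors and the old "field: value" initializers are dropped in favour of designated initializers, .phys/.virt instead of map_priv_1, and a call to simple_map_init() before do_map_probe(). Below is a minimal sketch of a map driver written directly in the new style; the EXAMPLE_* constants and example_* names are illustrative and not part of the patch.

/* Minimal linear-window map driver in the post-conversion style. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <asm/io.h>

#define EXAMPLE_PHYS 0x40000000		/* illustrative window address */
#define EXAMPLE_SIZE 0x00400000		/* illustrative window size */

static struct mtd_info *example_mtd;

static struct map_info example_map = {
	.name      = "example flash",
	.size      = EXAMPLE_SIZE,
	.phys      = EXAMPLE_PHYS,
	.bankwidth = 2,
};

static int __init example_init(void)
{
	example_map.virt = ioremap(EXAMPLE_PHYS, EXAMPLE_SIZE);
	if (!example_map.virt)
		return -EIO;

	/* Installs generic read/write/copy_from/copy_to hooks for a
	 * plain memory-mapped bank, replacing the per-driver accessors. */
	simple_map_init(&example_map);

	example_mtd = do_map_probe("cfi_probe", &example_map);
	if (!example_mtd) {
		iounmap(example_map.virt);
		return -ENXIO;
	}
	example_mtd->owner = THIS_MODULE;
	add_mtd_device(example_mtd);
	return 0;
}

static void __exit example_exit(void)
{
	if (example_mtd) {
		del_mtd_device(example_mtd);
		map_destroy(example_mtd);
	}
	if (example_map.virt) {
		iounmap(example_map.virt);
		example_map.virt = NULL;
	}
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
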
diff --git a/linux-2.4.x/drivers/mtd/maps/cstm_mips_ixx.c b/linux-2.4.x/drivers/mtd/maps/cstm_mips_ixx.c
index 3402da3..a370953 100644
--- a/linux-2.4.x/drivers/mtd/maps/cstm_mips_ixx.c
+++ b/linux-2.4.x/drivers/mtd/maps/cstm_mips_ixx.c
@@ -1,10 +1,10 @@
/*
- * $Id: cstm_mips_ixx.c,v 1.5 2001/10/02 15:05:14 dwmw2 Exp $
+ * $Id: cstm_mips_ixx.c,v 1.14 2005/11/07 11:14:26 gleixner Exp $
*
* Mapping of a custom board with both AMD CFI and JEDEC flash in partitions.
* Config with both CFI and JEDEC device support.
*
- * Basically physmap.c with the addition of partitions and
+ * Basically physmap.c with the addition of partitions and
* an array of mapping info to accommodate more than one flash type per board.
*
* Copyright 2000 MontaVista Software Inc.
@@ -33,55 +33,13 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/config.h>
-
-#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
#include <linux/delay.h>
-#endif
-
-__u8 cstm_mips_ixx_read8(struct map_info *map, unsigned long ofs)
-{
- return *(__u8 *)(map->map_priv_1 + ofs);
-}
-
-__u16 cstm_mips_ixx_read16(struct map_info *map, unsigned long ofs)
-{
- return *(__u16 *)(map->map_priv_1 + ofs);
-}
-
-__u32 cstm_mips_ixx_read32(struct map_info *map, unsigned long ofs)
-{
- return *(__u32 *)(map->map_priv_1 + ofs);
-}
-
-void cstm_mips_ixx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
-
-void cstm_mips_ixx_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- *(__u8 *)(map->map_priv_1 + adr) = d;
-}
-
-void cstm_mips_ixx_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- *(__u16 *)(map->map_priv_1 + adr) = d;
-}
-
-void cstm_mips_ixx_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- *(__u32 *)(map->map_priv_1 + adr) = d;
-}
-
-void cstm_mips_ixx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio(map->map_priv_1 + to, from, len);
-}
#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
#define CC_GCR 0xB4013818
@@ -97,56 +55,47 @@ void cstm_mips_ixx_copy_to(struct map_info *map, unsigned long to, const void *f
#define CC_GPAICR 0xB4013804
#endif /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
+#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
void cstm_mips_ixx_set_vpp(struct map_info *map,int vpp)
{
- if (vpp) {
-#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
- __u16 data;
- __u8 data1;
- static u8 first = 1;
-
- // Set GPIO port B pin3 to high
- data = *(__u16 *)(CC_GPBCR);
- data = (data & 0xff0f) | 0x0040;
- *(__u16 *)CC_GPBCR = data;
- *(__u8 *)CC_GPBDR = (*(__u8*)CC_GPBDR) | 0x08;
- if (first) {
- first = 0;
- /* need to have this delay for first
- enabling vpp after powerup */
- udelay(40);
+ static DEFINE_SPINLOCK(vpp_lock);
+ static int vpp_count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpp_lock, flags);
+
+ if (vpp) {
+ if (!vpp_count++) {
+ __u16 data;
+ __u8 data1;
+ static u8 first = 1;
+
+ // Set GPIO port B pin3 to high
+ data = *(__u16 *)(CC_GPBCR);
+ data = (data & 0xff0f) | 0x0040;
+ *(__u16 *)CC_GPBCR = data;
+ *(__u8 *)CC_GPBDR = (*(__u8*)CC_GPBDR) | 0x08;
+ if (first) {
+ first = 0;
+ /* need to have this delay for first
+ enabling vpp after powerup */
+ udelay(40);
+ }
+ }
+ } else {
+ if (!--vpp_count) {
+ __u16 data;
+
+ // Set GPIO port B pin3 to high
+ data = *(__u16 *)(CC_GPBCR);
+ data = (data & 0xff3f) | 0x0040;
+ *(__u16 *)CC_GPBCR = data;
+ *(__u8 *)CC_GPBDR = (*(__u8*)CC_GPBDR) & 0xf7;
+ }
}
-#endif /* CONFIG_MIPS_ITE8172 */
- }
- else {
-#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
- __u16 data;
-
- // Set GPIO port B pin3 to high
- data = *(__u16 *)(CC_GPBCR);
- data = (data & 0xff3f) | 0x0040;
- *(__u16 *)CC_GPBCR = data;
- *(__u8 *)CC_GPBDR = (*(__u8*)CC_GPBDR) & 0xf7;
-#endif /* CONFIG_MIPS_ITE8172 */
- }
+ spin_unlock_irqrestore(&vpp_lock, flags);
}
-
-const struct map_info basic_cstm_mips_ixx_map = {
- NULL,
- 0,
- 0,
- cstm_mips_ixx_read8,
- cstm_mips_ixx_read16,
- cstm_mips_ixx_read32,
- cstm_mips_ixx_copy_from,
- cstm_mips_ixx_write8,
- cstm_mips_ixx_write16,
- cstm_mips_ixx_write32,
- cstm_mips_ixx_copy_to,
- cstm_mips_ixx_set_vpp,
- 0,
- 0
-};
+#endif
/* board and partition description */
@@ -155,19 +104,19 @@ struct cstm_mips_ixx_info {
char *name;
unsigned long window_addr;
unsigned long window_size;
- int buswidth;
+ int bankwidth;
int num_partitions;
};
#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
-#define PHYSMAP_NUMBER 1 // number of board desc structs needed, one per contiguous flash type
-const struct cstm_mips_ixx_info cstm_mips_ixx_board_desc[PHYSMAP_NUMBER] =
+#define PHYSMAP_NUMBER 1 // number of board desc structs needed, one per contiguous flash type
+const struct cstm_mips_ixx_info cstm_mips_ixx_board_desc[PHYSMAP_NUMBER] =
{
{ // 28F128J3A in 2x16 configuration
"big flash", // name
0x08000000, // window_addr
0x02000000, // window_size
- 4, // buswidth
+ 4, // bankwidth
1, // num_partitions
}
@@ -175,31 +124,31 @@ const struct cstm_mips_ixx_info cstm_mips_ixx_board_desc[PHYSMAP_NUMBER] =
static struct mtd_partition cstm_mips_ixx_partitions[PHYSMAP_NUMBER][MAX_PHYSMAP_PARTITIONS] = {
{ // 28F128J3A in 2x16 configuration
{
- name: "main partition ",
- size: 0x02000000, // 128 x 2 x 128k byte sectors
- offset: 0,
+ .name = "main partition ",
+ .size = 0x02000000, // 128 x 2 x 128k byte sectors
+ .offset = 0,
},
},
};
#else /* defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR) */
-#define PHYSMAP_NUMBER 1 // number of board desc structs needed, one per contiguous flash type
-const struct cstm_mips_ixx_info cstm_mips_ixx_board_desc[PHYSMAP_NUMBER] =
+#define PHYSMAP_NUMBER 1 // number of board desc structs needed, one per contiguous flash type
+const struct cstm_mips_ixx_info cstm_mips_ixx_board_desc[PHYSMAP_NUMBER] =
{
- {
+ {
"MTD flash", // name
CONFIG_MTD_CSTM_MIPS_IXX_START, // window_addr
CONFIG_MTD_CSTM_MIPS_IXX_LEN, // window_size
- CONFIG_MTD_CSTM_MIPS_IXX_BUSWIDTH, // buswidth
+ CONFIG_MTD_CSTM_MIPS_IXX_BUSWIDTH, // bankwidth
1, // num_partitions
},
};
static struct mtd_partition cstm_mips_ixx_partitions[PHYSMAP_NUMBER][MAX_PHYSMAP_PARTITIONS] = {
-{
+{
{
- name: "main partition",
- size: CONFIG_MTD_CSTM_MIPS_IXX_LEN,
- offset: 0,
+ .name = "main partition",
+ .size = CONFIG_MTD_CSTM_MIPS_IXX_LEN,
+ .offset = 0,
},
},
};
@@ -216,17 +165,24 @@ int __init init_cstm_mips_ixx(void)
/* Initialize mapping */
for (i=0;i<PHYSMAP_NUMBER;i++) {
- printk(KERN_NOTICE "cstm_mips_ixx flash device: %lx at %lx\n", cstm_mips_ixx_board_desc[i].window_size, cstm_mips_ixx_board_desc[i].window_addr);
- memcpy((char *)&cstm_mips_ixx_map[i],(char *)&basic_cstm_mips_ixx_map,sizeof(struct map_info));
- cstm_mips_ixx_map[i].map_priv_1 = (unsigned long)ioremap(cstm_mips_ixx_board_desc[i].window_addr, cstm_mips_ixx_board_desc[i].window_size);
- if (!cstm_mips_ixx_map[i].map_priv_1) {
+ printk(KERN_NOTICE "cstm_mips_ixx flash device: 0x%lx at 0x%lx\n",
+ cstm_mips_ixx_board_desc[i].window_size, cstm_mips_ixx_board_desc[i].window_addr);
+
+
+ cstm_mips_ixx_map[i].phys = cstm_mips_ixx_board_desc[i].window_addr;
+ cstm_mips_ixx_map[i].virt = ioremap(cstm_mips_ixx_board_desc[i].window_addr, cstm_mips_ixx_board_desc[i].window_size);
+ if (!cstm_mips_ixx_map[i].virt) {
printk(KERN_WARNING "Failed to ioremap\n");
return -EIO;
}
cstm_mips_ixx_map[i].name = cstm_mips_ixx_board_desc[i].name;
cstm_mips_ixx_map[i].size = cstm_mips_ixx_board_desc[i].window_size;
- cstm_mips_ixx_map[i].buswidth = cstm_mips_ixx_board_desc[i].buswidth;
- //printk(KERN_NOTICE "cstm_mips_ixx: ioremap is %x\n",(unsigned int)(cstm_mips_ixx_map[i].map_priv_1));
+ cstm_mips_ixx_map[i].bankwidth = cstm_mips_ixx_board_desc[i].bankwidth;
+#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
+ cstm_mips_ixx_map[i].set_vpp = cstm_mips_ixx_set_vpp;
+#endif
+ simple_map_init(&cstm_mips_ixx_map[i]);
+ //printk(KERN_NOTICE "cstm_mips_ixx: ioremap is %x\n",(unsigned int)(cstm_mips_ixx_map[i].virt));
}
#if defined(CONFIG_MIPS_ITE8172) || defined(CONFIG_MIPS_IVR)
@@ -244,7 +200,7 @@ int __init init_cstm_mips_ixx(void)
printk(KERN_NOTICE "cstm_mips_ixx %d jedec: mymtd is %x\n",i,(unsigned int)mymtd);
}
if (mymtd) {
- mymtd->module = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
cstm_mips_ixx_map[i].map_priv_2 = (unsigned long)mymtd;
add_mtd_partitions(mymtd, parts, cstm_mips_ixx_board_desc[i].num_partitions);
@@ -266,9 +222,9 @@ static void __exit cleanup_cstm_mips_ixx(void)
del_mtd_partitions(mymtd);
map_destroy(mymtd);
}
- if (cstm_mips_ixx_map[i].map_priv_1) {
- iounmap((void *)cstm_mips_ixx_map[i].map_priv_1);
- cstm_mips_ixx_map[i].map_priv_1 = 0;
+ if (cstm_mips_ixx_map[i].virt) {
+ iounmap((void *)cstm_mips_ixx_map[i].virt);
+ cstm_mips_ixx_map[i].virt = 0;
}
}
}
@@ -279,7 +235,7 @@ void PCISetULongByOffset(__u32 DevNumber, __u32 FuncNumber, __u32 Offset, __u32
offset = ( unsigned long )( 0x80000000 | ( DevNumber << 11 ) + ( FuncNumber << 8 ) + Offset) ;
- *(__u32 *)CC_CONFADDR = offset;
+ *(__u32 *)CC_CONFADDR = offset;
*(__u32 *)CC_CONFDATA = data;
}
void setup_ITE_IVR_flash()
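
The cstm_mips_ixx_set_vpp() rewrite above replaces the open-coded GPIO toggling with a reference count protected by a spinlock, so concurrent flash users cannot switch the programming voltage off underneath each other. The same pattern in isolation is sketched below; board_vpp_on()/board_vpp_off() are hypothetical stand-ins for the GPIO register pokes the real driver performs.

#include <linux/spinlock.h>
#include <linux/mtd/map.h>

static DEFINE_SPINLOCK(vpp_lock);
static int vpp_count;

static void board_vpp_on(void)  { /* raise the Vpp control line here */ }
static void board_vpp_off(void) { /* drop the Vpp control line here */ }

static void example_set_vpp(struct map_info *map, int on)
{
	unsigned long flags;

	spin_lock_irqsave(&vpp_lock, flags);
	if (on) {
		if (!vpp_count++)	/* first user switches Vpp on */
			board_vpp_on();
	} else {
		if (!--vpp_count)	/* last user switches Vpp off */
			board_vpp_off();
	}
	spin_unlock_irqrestore(&vpp_lock, flags);
}
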
diff --git a/linux-2.4.x/drivers/mtd/maps/dbox2-flash.c b/linux-2.4.x/drivers/mtd/maps/dbox2-flash.c
index 2e5c30d..5998f92 100644
--- a/linux-2.4.x/drivers/mtd/maps/dbox2-flash.c
+++ b/linux-2.4.x/drivers/mtd/maps/dbox2-flash.c
@@ -1,131 +1,107 @@
/*
- * $Id: dbox2-flash.c,v 1.4 2001/10/02 15:05:14 dwmw2 Exp $
+ * $Id: dbox2-flash.c,v 1.15 2006/03/29 08:31:11 dwmw2 Exp $
*
- * Nokia / Sagem D-Box 2 flash driver
+ * D-Box 2 flash driver
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/config.h>
+#include <linux/errno.h>
/* partition_info gives details on the logical partitions that the single
* flash device is split into. If the size is zero we use up to the end of the
* device. */
-static struct mtd_partition partition_info[]= {{name: "BR bootloader", // raw
- size: 128 * 1024,
- offset: 0,
- mask_flags: MTD_WRITEABLE},
- {name: "PPC bootloader", // flfs
- size: 128 * 1024,
- offset: MTDPART_OFS_APPEND,
- mask_flags: 0},
- {name: "Kernel", // idxfs
- size: 768 * 1024,
- offset: MTDPART_OFS_APPEND,
- mask_flags: 0},
- {name: "System", // jffs
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
- mask_flags: 0}};
-
-#define NUM_PARTITIONS (sizeof(partition_info) / sizeof(partition_info[0]))
+static struct mtd_partition partition_info[]= {
+ {
+ .name = "BR bootloader",
+ .size = 128 * 1024,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE
+ },
+ {
+ .name = "FLFS (U-Boot)",
+ .size = 128 * 1024,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = 0
+ },
+ {
+ .name = "Root (SquashFS)",
+ .size = 7040 * 1024,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = 0
+ },
+ {
+ .name = "var (JFFS2)",
+ .size = 896 * 1024,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = 0
+ },
+ {
+ .name = "Flash without bootloader",
+ .size = MTDPART_SIZ_FULL,
+ .offset = 128 * 1024,
+ .mask_flags = 0
+ },
+ {
+ .name = "Complete Flash",
+ .size = MTDPART_SIZ_FULL,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE
+ }
+};
+
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
#define WINDOW_ADDR 0x10000000
#define WINDOW_SIZE 0x800000
static struct mtd_info *mymtd;
-__u8 dbox2_flash_read8(struct map_info *map, unsigned long ofs)
-{
- return __raw_readb(map->map_priv_1 + ofs);
-}
-
-__u16 dbox2_flash_read16(struct map_info *map, unsigned long ofs)
-{
- return __raw_readw(map->map_priv_1 + ofs);
-}
-
-__u32 dbox2_flash_read32(struct map_info *map, unsigned long ofs)
-{
- return __raw_readl(map->map_priv_1 + ofs);
-}
-
-void dbox2_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
-
-void dbox2_flash_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- __raw_writeb(d, map->map_priv_1 + adr);
- mb();
-}
-
-void dbox2_flash_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- __raw_writew(d, map->map_priv_1 + adr);
- mb();
-}
-
-void dbox2_flash_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- __raw_writel(d, map->map_priv_1 + adr);
- mb();
-}
-
-void dbox2_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio(map->map_priv_1 + to, from, len);
-}
struct map_info dbox2_flash_map = {
- name: "D-Box 2 flash memory",
- size: WINDOW_SIZE,
- buswidth: 4,
- read8: dbox2_flash_read8,
- read16: dbox2_flash_read16,
- read32: dbox2_flash_read32,
- copy_from: dbox2_flash_copy_from,
- write8: dbox2_flash_write8,
- write16: dbox2_flash_write16,
- write32: dbox2_flash_write32,
- copy_to: dbox2_flash_copy_to
+ .name = "D-Box 2 flash memory",
+ .size = WINDOW_SIZE,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR,
};
int __init init_dbox2_flash(void)
{
printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR);
- dbox2_flash_map.map_priv_1 = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE);
+ dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
- if (!dbox2_flash_map.map_priv_1) {
+ if (!dbox2_flash_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
+ simple_map_init(&dbox2_flash_map);
// Probe for dual Intel 28F320 or dual AMD
mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
if (!mymtd) {
// Probe for single Intel 28F640
- dbox2_flash_map.buswidth = 2;
-
+ dbox2_flash_map.bankwidth = 2;
+
mymtd = do_map_probe("cfi_probe", &dbox2_flash_map);
}
-
+
if (mymtd) {
- mymtd->module = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
/* Create MTD devices for each partition. */
add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
-
+
return 0;
}
- iounmap((void *)dbox2_flash_map.map_priv_1);
+ iounmap((void *)dbox2_flash_map.virt);
return -ENXIO;
}
@@ -135,9 +111,9 @@ static void __exit cleanup_dbox2_flash(void)
del_mtd_partitions(mymtd);
map_destroy(mymtd);
}
- if (dbox2_flash_map.map_priv_1) {
- iounmap((void *)dbox2_flash_map.map_priv_1);
- dbox2_flash_map.map_priv_1 = 0;
+ if (dbox2_flash_map.virt) {
+ iounmap((void *)dbox2_flash_map.virt);
+ dbox2_flash_map.virt = 0;
}
}
@@ -146,5 +122,5 @@ module_exit(cleanup_dbox2_flash);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>");
-MODULE_DESCRIPTION("MTD map driver for Nokia/Sagem D-Box 2 board");
+MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>, Bastian Blank <waldi@tuxbox.org>, Alexander Wild <wild@te-elektronik.com>");
+MODULE_DESCRIPTION("MTD map driver for D-Box 2 board");
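
init_dbox2_flash() above keeps the existing probe-fallback logic: it first probes with bankwidth 4 (two interleaved 16-bit chips) and retries with bankwidth 2 (a single chip) when nothing answers. The same idea as a small helper, with illustrative names:

#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>

/* Probe a linear window, falling back from a dual-chip to a
 * single-chip bank geometry when the first probe finds nothing. */
static struct mtd_info * __init probe_with_fallback(struct map_info *map)
{
	struct mtd_info *mtd;

	map->bankwidth = 4;		/* e.g. dual Intel 28F320 or dual AMD */
	mtd = do_map_probe("cfi_probe", map);
	if (!mtd) {
		map->bankwidth = 2;	/* e.g. single Intel 28F640 */
		mtd = do_map_probe("cfi_probe", map);
	}
	return mtd;
}
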
diff --git a/linux-2.4.x/drivers/mtd/maps/dc21285.c b/linux-2.4.x/drivers/mtd/maps/dc21285.c
index 2bcf777..701620b 100644
--- a/linux-2.4.x/drivers/mtd/maps/dc21285.c
+++ b/linux-2.4.x/drivers/mtd/maps/dc21285.c
@@ -4,13 +4,16 @@
* (C) 2000 Nicolas Pitre <nico@cam.org>
*
* This code is GPL
- *
- * $Id: dc21285.c,v 1.7 2001/10/11 16:17:51 nico Exp $
+ *
+ * $Id: dc21285.c,v 1.24 2005/11/07 11:14:26 gleixner Exp $
*/
-
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -18,143 +21,200 @@
#include <asm/io.h>
#include <asm/hardware/dec21285.h>
+#include <asm/mach-types.h>
-static struct mtd_info *mymtd;
+static struct mtd_info *dc21285_mtd;
+
+#ifdef CONFIG_ARCH_NETWINDER
+/*
+ * This is really ugly, but it seems to be the only
+ * reliable way to do it, as the cpld state machine
+ * is unpredictable. So we have a 25us penalty per
+ * write access.
+ */
+static void nw_en_write(void)
+{
+ extern spinlock_t gpio_lock;
+ unsigned long flags;
+
+ /*
+ * we want to write a bit pattern XXX1 to Xilinx to enable
+ * the write gate, which will be open for about the next 2ms.
+ */
+ spin_lock_irqsave(&gpio_lock, flags);
+ cpld_modify(1, 1);
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ /*
+ * let the ISA bus catch up...
+ */
+ udelay(25);
+}
+#else
+#define nw_en_write() do { } while (0)
+#endif
-__u8 dc21285_read8(struct map_info *map, unsigned long ofs)
+static map_word dc21285_read8(struct map_info *map, unsigned long ofs)
{
- return *(__u8*)(map->map_priv_1 + ofs);
+ map_word val;
+ val.x[0] = *(uint8_t*)(map->virt + ofs);
+ return val;
}
-__u16 dc21285_read16(struct map_info *map, unsigned long ofs)
+static map_word dc21285_read16(struct map_info *map, unsigned long ofs)
{
- return *(__u16*)(map->map_priv_1 + ofs);
+ map_word val;
+ val.x[0] = *(uint16_t*)(map->virt + ofs);
+ return val;
}
-__u32 dc21285_read32(struct map_info *map, unsigned long ofs)
+static map_word dc21285_read32(struct map_info *map, unsigned long ofs)
{
- return *(__u32*)(map->map_priv_1 + ofs);
+ map_word val;
+ val.x[0] = *(uint32_t*)(map->virt + ofs);
+ return val;
}
-void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
- memcpy(to, (void*)(map->map_priv_1 + from), len);
+ memcpy(to, (void*)(map->virt + from), len);
}
-void dc21285_write8(struct map_info *map, __u8 d, unsigned long adr)
+static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr)
{
+ if (machine_is_netwinder())
+ nw_en_write();
*CSR_ROMWRITEREG = adr & 3;
adr &= ~3;
- *(__u8*)(map->map_priv_1 + adr) = d;
+ *(uint8_t*)(map->virt + adr) = d.x[0];
}
-void dc21285_write16(struct map_info *map, __u16 d, unsigned long adr)
+static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr)
{
+ if (machine_is_netwinder())
+ nw_en_write();
*CSR_ROMWRITEREG = adr & 3;
adr &= ~3;
- *(__u16*)(map->map_priv_1 + adr) = d;
+ *(uint16_t*)(map->virt + adr) = d.x[0];
}
-void dc21285_write32(struct map_info *map, __u32 d, unsigned long adr)
+static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr)
{
- *(__u32*)(map->map_priv_1 + adr) = d;
+ if (machine_is_netwinder())
+ nw_en_write();
+ *(uint32_t*)(map->virt + adr) = d.x[0];
}
-void dc21285_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
- switch (map->buswidth) {
- case 4:
- while (len > 0) {
- __u32 d = *((__u32*)from)++;
- dc21285_write32(map, d, to);
- to += 4;
- len -= 4;
- }
- break;
- case 2:
- while (len > 0) {
- __u16 d = *((__u16*)from)++;
- dc21285_write16(map, d, to);
- to += 2;
- len -= 2;
- }
- break;
- case 1:
- while (len > 0) {
- __u8 d = *((__u8*)from)++;
- dc21285_write8(map, d, to);
- to++;
- len--;
- }
- break;
+ while (len > 0) {
+ map_word d;
+ d.x[0] = *((uint32_t*)from)++;
+ dc21285_write32(map, d, to);
+ to += 4;
+ len -= 4;
+ }
+}
+
+static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ while (len > 0) {
+ map_word d;
+ d.x[0] = *((uint16_t*)from)++;
+ dc21285_write16(map, d, to);
+ to += 2;
+ len -= 2;
}
}
-struct map_info dc21285_map = {
- name: "DC21285 flash",
- size: 16*1024*1024,
- read8: dc21285_read8,
- read16: dc21285_read16,
- read32: dc21285_read32,
- copy_from: dc21285_copy_from,
- write8: dc21285_write8,
- write16: dc21285_write16,
- write32: dc21285_write32,
- copy_to: dc21285_copy_to
+static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ map_word d;
+ d.x[0] = *((uint8_t*)from)++;
+ dc21285_write8(map, d, to);
+ to++;
+ len--;
+}
+
+static struct map_info dc21285_map = {
+ .name = "DC21285 flash",
+ .phys = NO_XIP,
+ .size = 16*1024*1024,
+ .copy_from = dc21285_copy_from,
};
/* Partition stuff */
+#ifdef CONFIG_MTD_PARTITIONS
static struct mtd_partition *dc21285_parts;
-
-extern int parse_redboot_partitions(struct mtd_info *, struct mtd_partition **);
+static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+#endif
-int __init init_dc21285(void)
+static int __init init_dc21285(void)
{
- /* Determine buswidth */
+
+#ifdef CONFIG_MTD_PARTITIONS
+ int nrparts;
+#endif
+
+ /* Determine bankwidth */
switch (*CSR_SA110_CNTL & (3<<14)) {
- case SA110_CNTL_ROMWIDTH_8:
- dc21285_map.buswidth = 1;
+ case SA110_CNTL_ROMWIDTH_8:
+ dc21285_map.bankwidth = 1;
+ dc21285_map.read = dc21285_read8;
+ dc21285_map.write = dc21285_write8;
+ dc21285_map.copy_to = dc21285_copy_to_8;
break;
- case SA110_CNTL_ROMWIDTH_16:
- dc21285_map.buswidth = 2;
+ case SA110_CNTL_ROMWIDTH_16:
+ dc21285_map.bankwidth = 2;
+ dc21285_map.read = dc21285_read16;
+ dc21285_map.write = dc21285_write16;
+ dc21285_map.copy_to = dc21285_copy_to_16;
break;
- case SA110_CNTL_ROMWIDTH_32:
- dc21285_map.buswidth = 4;
+ case SA110_CNTL_ROMWIDTH_32:
+ dc21285_map.bankwidth = 4;
+ dc21285_map.read = dc21285_read32;
+ dc21285_map.write = dc21285_write32;
+ dc21285_map.copy_to = dc21285_copy_to_32;
break;
default:
- printk (KERN_ERR "DC21285 flash: undefined buswidth\n");
+ printk (KERN_ERR "DC21285 flash: undefined bankwidth\n");
return -ENXIO;
}
- printk (KERN_NOTICE "DC21285 flash support (%d-bit buswidth)\n",
- dc21285_map.buswidth*8);
+ printk (KERN_NOTICE "DC21285 flash support (%d-bit bankwidth)\n",
+ dc21285_map.bankwidth*8);
/* Let's map the flash area */
- dc21285_map.map_priv_1 = (unsigned long)__ioremap(DC21285_FLASH, 16*1024*1024, 0);
- if (!dc21285_map.map_priv_1) {
+ dc21285_map.virt = ioremap(DC21285_FLASH, 16*1024*1024);
+ if (!dc21285_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
- mymtd = do_map_probe("cfi_probe", &dc21285_map);
- if (mymtd) {
- int nrparts;
+ if (machine_is_ebsa285()) {
+ dc21285_mtd = do_map_probe("cfi_probe", &dc21285_map);
+ } else {
+ dc21285_mtd = do_map_probe("jedec_probe", &dc21285_map);
+ }
- mymtd->module = THIS_MODULE;
-
- /* partition fixup */
+ if (!dc21285_mtd) {
+ iounmap(dc21285_map.virt);
+ return -ENXIO;
+ }
- nrparts = parse_redboot_partitions(mymtd, &dc21285_parts);
- if (nrparts <=0) {
- printk(KERN_NOTICE "RedBoot partition table failed\n");
- iounmap((void *)dc21285_map.map_priv_1);
- return -ENXIO;
- }
+ dc21285_mtd->owner = THIS_MODULE;
- add_mtd_partitions(mymtd, dc21285_parts, nrparts);
+#ifdef CONFIG_MTD_PARTITIONS
+ nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
+ if (nrparts > 0)
+ add_mtd_partitions(dc21285_mtd, dc21285_parts, nrparts);
+ else
+#endif
+ add_mtd_device(dc21285_mtd);
- /*
+ if(machine_is_ebsa285()) {
+ /*
* Flash timing is determined with bits 19-16 of the
* CSR_SA110_CNTL. The value is the number of wait cycles, or
* 0 for 16 cycles (the default). Cycles are 20 ns.
@@ -166,27 +226,23 @@ int __init init_dc21285(void)
*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20));
/* tristate time */
*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24));
-
- return 0;
}
- iounmap((void *)dc21285_map.map_priv_1);
- return -ENXIO;
+ return 0;
}
static void __exit cleanup_dc21285(void)
{
- if (mymtd) {
- del_mtd_device(mymtd);
- map_destroy(mymtd);
- mymtd = NULL;
- }
- if (dc21285_map.map_priv_1) {
- iounmap((void *)dc21285_map.map_priv_1);
- dc21285_map.map_priv_1 = 0;
- }
- if(dc21285_parts)
+#ifdef CONFIG_MTD_PARTITIONS
+ if (dc21285_parts) {
+ del_mtd_partitions(dc21285_mtd);
kfree(dc21285_parts);
+ } else
+#endif
+ del_mtd_device(dc21285_mtd);
+
+ map_destroy(dc21285_mtd);
+ iounmap(dc21285_map.virt);
}
module_init(init_dc21285);
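
The dc21285.c changes move the driver to the map_word accessor API: the read/write/copy_to hooks are selected per detected ROM width, and each hook passes the bus value around in map_word.x[0] instead of a plain integer. A self-contained sketch of such a pair for a 16-bit bank is below; the CSR_ROMWRITEREG handshake and the NetWinder write-enable quirk of the real driver are deliberately omitted, and the example_* names are illustrative.

#include <linux/mtd/map.h>
#include <asm/io.h>

/* 16-bit map_word read: the bus value travels in .x[0]. */
static map_word example_read16(struct map_info *map, unsigned long ofs)
{
	map_word val;

	val.x[0] = readw(map->virt + ofs);
	return val;
}

/* 16-bit map_word write. */
static void example_write16(struct map_info *map, const map_word d, unsigned long adr)
{
	writew(d.x[0], map->virt + adr);
}

For ordinary linear windows the patch does not hand-write these hooks at all; simple_map_init() installs equivalent defaults, and dc21285 only keeps its own because of the ROMWRITEREG dance around every write.
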
diff --git a/linux-2.4.x/drivers/mtd/maps/dilnetpc.c b/linux-2.4.x/drivers/mtd/maps/dilnetpc.c
index 75b9ef1..17b1f7b 100644
--- a/linux-2.4.x/drivers/mtd/maps/dilnetpc.c
+++ b/linux-2.4.x/drivers/mtd/maps/dilnetpc.c
@@ -14,7 +14,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: dilnetpc.c,v 1.8 2002/03/12 13:07:26 rkaiser Exp $
+ * $Id: dilnetpc.c,v 1.21 2006/03/29 08:31:11 dwmw2 Exp $
*
* The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems
* featuring the AMD Elan SC410 processor. There are two variants of this
@@ -29,14 +29,18 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <asm/io.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/concat.h>
+#include <asm/io.h>
+
/*
-** The DIL/NetPC keeps it's BIOS in two distinct flash blocks.
+** The DIL/NetPC keeps its BIOS in two distinct flash blocks.
** Destroying any of these blocks transforms the DNPC into
** a paperweight (albeit not a very useful one, considering
** it only weighs a few grams).
@@ -189,45 +193,6 @@ static void dnpc_unmap_flash(void)
}
-static __u8 dnpc_read8(struct map_info *map, unsigned long ofs)
-{
- return readb(map->map_priv_1 + ofs);
-}
-
-static __u16 dnpc_read16(struct map_info *map, unsigned long ofs)
-{
- return readw(map->map_priv_1 + ofs);
-}
-
-static __u32 dnpc_read32(struct map_info *map, unsigned long ofs)
-{
- return readl(map->map_priv_1 + ofs);
-}
-
-static void dnpc_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
-}
-
-static void dnpc_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- writeb(d, map->map_priv_1 + adr);
-}
-
-static void dnpc_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- writew(d, map->map_priv_1 + adr);
-}
-
-static void dnpc_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- writel(d, map->map_priv_1 + adr);
-}
-
-static void dnpc_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio((void *)(map->map_priv_1 + to), from, len);
-}
/*
************************************************************
@@ -235,7 +200,7 @@ static void dnpc_copy_to(struct map_info *map, unsigned long to, const void *fro
************************************************************
*/
-static spinlock_t dnpc_spin = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(dnpc_spin);
static int vpp_counter = 0;
/*
** This is what has to be done for the DNP board ..
@@ -285,22 +250,14 @@ static void adnp_set_vpp(struct map_info *not_used, int on)
#define DNP_WINDOW_SIZE 0x00200000 /* DNP flash size is 2MiB */
#define ADNP_WINDOW_SIZE 0x00400000 /* ADNP flash size is 4MiB */
-#define WINDOW_ADDR FLASH_BASE
+#define WINDOW_ADDR FLASH_BASE
static struct map_info dnpc_map = {
- name: "ADNP Flash Bank",
- size: ADNP_WINDOW_SIZE,
- buswidth: 1,
- read8: dnpc_read8,
- read16: dnpc_read16,
- read32: dnpc_read32,
- copy_from: dnpc_copy_from,
- write8: dnpc_write8,
- write16: dnpc_write16,
- write32: dnpc_write32,
- copy_to: dnpc_copy_to,
- set_vpp: adnp_set_vpp,
- map_priv_2: WINDOW_ADDR
+ .name = "ADNP Flash Bank",
+ .size = ADNP_WINDOW_SIZE,
+ .bankwidth = 1,
+ .set_vpp = adnp_set_vpp,
+ .phys = WINDOW_ADDR
};
/*
@@ -315,35 +272,35 @@ static struct map_info dnpc_map = {
static struct mtd_partition partition_info[]=
{
- {
- name: "ADNP boot",
- offset: 0,
- size: 0xf0000,
+ {
+ .name = "ADNP boot",
+ .offset = 0,
+ .size = 0xf0000,
},
- {
- name: "ADNP system BIOS",
- offset: MTDPART_OFS_NXTBLK,
- size: 0x10000,
+ {
+ .name = "ADNP system BIOS",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = 0x10000,
#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- mask_flags: MTD_WRITEABLE,
+ .mask_flags = MTD_WRITEABLE,
#endif
},
{
- name: "ADNP file system",
- offset: MTDPART_OFS_NXTBLK,
- size: 0x2f0000,
+ .name = "ADNP file system",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = 0x2f0000,
},
{
- name: "ADNP system BIOS entry",
- offset: MTDPART_OFS_NXTBLK,
- size: MTDPART_SIZ_FULL,
+ .name = "ADNP system BIOS entry",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = MTDPART_SIZ_FULL,
#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- mask_flags: MTD_WRITEABLE,
+ .mask_flags = MTD_WRITEABLE,
#endif
},
};
-#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
static struct mtd_info *mymtd;
static struct mtd_info *lowlvl_parts[NUM_PARTITIONS];
@@ -368,27 +325,27 @@ static struct mtd_info *merged_mtd;
static struct mtd_partition higlvl_partition_info[]=
{
- {
- name: "ADNP boot block",
- offset: 0,
- size: CONFIG_MTD_DILNETPC_BOOTSIZE,
+ {
+ .name = "ADNP boot block",
+ .offset = 0,
+ .size = CONFIG_MTD_DILNETPC_BOOTSIZE,
},
{
- name: "ADNP file system space",
- offset: MTDPART_OFS_NXTBLK,
- size: ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000,
+ .name = "ADNP file system space",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000,
},
- {
- name: "ADNP system BIOS + BIOS Entry",
- offset: MTDPART_OFS_NXTBLK,
- size: MTDPART_SIZ_FULL,
+ {
+ .name = "ADNP system BIOS + BIOS Entry",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = MTDPART_SIZ_FULL,
#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED
- mask_flags: MTD_WRITEABLE,
+ .mask_flags = MTD_WRITEABLE,
#endif
},
};
-#define NUM_HIGHLVL_PARTITIONS (sizeof(higlvl_partition_info)/sizeof(partition_info[0]))
+#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info)
static int dnp_adnp_probe(void)
@@ -414,7 +371,7 @@ static int __init init_dnpc(void)
/*
** determine hardware (DNP/ADNP/invalid)
- */
+ */
if((is_dnp = dnp_adnp_probe()) < 0)
return -ENXIO;
@@ -440,25 +397,26 @@ static int __init init_dnpc(void)
++dnpc_map.name;
for(i = 0; i < NUM_PARTITIONS; i++)
++partition_info[i].name;
- higlvl_partition_info[1].size = DNP_WINDOW_SIZE -
+ higlvl_partition_info[1].size = DNP_WINDOW_SIZE -
CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000;
for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++)
++higlvl_partition_info[i].name;
}
- printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%lx\n",
- is_dnp ? "DNPC" : "ADNP", dnpc_map.size, dnpc_map.map_priv_2);
+ printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%lx\n",
+ is_dnp ? "DNPC" : "ADNP", dnpc_map.size, dnpc_map.phys);
- dnpc_map.map_priv_1 = (unsigned long)ioremap_nocache(dnpc_map.map_priv_2, dnpc_map.size);
+ dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size);
- dnpc_map_flash(dnpc_map.map_priv_2, dnpc_map.size);
+ dnpc_map_flash(dnpc_map.phys, dnpc_map.size);
- if (!dnpc_map.map_priv_1) {
+ if (!dnpc_map.virt) {
printk("Failed to ioremap_nocache\n");
return -EIO;
}
+ simple_map_init(&dnpc_map);
- printk("FLASH virtual address: 0x%lx\n", dnpc_map.map_priv_1);
+ printk("FLASH virtual address: 0x%p\n", dnpc_map.virt);
mymtd = do_map_probe("jedec_probe", &dnpc_map);
@@ -475,11 +433,11 @@ static int __init init_dnpc(void)
mymtd->erasesize = 0x10000;
if (!mymtd) {
- iounmap((void *)dnpc_map.map_priv_1);
+ iounmap(dnpc_map.virt);
return -ENXIO;
}
-
- mymtd->module = THIS_MODULE;
+
+ mymtd->owner = THIS_MODULE;
/*
** Supply pointers to lowlvl_parts[] array to add_mtd_partitions()
@@ -525,10 +483,10 @@ static void __exit cleanup_dnpc(void)
del_mtd_partitions(mymtd);
map_destroy(mymtd);
}
- if (dnpc_map.map_priv_1) {
- iounmap((void *)dnpc_map.map_priv_1);
+ if (dnpc_map.virt) {
+ iounmap(dnpc_map.virt);
dnpc_unmap_flash();
- dnpc_map.map_priv_1 = 0;
+ dnpc_map.virt = NULL;
}
}
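
Two smaller conversions recur in the dilnetpc.c hunks and elsewhere in this patch: SPIN_LOCK_UNLOCKED initialisation becomes DEFINE_SPINLOCK(), and the sizeof(array)/sizeof(element) partition-count macros become ARRAY_SIZE(). A short illustrative fragment (the partition names and sizes here are made up):

#include <linux/kernel.h>		/* ARRAY_SIZE */
#include <linux/spinlock.h>		/* DEFINE_SPINLOCK */
#include <linux/mtd/partitions.h>

/* Replaces: static spinlock_t example_lock = SPIN_LOCK_UNLOCKED; */
static DEFINE_SPINLOCK(example_lock);

static struct mtd_partition example_parts[] = {
	{ .name = "boot",   .offset = 0,                  .size = 128 * 1024 },
	{ .name = "kernel", .offset = MTDPART_OFS_APPEND, .size = 768 * 1024 },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

/* Replaces: sizeof(example_parts)/sizeof(example_parts[0]) */
#define EXAMPLE_NUM_PARTS ARRAY_SIZE(example_parts)
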
diff --git a/linux-2.4.x/drivers/mtd/maps/dmv182.c b/linux-2.4.x/drivers/mtd/maps/dmv182.c
new file mode 100644
index 0000000..66a5246
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/dmv182.c
@@ -0,0 +1,149 @@
+
+/*
+ * drivers/mtd/maps/svme182.c
+ *
+ * Flash map driver for the Dy4 SVME182 board
+ *
+ * $Id: dmv182.c,v 1.7 2006/03/29 08:31:11 dwmw2 Exp $
+ *
+ * Copyright 2003-2004, TimeSys Corporation
+ *
+ * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/errno.h>
+
+/*
+ * This driver currently handles only the 16MiB user flash bank 1 on the
+ * board. It does not provide access to bank 0 (contains the Dy4 FFW), bank 2
+ * (VxWorks boot), or the optional 48MiB expansion flash.
+ *
+ * scott.wood@timesys.com: On the newer boards with 128MiB flash, it
+ * now supports the first 96MiB (the boot flash bank containing FFW
+ * is excluded). The VxWorks loader is in partition 1.
+ */
+
+#define FLASH_BASE_ADDR 0xf0000000
+#define FLASH_BANK_SIZE (128*1024*1024)
+
+MODULE_AUTHOR("Scott Wood, TimeSys Corporation <scott.wood@timesys.com>");
+MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board");
+MODULE_LICENSE("GPL");
+
+static struct map_info svme182_map = {
+ .name = "Dy4 SVME182",
+ .bankwidth = 32,
+ .size = 128 * 1024 * 1024
+};
+
+#define BOOTIMAGE_PART_SIZE ((6*1024*1024)-RESERVED_PART_SIZE)
+
+// Allow 6MiB for the kernel
+#define NEW_BOOTIMAGE_PART_SIZE (6 * 1024 * 1024)
+// Allow 1MiB for the bootloader
+#define NEW_BOOTLOADER_PART_SIZE (1024 * 1024)
+// Use the remaining 9MiB at the end of flash for the RFS
+#define NEW_RFS_PART_SIZE (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \
+ NEW_BOOTIMAGE_PART_SIZE)
+
+static struct mtd_partition svme182_partitions[] = {
+ // The Lower PABS is only 128KiB, but the partition code doesn't
+ // like partitions that don't end on the largest erase block
+ // size of the device, even if all of the erase blocks in the
+ // partition are small ones. The hardware should prevent
+ // writes to the actual PABS areas.
+ {
+ name: "Lower PABS and CPU 0 bootloader or kernel",
+ size: 6*1024*1024,
+ offset: 0,
+ },
+ {
+ name: "Root Filesystem",
+ size: 10*1024*1024,
+ offset: MTDPART_OFS_NXTBLK
+ },
+ {
+ name: "CPU1 Bootloader",
+ size: 1024*1024,
+ offset: MTDPART_OFS_NXTBLK,
+ },
+ {
+ name: "Extra",
+ size: 110*1024*1024,
+ offset: MTDPART_OFS_NXTBLK
+ },
+ {
+ name: "Foundation Firmware and Upper PABS",
+ size: 1024*1024,
+ offset: MTDPART_OFS_NXTBLK,
+ mask_flags: MTD_WRITEABLE // read-only
+ }
+};
+
+static struct mtd_info *this_mtd;
+
+static int __init init_svme182(void)
+{
+ struct mtd_partition *partitions;
+ int num_parts = ARRAY_SIZE(svme182_partitions);
+
+ partitions = svme182_partitions;
+
+ svme182_map.virt = ioremap(FLASH_BASE_ADDR, svme182_map.size);
+
+ if (svme182_map.virt == 0) {
+ printk("Failed to ioremap FLASH memory area.\n");
+ return -EIO;
+ }
+
+ simple_map_init(&svme182_map);
+
+ this_mtd = do_map_probe("cfi_probe", &svme182_map);
+ if (!this_mtd)
+ {
+ iounmap((void *)svme182_map.virt);
+ return -ENXIO;
+ }
+
+ printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n",
+ this_mtd->size >> 20, FLASH_BASE_ADDR);
+
+ this_mtd->owner = THIS_MODULE;
+ add_mtd_partitions(this_mtd, partitions, num_parts);
+
+ return 0;
+}
+
+static void __exit cleanup_svme182(void)
+{
+ if (this_mtd)
+ {
+ del_mtd_partitions(this_mtd);
+ map_destroy(this_mtd);
+ }
+
+ if (svme182_map.virt)
+ {
+ iounmap((void *)svme182_map.virt);
+ svme182_map.virt = 0;
+ }
+
+ return;
+}
+
+module_init(init_svme182);
+module_exit(cleanup_svme182);
diff --git a/linux-2.4.x/drivers/mtd/maps/ebony.c b/linux-2.4.x/drivers/mtd/maps/ebony.c
new file mode 100644
index 0000000..aa229c7
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/ebony.c
@@ -0,0 +1,162 @@
+/*
+ * $Id: ebony.c,v 1.17 2005/11/29 20:01:28 gleixner Exp $
+ *
+ * Mapping for Ebony user flash
+ *
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Copyright 2002-2004 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/config.h>
+#include <asm/io.h>
+#include <asm/ibm44x.h>
+#include <platforms/4xx/ebony.h>
+
+static struct mtd_info *flash;
+
+static struct map_info ebony_small_map = {
+ .name = "Ebony small flash",
+ .size = EBONY_SMALL_FLASH_SIZE,
+ .bankwidth = 1,
+};
+
+static struct map_info ebony_large_map = {
+ .name = "Ebony large flash",
+ .size = EBONY_LARGE_FLASH_SIZE,
+ .bankwidth = 1,
+};
+
+static struct mtd_partition ebony_small_partitions[] = {
+ {
+ .name = "OpenBIOS",
+ .offset = 0x0,
+ .size = 0x80000,
+ }
+};
+
+static struct mtd_partition ebony_large_partitions[] = {
+ {
+ .name = "fs",
+ .offset = 0,
+ .size = 0x380000,
+ },
+ {
+ .name = "firmware",
+ .offset = 0x380000,
+ .size = 0x80000,
+ }
+};
+
+int __init init_ebony(void)
+{
+ u8 fpga0_reg;
+ u8 __iomem *fpga0_adr;
+ unsigned long long small_flash_base, large_flash_base;
+
+ fpga0_adr = ioremap64(EBONY_FPGA_ADDR, 16);
+ if (!fpga0_adr)
+ return -ENOMEM;
+
+ fpga0_reg = readb(fpga0_adr);
+ iounmap(fpga0_adr);
+
+ if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
+ !EBONY_FLASH_SEL(fpga0_reg))
+ small_flash_base = EBONY_SMALL_FLASH_HIGH2;
+ else if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
+ EBONY_FLASH_SEL(fpga0_reg))
+ small_flash_base = EBONY_SMALL_FLASH_HIGH1;
+ else if (!EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
+ !EBONY_FLASH_SEL(fpga0_reg))
+ small_flash_base = EBONY_SMALL_FLASH_LOW2;
+ else
+ small_flash_base = EBONY_SMALL_FLASH_LOW1;
+
+ if (EBONY_BOOT_SMALL_FLASH(fpga0_reg) &&
+ !EBONY_ONBRD_FLASH_EN(fpga0_reg))
+ large_flash_base = EBONY_LARGE_FLASH_LOW;
+ else
+ large_flash_base = EBONY_LARGE_FLASH_HIGH;
+
+ ebony_small_map.phys = small_flash_base;
+ ebony_small_map.virt = ioremap64(small_flash_base,
+ ebony_small_map.size);
+
+ if (!ebony_small_map.virt) {
+ printk("Failed to ioremap flash\n");
+ return -EIO;
+ }
+
+ simple_map_init(&ebony_small_map);
+
+ flash = do_map_probe("jedec_probe", &ebony_small_map);
+ if (flash) {
+ flash->owner = THIS_MODULE;
+ add_mtd_partitions(flash, ebony_small_partitions,
+ ARRAY_SIZE(ebony_small_partitions));
+ } else {
+ printk("map probe failed for flash\n");
+ return -ENXIO;
+ }
+
+ ebony_large_map.phys = large_flash_base;
+ ebony_large_map.virt = ioremap64(large_flash_base,
+ ebony_large_map.size);
+
+ if (!ebony_large_map.virt) {
+ printk("Failed to ioremap flash\n");
+ return -EIO;
+ }
+
+ simple_map_init(&ebony_large_map);
+
+ flash = do_map_probe("jedec_probe", &ebony_large_map);
+ if (flash) {
+ flash->owner = THIS_MODULE;
+ add_mtd_partitions(flash, ebony_large_partitions,
+ ARRAY_SIZE(ebony_large_partitions));
+ } else {
+ printk("map probe failed for flash\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void __exit cleanup_ebony(void)
+{
+ if (flash) {
+ del_mtd_partitions(flash);
+ map_destroy(flash);
+ }
+
+ if (ebony_small_map.virt) {
+ iounmap(ebony_small_map.virt);
+ ebony_small_map.virt = NULL;
+ }
+
+ if (ebony_large_map.virt) {
+ iounmap(ebony_large_map.virt);
+ ebony_large_map.virt = NULL;
+ }
+}
+
+module_init(init_ebony);
+module_exit(cleanup_ebony);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
+MODULE_DESCRIPTION("MTD map and partitions for IBM 440GP Ebony boards");
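
The new ebony.c map reads an FPGA register to work out where the small and large banks are decoded, then runs the same ioremap/simple_map_init/do_map_probe/add_mtd_partitions sequence once per bank. A sketch of that sequence factored into a helper is below; it assumes the 44x-specific ioremap64() that the driver itself uses, and ebony_probe_bank is an illustrative name, not a function from the patch.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>

static int __init ebony_probe_bank(struct map_info *map, unsigned long long phys,
				   struct mtd_partition *parts, int nr_parts,
				   struct mtd_info **out)
{
	map->phys = phys;
	map->virt = ioremap64(phys, map->size);	/* 36-bit physical space on 440GP */
	if (!map->virt)
		return -EIO;

	simple_map_init(map);

	*out = do_map_probe("jedec_probe", map);
	if (!*out) {
		iounmap(map->virt);
		map->virt = NULL;
		return -ENXIO;
	}

	(*out)->owner = THIS_MODULE;
	add_mtd_partitions(*out, parts, nr_parts);
	return 0;
}
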
diff --git a/linux-2.4.x/drivers/mtd/maps/edb7312.c b/linux-2.4.x/drivers/mtd/maps/edb7312.c
new file mode 100644
index 0000000..b48a347
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/edb7312.c
@@ -0,0 +1,147 @@
+/*
+ * $Id: edb7312.c,v 1.14 2005/11/07 11:14:27 gleixner Exp $
+ *
+ * Handle mapping of the NOR flash on Cogent EDB7312 boards
+ *
+ * Copyright 2002 SYSGO Real-Time Solutions GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/config.h>
+
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
+#define WINDOW_ADDR 0x00000000 /* physical properties of flash */
+#define WINDOW_SIZE 0x01000000
+#define BUSWIDTH 2
+#define FLASH_BLOCKSIZE_MAIN 0x20000
+#define FLASH_NUMBLOCKS_MAIN 128
+/* can be "cfi_probe", "jedec_probe", "map_rom", NULL }; */
+#define PROBETYPES { "cfi_probe", NULL }
+
+#define MSG_PREFIX "EDB7312-NOR:" /* prefix for our printk()'s */
+#define MTDID "edb7312-nor" /* for mtdparts= partitioning */
+
+static struct mtd_info *mymtd;
+
+struct map_info edb7312nor_map = {
+ .name = "NOR flash on EDB7312",
+ .size = WINDOW_SIZE,
+ .bankwidth = BUSWIDTH,
+ .phys = WINDOW_ADDR,
+};
+
+#ifdef CONFIG_MTD_PARTITIONS
+
+/*
+ * MTD partitioning stuff
+ */
+static struct mtd_partition static_partitions[3] =
+{
+ {
+ .name = "ARMboot",
+ .size = 0x40000,
+ .offset = 0
+ },
+ {
+ .name = "Kernel",
+ .size = 0x200000,
+ .offset = 0x40000
+ },
+ {
+ .name = "RootFS",
+ .size = 0xDC0000,
+ .offset = 0x240000
+ },
+};
+
+static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+#endif
+
+static int mtd_parts_nb = 0;
+static struct mtd_partition *mtd_parts = 0;
+
+int __init init_edb7312nor(void)
+{
+ static const char *rom_probe_types[] = PROBETYPES;
+ const char **type;
+ const char *part_type = 0;
+
+ printk(KERN_NOTICE MSG_PREFIX "0x%08x at 0x%08x\n",
+ WINDOW_SIZE, WINDOW_ADDR);
+ edb7312nor_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
+
+ if (!edb7312nor_map.virt) {
+ printk(MSG_PREFIX "failed to ioremap\n");
+ return -EIO;
+ }
+
+ simple_map_init(&edb7312nor_map);
+
+ mymtd = 0;
+ type = rom_probe_types;
+ for(; !mymtd && *type; type++) {
+ mymtd = do_map_probe(*type, &edb7312nor_map);
+ }
+ if (mymtd) {
+ mymtd->owner = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
+ if (mtd_parts_nb > 0)
+ part_type = "detected";
+
+ if (mtd_parts_nb == 0)
+ {
+ mtd_parts = static_partitions;
+ mtd_parts_nb = ARRAY_SIZE(static_partitions);
+ part_type = "static";
+ }
+#endif
+ add_mtd_device(mymtd);
+ if (mtd_parts_nb == 0)
+ printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
+ else
+ {
+ printk(KERN_NOTICE MSG_PREFIX
+ "using %s partition definition\n", part_type);
+ add_mtd_partitions(mymtd, mtd_parts, mtd_parts_nb);
+ }
+ return 0;
+ }
+
+ iounmap((void *)edb7312nor_map.virt);
+ return -ENXIO;
+}
+
+static void __exit cleanup_edb7312nor(void)
+{
+ if (mymtd) {
+ del_mtd_device(mymtd);
+ map_destroy(mymtd);
+ }
+ if (edb7312nor_map.virt) {
+ iounmap((void *)edb7312nor_map.virt);
+ edb7312nor_map.virt = 0;
+ }
+}
+
+module_init(init_edb7312nor);
+module_exit(cleanup_edb7312nor);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
+MODULE_DESCRIPTION("Generic configurable MTD map driver");
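
edb7312.c illustrates the partition-source fallback used by several drivers in this patch: ask the generic parsers (RedBoot table, mtdparts= command line) first via parse_mtd_partitions(), and only fall back to a built-in table when nothing is detected. A sketch of that selection, with illustrative probes[] and helper names:

#ifdef CONFIG_MTD_PARTITIONS
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };

static int __init example_add_partitions(struct mtd_info *mtd,
					  struct mtd_partition *static_parts,
					  int nr_static)
{
	struct mtd_partition *parts;
	int nr = parse_mtd_partitions(mtd, probes, &parts, 0);

	if (nr <= 0) {			/* nothing detected: use the table */
		parts = static_parts;
		nr = nr_static;
	}
	return add_mtd_partitions(mtd, parts, nr);
}
#endif
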
diff --git a/linux-2.4.x/drivers/mtd/maps/epxa10db-flash.c b/linux-2.4.x/drivers/mtd/maps/epxa10db-flash.c
index 5cd8065..265b079 100644
--- a/linux-2.4.x/drivers/mtd/maps/epxa10db-flash.c
+++ b/linux-2.4.x/drivers/mtd/maps/epxa10db-flash.c
@@ -5,7 +5,7 @@
* Copyright (C) 2001 Altera Corporation
* Copyright (C) 2001 Red Hat, Inc.
*
- * $Id: epxa10db-flash.c,v 1.2 2001/12/19 13:00:19 jskov Exp $
+ * $Id: epxa10db-flash.c,v 1.15 2005/11/07 11:14:27 gleixner Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,126 +26,80 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <asm/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <asm/io.h>
#include <asm/hardware.h>
+#ifdef CONFIG_EPXA10DB
+#define BOARD_NAME "EPXA10DB"
+#else
+#define BOARD_NAME "EPXA1DB"
+#endif
+
static int nr_parts = 0;
static struct mtd_partition *parts;
static struct mtd_info *mymtd;
-extern int parse_redboot_partitions(struct mtd_info *, struct mtd_partition **);
-static int epxa10db_default_partitions(struct mtd_info *master, struct mtd_partition **pparts);
-
-static __u8 epxa10db_read8(struct map_info *map, unsigned long ofs)
-{
- return __raw_readb(map->map_priv_1 + ofs);
-}
-
-static __u16 epxa10db_read16(struct map_info *map, unsigned long ofs)
-{
- return __raw_readw(map->map_priv_1 + ofs);
-}
-
-static __u32 epxa10db_read32(struct map_info *map, unsigned long ofs)
-{
- return __raw_readl(map->map_priv_1 + ofs);
-}
-
-static void epxa10db_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
-}
-
-static void epxa10db_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- __raw_writeb(d, map->map_priv_1 + adr);
- mb();
-}
-
-static void epxa10db_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- __raw_writew(d, map->map_priv_1 + adr);
- mb();
-}
+static int epxa_default_partitions(struct mtd_info *master, struct mtd_partition **pparts);
-static void epxa10db_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- __raw_writel(d, map->map_priv_1 + adr);
- mb();
-}
-static void epxa10db_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio((void *)(map->map_priv_1 + to), from, len);
-}
-
-
-
-static struct map_info epxa10db_map = {
- name: "EPXA10DB flash",
- size: FLASH_SIZE,
- buswidth: 2,
- read8: epxa10db_read8,
- read16: epxa10db_read16,
- read32: epxa10db_read32,
- copy_from: epxa10db_copy_from,
- write8: epxa10db_write8,
- write16: epxa10db_write16,
- write32: epxa10db_write32,
- copy_to: epxa10db_copy_to
+static struct map_info epxa_map = {
+ .name = "EPXA flash",
+ .size = FLASH_SIZE,
+ .bankwidth = 2,
+ .phys = FLASH_START,
};
+static const char *probes[] = { "RedBoot", "afs", NULL };
-static int __init epxa10db_mtd_init(void)
+static int __init epxa_mtd_init(void)
{
int i;
- printk(KERN_NOTICE "Epxa10db flash device: %x at %x\n", FLASH_SIZE, FLASH_START);
- epxa10db_map.map_priv_1 = (unsigned long)ioremap(FLASH_START, FLASH_SIZE);
- if (!epxa10db_map.map_priv_1) {
- printk("Failed to ioremap Epxa10db flash\n");
+
+ printk(KERN_NOTICE "%s flash device: 0x%x at 0x%x\n", BOARD_NAME, FLASH_SIZE, FLASH_START);
+
+ epxa_map.virt = ioremap(FLASH_START, FLASH_SIZE);
+ if (!epxa_map.virt) {
+ printk("Failed to ioremap %s flash\n",BOARD_NAME);
return -EIO;
}
+ simple_map_init(&epxa_map);
- mymtd = do_map_probe("cfi_probe", &epxa10db_map);
+ mymtd = do_map_probe("cfi_probe", &epxa_map);
if (!mymtd) {
- iounmap((void *)epxa10db_map.map_priv_1);
+ iounmap((void *)epxa_map.virt);
return -ENXIO;
}
- mymtd->module = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
/* Unlock the flash device. */
- for (i=0; i<mymtd->numeraseregions;i++){
- int j;
- for(j=0;j<mymtd->eraseregions[i].numblocks;j++){
- mymtd->unlock(mymtd,mymtd->eraseregions[i].offset + j * mymtd->eraseregions[i].erasesize,4);
+ if(mymtd->unlock){
+ for (i=0; i<mymtd->numeraseregions;i++){
+ int j;
+ for(j=0;j<mymtd->eraseregions[i].numblocks;j++){
+ mymtd->unlock(mymtd,mymtd->eraseregions[i].offset + j * mymtd->eraseregions[i].erasesize,mymtd->eraseregions[i].erasesize);
+ }
}
}
-#ifdef CONFIG_MTD_REDBOOT_PARTS
- nr_parts = parse_redboot_partitions(mymtd, &parts);
+#ifdef CONFIG_MTD_PARTITIONS
+ nr_parts = parse_mtd_partitions(mymtd, probes, &parts, 0);
if (nr_parts > 0) {
add_mtd_partitions(mymtd, parts, nr_parts);
return 0;
}
#endif
-#ifdef CONFIG_MTD_AFS_PARTS
- nr_parts = parse_afs_partitions(mymtd, &parts);
-
- if (nr_parts > 0) {
- add_mtd_partitions(mymtd, parts, nr_parts);
- return 0;
- }
-#endif
-
/* No recognised partitioning schemes found - use defaults */
- nr_parts = epxa10db_default_partitions(mymtd, &parts);
+ nr_parts = epxa_default_partitions(mymtd, &parts);
if (nr_parts > 0) {
add_mtd_partitions(mymtd, parts, nr_parts);
return 0;
@@ -156,7 +110,7 @@ static int __init epxa10db_mtd_init(void)
return 0;
}
-static void __exit epxa10db_mtd_cleanup(void)
+static void __exit epxa_mtd_cleanup(void)
{
if (mymtd) {
if (nr_parts)
@@ -165,15 +119,15 @@ static void __exit epxa10db_mtd_cleanup(void)
del_mtd_device(mymtd);
map_destroy(mymtd);
}
- if (epxa10db_map.map_priv_1) {
- iounmap((void *)epxa10db_map.map_priv_1);
- epxa10db_map.map_priv_1 = 0;
+ if (epxa_map.virt) {
+ iounmap((void *)epxa_map.virt);
+ epxa_map.virt = 0;
}
}
-/*
- * This will do for now, once we decide which bootldr we're finally
+/*
+ * This will do for now, once we decide which bootldr we're finally
* going to use then we'll remove this function and do it properly
*
* Partitions are currently (as offsets from base of flash):
@@ -181,39 +135,45 @@ static void __exit epxa10db_mtd_cleanup(void)
* 0x00400000 - 0x00FFFFFF - Flashdisk
*/
-static int __init epxa10db_default_partitions(struct mtd_info *master, struct mtd_partition **pparts)
+static int __init epxa_default_partitions(struct mtd_info *master, struct mtd_partition **pparts)
{
struct mtd_partition *parts;
int ret, i;
int npartitions = 0;
- char *names;
+ char *names;
const char *name = "jffs";
- printk("Using default partitions for epxa10db\n");
+ printk("Using default partitions for %s\n",BOARD_NAME);
npartitions=1;
parts = kmalloc(npartitions*sizeof(*parts)+strlen(name), GFP_KERNEL);
if (!parts) {
ret = -ENOMEM;
goto out;
}
+ memzero(parts,npartitions*sizeof(*parts)+strlen(name));
i=0;
- names = (char *)&parts[npartitions];
+ names = (char *)&parts[npartitions];
parts[i].name = names;
names += strlen(name) + 1;
strcpy(parts[i].name, name);
+#ifdef CONFIG_EPXA10DB
parts[i].size = FLASH_SIZE-0x00400000;
parts[i].offset = 0x00400000;
- parts[i].mask_flags = 0;
+#else
+ parts[i].size = FLASH_SIZE-0x00180000;
+ parts[i].offset = 0x00180000;
+#endif
out:
*pparts = parts;
return npartitions;
}
-module_init(epxa10db_mtd_init);
-module_exit(epxa10db_mtd_cleanup);
+
+module_init(epxa_mtd_init);
+module_exit(epxa_mtd_cleanup);
MODULE_AUTHOR("Clive Davies");
-MODULE_DESCRIPTION("Altera epxa10db mtd flash map");
+MODULE_DESCRIPTION("Altera epxa mtd flash map");
MODULE_LICENSE("GPL");
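
The epxa init path now walks the erase regions to unlock them only when the chip driver actually provides an unlock hook, and it unlocks whole erase blocks instead of the old fixed 4 bytes. The loop in isolation, with an illustrative helper name:

#include <linux/mtd/mtd.h>

static void example_unlock_all(struct mtd_info *mtd)
{
	int i, j;

	if (!mtd->unlock)		/* chip driver may not support locking */
		return;

	for (i = 0; i < mtd->numeraseregions; i++) {
		struct mtd_erase_region_info *r = &mtd->eraseregions[i];

		for (j = 0; j < r->numblocks; j++)
			mtd->unlock(mtd, r->offset + j * r->erasesize,
				    r->erasesize);
	}
}
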
diff --git a/linux-2.4.x/drivers/mtd/maps/fortunet.c b/linux-2.4.x/drivers/mtd/maps/fortunet.c
new file mode 100644
index 0000000..c6bf4e1
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/fortunet.c
@@ -0,0 +1,274 @@
+/* fortunet.c memory map
+ *
+ * $Id: fortunet.c,v 1.11 2005/11/07 11:14:27 gleixner Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+
+#define MAX_NUM_REGIONS 4
+#define MAX_NUM_PARTITIONS 8
+
+#define DEF_WINDOW_ADDR_PHY 0x00000000
+#define DEF_WINDOW_SIZE 0x00800000 // 8 Mega Bytes
+
+#define MTD_FORTUNET_PK "MTD FortuNet: "
+
+#define MAX_NAME_SIZE 128
+
+struct map_region
+{
+ int window_addr_physical;
+ int altbankwidth;
+ struct map_info map_info;
+ struct mtd_info *mymtd;
+ struct mtd_partition parts[MAX_NUM_PARTITIONS];
+ char map_name[MAX_NAME_SIZE];
+ char parts_name[MAX_NUM_PARTITIONS][MAX_NAME_SIZE];
+};
+
+static struct map_region map_regions[MAX_NUM_REGIONS];
+static int map_regions_set[MAX_NUM_REGIONS] = {0,0,0,0};
+static int map_regions_parts[MAX_NUM_REGIONS] = {0,0,0,0};
+
+
+
+struct map_info default_map = {
+ .size = DEF_WINDOW_SIZE,
+ .bankwidth = 4,
+};
+
+static char * __init get_string_option(char *dest,int dest_size,char *sor)
+{
+ if(!dest_size)
+ return sor;
+ dest_size--;
+ while(*sor)
+ {
+ if(*sor==',')
+ {
+ sor++;
+ break;
+ }
+ else if(*sor=='\"')
+ {
+ sor++;
+ while(*sor)
+ {
+ if(*sor=='\"')
+ {
+ sor++;
+ break;
+ }
+ *dest = *sor;
+ dest++;
+ sor++;
+ dest_size--;
+ if(!dest_size)
+ {
+ *dest = 0;
+ return sor;
+ }
+ }
+ }
+ else
+ {
+ *dest = *sor;
+ dest++;
+ sor++;
+ dest_size--;
+ if(!dest_size)
+ {
+ *dest = 0;
+ return sor;
+ }
+ }
+ }
+ *dest = 0;
+ return sor;
+}
+
+static int __init MTD_New_Region(char *line)
+{
+ char string[MAX_NAME_SIZE];
+ int params[6];
+ get_options (get_string_option(string,sizeof(string),line),6,params);
+ if(params[0]<1)
+ {
+ printk(MTD_FORTUNET_PK "Bad parameters for MTD Region "
+ " name,region-number[,base,size,bankwidth,altbankwidth]\n");
+ return 1;
+ }
+ if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
+ {
+ printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
+ params[1],MAX_NUM_REGIONS-1);
+ return 1;
+ }
+ memset(&map_regions[params[1]],0,sizeof(map_regions[params[1]]));
+ memcpy(&map_regions[params[1]].map_info,
+ &default_map,sizeof(map_regions[params[1]].map_info));
+ map_regions_set[params[1]] = 1;
+ map_regions[params[1]].window_addr_physical = DEF_WINDOW_ADDR_PHY;
+ map_regions[params[1]].altbankwidth = 2;
+ map_regions[params[1]].mymtd = NULL;
+ map_regions[params[1]].map_info.name = map_regions[params[1]].map_name;
+ strcpy(map_regions[params[1]].map_info.name,string);
+ if(params[0]>1)
+ {
+ map_regions[params[1]].window_addr_physical = params[2];
+ }
+ if(params[0]>2)
+ {
+ map_regions[params[1]].map_info.size = params[3];
+ }
+ if(params[0]>3)
+ {
+ map_regions[params[1]].map_info.bankwidth = params[4];
+ }
+ if(params[0]>4)
+ {
+ map_regions[params[1]].altbankwidth = params[5];
+ }
+ return 1;
+}
+
+static int __init MTD_New_Partition(char *line)
+{
+ char string[MAX_NAME_SIZE];
+ int params[4];
+ get_options (get_string_option(string,sizeof(string),line),4,params);
+ if(params[0]<3)
+ {
+ printk(MTD_FORTUNET_PK "Bad parameters for MTD Partition "
+ " name,region-number,size,offset\n");
+ return 1;
+ }
+ if((params[1]<0)||(params[1]>=MAX_NUM_REGIONS))
+ {
+ printk(MTD_FORTUNET_PK "Bad region index of %d only have 0..%u regions\n",
+ params[1],MAX_NUM_REGIONS-1);
+ return 1;
+ }
+ if(map_regions_parts[params[1]]>=MAX_NUM_PARTITIONS)
+ {
+ printk(MTD_FORTUNET_PK "Out of space for partition in this region\n");
+ return 1;
+ }
+ map_regions[params[1]].parts[map_regions_parts[params[1]]].name =
+ map_regions[params[1]]. parts_name[map_regions_parts[params[1]]];
+ strcpy(map_regions[params[1]].parts[map_regions_parts[params[1]]].name,string);
+ map_regions[params[1]].parts[map_regions_parts[params[1]]].size =
+ params[2];
+ map_regions[params[1]].parts[map_regions_parts[params[1]]].offset =
+ params[3];
+ map_regions[params[1]].parts[map_regions_parts[params[1]]].mask_flags = 0;
+ map_regions_parts[params[1]]++;
+ return 1;
+}
+
+__setup("MTD_Region=", MTD_New_Region);
+__setup("MTD_Partition=", MTD_New_Partition);
+
+/* Backwards-spelling-compatibility */
+__setup("MTD_Partion=", MTD_New_Partition);
+
+int __init init_fortunet(void)
+{
+ int ix,iy;
+ for(iy=ix=0;ix<MAX_NUM_REGIONS;ix++)
+ {
+ if(map_regions_parts[ix]&&(!map_regions_set[ix]))
+ {
+ printk(MTD_FORTUNET_PK "Region %d is not setup (Setting to default)\n",
+ ix);
+ memset(&map_regions[ix],0,sizeof(map_regions[ix]));
+ memcpy(&map_regions[ix].map_info,&default_map,
+ sizeof(map_regions[ix].map_info));
+ map_regions_set[ix] = 1;
+ map_regions[ix].window_addr_physical = DEF_WINDOW_ADDR_PHY;
+ map_regions[ix].altbankwidth = 2;
+ map_regions[ix].mymtd = NULL;
+ map_regions[ix].map_info.name = map_regions[ix].map_name;
+ strcpy(map_regions[ix].map_info.name,"FORTUNET");
+ }
+ if(map_regions_set[ix])
+ {
+ iy++;
+ printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash device at physical"
+ " address %x size %x\n",
+ map_regions[ix].map_info.name,
+ map_regions[ix].window_addr_physical,
+ map_regions[ix].map_info.size);
+
+ map_regions[ix].map_info.phys = map_regions[ix].window_addr_physical,
+
+ map_regions[ix].map_info.virt =
+ ioremap_nocache(
+ map_regions[ix].window_addr_physical,
+ map_regions[ix].map_info.size);
+ if(!map_regions[ix].map_info.virt)
+ {
+ printk(MTD_FORTUNET_PK "%s flash failed to ioremap!\n",
+ map_regions[ix].map_info.name);
+ return -ENXIO;
+ }
+ simple_map_init(&map_regions[ix].map_info);
+
+			printk(KERN_NOTICE MTD_FORTUNET_PK "%s flash is mapped at virtual address %x\n",
+ map_regions[ix].map_info.name,
+ map_regions[ix].map_info.virt);
+ map_regions[ix].mymtd = do_map_probe("cfi_probe",
+ &map_regions[ix].map_info);
+ if((!map_regions[ix].mymtd)&&(
+ map_regions[ix].altbankwidth!=map_regions[ix].map_info.bankwidth))
+ {
+ printk(KERN_NOTICE MTD_FORTUNET_PK "Trying alternate bankwidth "
+ "for %s flash.\n",
+ map_regions[ix].map_info.name);
+ map_regions[ix].map_info.bankwidth =
+ map_regions[ix].altbankwidth;
+ map_regions[ix].mymtd = do_map_probe("cfi_probe",
+ &map_regions[ix].map_info);
+ }
+			if(map_regions[ix].mymtd)	/* probe may have failed */
+			{
+				map_regions[ix].mymtd->owner = THIS_MODULE;
+				add_mtd_partitions(map_regions[ix].mymtd,
+					map_regions[ix].parts,map_regions_parts[ix]);
+			}
+ }
+ }
+ if(iy)
+ return 0;
+ return -ENXIO;
+}
+
+static void __exit cleanup_fortunet(void)
+{
+ int ix;
+ for(ix=0;ix<MAX_NUM_REGIONS;ix++)
+ {
+ if(map_regions_set[ix])
+ {
+ if( map_regions[ix].mymtd )
+ {
+ del_mtd_partitions( map_regions[ix].mymtd );
+ map_destroy( map_regions[ix].mymtd );
+ }
+ iounmap((void *)map_regions[ix].map_info.virt);
+ }
+ }
+}
+
+module_init(init_fortunet);
+module_exit(cleanup_fortunet);
+
+MODULE_AUTHOR("FortuNet, Inc.");
+MODULE_DESCRIPTION("MTD map driver for FortuNet boards");
diff --git a/linux-2.4.x/drivers/mtd/maps/h720x-flash.c b/linux-2.4.x/drivers/mtd/maps/h720x-flash.c
new file mode 100644
index 0000000..d0b4880
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/h720x-flash.c
@@ -0,0 +1,144 @@
+/*
+ * Flash memory access on Hynix GMS30C7201/HMS30C7202 based
+ * evaluation boards
+ *
+ * $Id: h720x-flash.c,v 1.13 2006/03/29 08:31:11 dwmw2 Exp $
+ *
+ * (C) 2002 Jungjun Kim <jungjun.kim@hynix.com>
+ * 2003 Thomas Gleixner <tglx@linutronix.de>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+
+static struct mtd_info *mymtd;
+
+static struct map_info h720x_map = {
+ .name = "H720X",
+ .bankwidth = 4,
+ .size = FLASH_SIZE,
+ .phys = FLASH_PHYS,
+};
+
+static struct mtd_partition h720x_partitions[] = {
+ {
+ .name = "ArMon",
+ .size = 0x00080000,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE
+ },{
+ .name = "Env",
+ .size = 0x00040000,
+ .offset = 0x00080000,
+ .mask_flags = MTD_WRITEABLE
+ },{
+ .name = "Kernel",
+ .size = 0x00180000,
+ .offset = 0x000c0000,
+ .mask_flags = MTD_WRITEABLE
+ },{
+ .name = "Ramdisk",
+ .size = 0x00400000,
+ .offset = 0x00240000,
+ .mask_flags = MTD_WRITEABLE
+ },{
+ .name = "jffs2",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND
+ }
+};
+
+#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
+
+static int nr_mtd_parts;
+static struct mtd_partition *mtd_parts;
+static const char *probes[] = { "cmdlinepart", NULL };
+
+/*
+ * Initialize FLASH support
+ */
+int __init h720x_mtd_init(void)
+{
+
+ char *part_type = NULL;
+
+ h720x_map.virt = ioremap(FLASH_PHYS, FLASH_SIZE);
+
+ if (!h720x_map.virt) {
+ printk(KERN_ERR "H720x-MTD: ioremap failed\n");
+ return -EIO;
+ }
+
+ simple_map_init(&h720x_map);
+
+ // Probe for flash bankwidth 4
+ printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n");
+ mymtd = do_map_probe("cfi_probe", &h720x_map);
+ if (!mymtd) {
+ printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n");
+ // Probe for bankwidth 2
+ h720x_map.bankwidth = 2;
+ mymtd = do_map_probe("cfi_probe", &h720x_map);
+ }
+
+ if (mymtd) {
+ mymtd->owner = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
+ if (nr_mtd_parts > 0)
+ part_type = "command line";
+#endif
+ if (nr_mtd_parts <= 0) {
+ mtd_parts = h720x_partitions;
+ nr_mtd_parts = NUM_PARTITIONS;
+ part_type = "builtin";
+ }
+ printk(KERN_INFO "Using %s partition table\n", part_type);
+ add_mtd_partitions(mymtd, mtd_parts, nr_mtd_parts);
+ return 0;
+ }
+
+ iounmap((void *)h720x_map.virt);
+ return -ENXIO;
+}
+
+/*
+ * Cleanup
+ */
+static void __exit h720x_mtd_cleanup(void)
+{
+
+ if (mymtd) {
+ del_mtd_partitions(mymtd);
+ map_destroy(mymtd);
+ }
+
+ /* Free partition info, if commandline partition was used */
+ if (mtd_parts && (mtd_parts != h720x_partitions))
+ kfree (mtd_parts);
+
+ if (h720x_map.virt) {
+ iounmap((void *)h720x_map.virt);
+ h720x_map.virt = 0;
+ }
+}
+
+
+module_init(h720x_mtd_init);
+module_exit(h720x_mtd_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards");
diff --git a/linux-2.4.x/drivers/mtd/maps/ichxrom.c b/linux-2.4.x/drivers/mtd/maps/ichxrom.c
new file mode 100644
index 0000000..6807290
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/ichxrom.c
@@ -0,0 +1,382 @@
+/*
+ * ichxrom.c
+ *
+ * Normal mappings of chips in physical memory
+ * $Id: ichxrom.c,v 1.20 2005/11/29 20:01:28 gleixner Exp $
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/list.h>
+
+#define xstr(s) str(s)
+#define str(s) #s
+#define MOD_NAME xstr(KBUILD_BASENAME)
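+/*
+ * xstr()/str() expand KBUILD_BASENAME before stringifying it, so MOD_NAME
+ * becomes the literal object basename ("ichxrom") supplied by the build.
+ */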
+
+#define ADDRESS_NAME_LEN 18
+
+#define ROM_PROBE_STEP_SIZE (64*1024) /* 64KiB */
+
+#define BIOS_CNTL 0x4e
+#define FWH_DEC_EN1 0xE3
+#define FWH_DEC_EN2 0xF0
+#define FWH_SEL1 0xE8
+#define FWH_SEL2 0xEE
+
+struct ichxrom_window {
+ void __iomem* virt;
+ unsigned long phys;
+ unsigned long size;
+ struct list_head maps;
+ struct resource rsrc;
+ struct pci_dev *pdev;
+};
+
+struct ichxrom_map_info {
+ struct list_head list;
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct resource rsrc;
+ char map_name[sizeof(MOD_NAME) + 2 + ADDRESS_NAME_LEN];
+};
+
+static struct ichxrom_window ichxrom_window = {
+ .maps = LIST_HEAD_INIT(ichxrom_window.maps),
+};
+
+static void ichxrom_cleanup(struct ichxrom_window *window)
+{
+ struct ichxrom_map_info *map, *scratch;
+ u16 word;
+
+ /* Disable writes through the rom window */
+ pci_read_config_word(window->pdev, BIOS_CNTL, &word);
+ pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
+
+ /* Free all of the mtd devices */
+ list_for_each_entry_safe(map, scratch, &window->maps, list) {
+ if (map->rsrc.parent)
+ release_resource(&map->rsrc);
+ del_mtd_device(map->mtd);
+ map_destroy(map->mtd);
+ list_del(&map->list);
+ kfree(map);
+ }
+ if (window->rsrc.parent)
+ release_resource(&window->rsrc);
+ if (window->virt) {
+ iounmap(window->virt);
+ window->virt = NULL;
+ window->phys = 0;
+ window->size = 0;
+ window->pdev = NULL;
+ }
+}
+
+
+static int __devinit ichxrom_init_one (struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ struct ichxrom_window *window = &ichxrom_window;
+ struct ichxrom_map_info *map = NULL;
+ unsigned long map_top;
+ u8 byte;
+ u16 word;
+
+	/* For now I just handle the ichx and I assume there
+	 * are not a lot of resources up at the top of the address
+	 * space. It is possible to handle other devices in the
+	 * top 16MB but it is very painful. Also since
+	 * you can only really attach a FWH to an ICHX there
+	 * are a number of simplifications you can make.
+	 *
+	 * Also you can page firmware hubs if an 8MB window isn't enough,
+	 * but I don't currently handle that case either.
+	 */
+ window->pdev = pdev;
+
+	/* Find a region contiguous with the end of the ROM window */
+ window->phys = 0;
+ pci_read_config_byte(pdev, FWH_DEC_EN1, &byte);
+ if (byte == 0xff) {
+ window->phys = 0xffc00000;
+ pci_read_config_byte(pdev, FWH_DEC_EN2, &byte);
+ if ((byte & 0x0f) == 0x0f) {
+ window->phys = 0xff400000;
+ }
+ else if ((byte & 0x0e) == 0x0e) {
+ window->phys = 0xff500000;
+ }
+ else if ((byte & 0x0c) == 0x0c) {
+ window->phys = 0xff600000;
+ }
+ else if ((byte & 0x08) == 0x08) {
+ window->phys = 0xff700000;
+ }
+ }
+ else if ((byte & 0xfe) == 0xfe) {
+ window->phys = 0xffc80000;
+ }
+ else if ((byte & 0xfc) == 0xfc) {
+ window->phys = 0xffd00000;
+ }
+ else if ((byte & 0xf8) == 0xf8) {
+ window->phys = 0xffd80000;
+ }
+ else if ((byte & 0xf0) == 0xf0) {
+ window->phys = 0xffe00000;
+ }
+ else if ((byte & 0xe0) == 0xe0) {
+ window->phys = 0xffe80000;
+ }
+ else if ((byte & 0xc0) == 0xc0) {
+ window->phys = 0xfff00000;
+ }
+ else if ((byte & 0x80) == 0x80) {
+ window->phys = 0xfff80000;
+ }
+
+ if (window->phys == 0) {
+ printk(KERN_ERR MOD_NAME ": Rom window is closed\n");
+ goto out;
+ }
+ window->phys -= 0x400000UL;
+ window->size = (0xffffffffUL - window->phys) + 1UL;
+
+ /* Enable writes through the rom window */
+ pci_read_config_word(pdev, BIOS_CNTL, &word);
+ if (!(word & 1) && (word & (1<<1))) {
+ /* The BIOS will generate an error if I enable
+ * this device, so don't even try.
+ */
+ printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n");
+ goto out;
+ }
+ pci_write_config_word(pdev, BIOS_CNTL, word | 1);
+
+ /*
+ * Try to reserve the window mem region. If this fails then
+	 * it is likely due to the window being "reserved" by the BIOS.
+ */
+ window->rsrc.name = MOD_NAME;
+ window->rsrc.start = window->phys;
+ window->rsrc.end = window->phys + window->size - 1;
+ window->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, &window->rsrc)) {
+ window->rsrc.parent = NULL;
+ printk(KERN_DEBUG MOD_NAME
+ ": %s(): Unable to register resource"
+ " 0x%.08lx-0x%.08lx - kernel bug?\n",
+ __func__,
+ window->rsrc.start, window->rsrc.end);
+ }
+
+ /* Map the firmware hub into my address space. */
+ window->virt = ioremap_nocache(window->phys, window->size);
+ if (!window->virt) {
+ printk(KERN_ERR MOD_NAME ": ioremap(%08lx, %08lx) failed\n",
+ window->phys, window->size);
+ goto out;
+ }
+
+ /* Get the first address to look for an rom chip at */
+ map_top = window->phys;
+ if ((window->phys & 0x3fffff) != 0) {
+ map_top = window->phys + 0x400000;
+ }
+#if 1
+ /* The probe sequence run over the firmware hub lock
+ * registers sets them to 0x7 (no access).
+ * Probe at most the last 4M of the address space.
+ */
+ if (map_top < 0xffc00000) {
+ map_top = 0xffc00000;
+ }
+#endif
+ /* Loop through and look for rom chips */
+ while((map_top - 1) < 0xffffffffUL) {
+ struct cfi_private *cfi;
+ unsigned long offset;
+ int i;
+
+ if (!map) {
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ }
+ if (!map) {
+			printk(KERN_ERR MOD_NAME ": kmalloc failed\n");
+ goto out;
+ }
+ memset(map, 0, sizeof(*map));
+ INIT_LIST_HEAD(&map->list);
+ map->map.name = map->map_name;
+ map->map.phys = map_top;
+ offset = map_top - window->phys;
+ map->map.virt = (void __iomem *)
+ (((unsigned long)(window->virt)) + offset);
+ map->map.size = 0xffffffffUL - map_top + 1UL;
+ /* Set the name of the map to the address I am trying */
+ sprintf(map->map_name, "%s @%08lx",
+ MOD_NAME, map->map.phys);
+
+ /* Firmware hubs only use vpp when being programmed
+ * in a factory setting. So in-place programming
+ * needs to use a different method.
+ */
+ for(map->map.bankwidth = 32; map->map.bankwidth;
+ map->map.bankwidth >>= 1)
+ {
+ char **probe_type;
+ /* Skip bankwidths that are not supported */
+ if (!map_bankwidth_supported(map->map.bankwidth))
+ continue;
+
+ /* Setup the map methods */
+ simple_map_init(&map->map);
+
+ /* Try all of the probe methods */
+ probe_type = rom_probe_types;
+ for(; *probe_type; probe_type++) {
+ map->mtd = do_map_probe(*probe_type, &map->map);
+ if (map->mtd)
+ goto found;
+ }
+ }
+ map_top += ROM_PROBE_STEP_SIZE;
+ continue;
+ found:
+ /* Trim the size if we are larger than the map */
+ if (map->mtd->size > map->map.size) {
+ printk(KERN_WARNING MOD_NAME
+ " rom(%u) larger than window(%lu). fixing...\n",
+ map->mtd->size, map->map.size);
+ map->mtd->size = map->map.size;
+ }
+ if (window->rsrc.parent) {
+ /*
+ * Registering the MTD device in iomem may not be possible
+ * if there is a BIOS "reserved" and BUSY range. If this
+ * fails then continue anyway.
+ */
+ map->rsrc.name = map->map_name;
+ map->rsrc.start = map->map.phys;
+ map->rsrc.end = map->map.phys + map->mtd->size - 1;
+ map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&window->rsrc, &map->rsrc)) {
+ printk(KERN_ERR MOD_NAME
+ ": cannot reserve MTD resource\n");
+ map->rsrc.parent = NULL;
+ }
+ }
+
+ /* Make the whole region visible in the map */
+ map->map.virt = window->virt;
+ map->map.phys = window->phys;
+ cfi = map->map.fldrv_priv;
+ for(i = 0; i < cfi->numchips; i++) {
+ cfi->chips[i].start += offset;
+ }
+
+		/* Now that the mtd device is complete, claim and export it */
+ map->mtd->owner = THIS_MODULE;
+ if (add_mtd_device(map->mtd)) {
+ map_destroy(map->mtd);
+ map->mtd = NULL;
+ goto out;
+ }
+
+
+ /* Calculate the new value of map_top */
+ map_top += map->mtd->size;
+
+ /* File away the map structure */
+ list_add(&map->list, &window->maps);
+ map = NULL;
+ }
+
+ out:
+ /* Free any left over map structures */
+ kfree(map);
+
+ /* See if I have any map structures */
+ if (list_empty(&window->maps)) {
+ ichxrom_cleanup(window);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+static void __devexit ichxrom_remove_one (struct pci_dev *pdev)
+{
+ struct ichxrom_window *window = &ichxrom_window;
+ ichxrom_cleanup(window);
+}
+
+static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
+ PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, },
+};
+
+#if 0
+MODULE_DEVICE_TABLE(pci, ichxrom_pci_tbl);
+
+static struct pci_driver ichxrom_driver = {
+ .name = MOD_NAME,
+ .id_table = ichxrom_pci_tbl,
+ .probe = ichxrom_init_one,
+ .remove = ichxrom_remove_one,
+};
+#endif
+
+static int __init init_ichxrom(void)
+{
+ struct pci_dev *pdev;
+ struct pci_device_id *id;
+
+ pdev = NULL;
+ for (id = ichxrom_pci_tbl; id->vendor; id++) {
+ pdev = pci_find_device(id->vendor, id->device, NULL);
+ if (pdev) {
+ break;
+ }
+ }
+ if (pdev) {
+ return ichxrom_init_one(pdev, &ichxrom_pci_tbl[0]);
+ }
+ return -ENXIO;
+#if 0
+ return pci_register_driver(&ichxrom_driver);
+#endif
+}
+
+static void __exit cleanup_ichxrom(void)
+{
+ ichxrom_remove_one(ichxrom_window.pdev);
+}
+
+module_init(init_ichxrom);
+module_exit(cleanup_ichxrom);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eric Biederman <ebiederman@lnxi.com>");
+MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ICHX southbridge");
diff --git a/linux-2.4.x/drivers/mtd/maps/impa7.c b/linux-2.4.x/drivers/mtd/maps/impa7.c
new file mode 100644
index 0000000..ba7f403
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/impa7.c
@@ -0,0 +1,161 @@
+/*
+ * $Id: impa7.c,v 1.14 2005/11/07 11:14:27 gleixner Exp $
+ *
+ * Handle mapping of the NOR flash on implementa A7 boards
+ *
+ * Copyright 2002 SYSGO Real-Time Solutions GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/config.h>
+
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
+#define WINDOW_ADDR0 0x00000000 /* physical properties of flash */
+#define WINDOW_SIZE0 0x00800000
+#define WINDOW_ADDR1 0x10000000 /* physical properties of flash */
+#define WINDOW_SIZE1 0x00800000
+#define NUM_FLASHBANKS 2
+#define BUSWIDTH 4
+
+/* can be { "cfi_probe", "jedec_probe", "map_rom", NULL } */
+#define PROBETYPES { "jedec_probe", NULL }
+
+#define MSG_PREFIX "impA7:" /* prefix for our printk()'s */
+#define MTDID "impa7-%d" /* for mtdparts= partitioning */
+
+static struct mtd_info *impa7_mtd[NUM_FLASHBANKS];
+
+
+static struct map_info impa7_map[NUM_FLASHBANKS] = {
+ {
+ .name = "impA7 NOR Flash Bank #0",
+ .size = WINDOW_SIZE0,
+ .bankwidth = BUSWIDTH,
+ },
+ {
+ .name = "impA7 NOR Flash Bank #1",
+ .size = WINDOW_SIZE1,
+ .bankwidth = BUSWIDTH,
+ },
+};
+
+#ifdef CONFIG_MTD_PARTITIONS
+
+/*
+ * MTD partitioning stuff
+ */
+static struct mtd_partition static_partitions[] =
+{
+ {
+ .name = "FileSystem",
+ .size = 0x800000,
+ .offset = 0x00000000
+ },
+};
+
+static int mtd_parts_nb[NUM_FLASHBANKS];
+static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
+
+#endif
+
+static const char *probes[] = { "cmdlinepart", NULL };
+
+int __init init_impa7(void)
+{
+ static const char *rom_probe_types[] = PROBETYPES;
+ const char **type;
+ const char *part_type = 0;
+ int i;
+ static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
+ { WINDOW_ADDR0, WINDOW_SIZE0 },
+ { WINDOW_ADDR1, WINDOW_SIZE1 },
+ };
+ int devicesfound = 0;
+
+ for(i=0; i<NUM_FLASHBANKS; i++)
+ {
+ printk(KERN_NOTICE MSG_PREFIX "probing 0x%08lx at 0x%08lx\n",
+ pt[i].size, pt[i].addr);
+
+ impa7_map[i].phys = pt[i].addr;
+ impa7_map[i].virt = ioremap(pt[i].addr, pt[i].size);
+ if (!impa7_map[i].virt) {
+ printk(MSG_PREFIX "failed to ioremap\n");
+ return -EIO;
+ }
+ simple_map_init(&impa7_map[i]);
+
+ impa7_mtd[i] = 0;
+ type = rom_probe_types;
+ for(; !impa7_mtd[i] && *type; type++) {
+ impa7_mtd[i] = do_map_probe(*type, &impa7_map[i]);
+ }
+
+ if (impa7_mtd[i]) {
+ impa7_mtd[i]->owner = THIS_MODULE;
+ devicesfound++;
+#ifdef CONFIG_MTD_PARTITIONS
+ mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
+ probes,
+ &mtd_parts[i],
+ 0);
+ if (mtd_parts_nb[i] > 0) {
+ part_type = "command line";
+ } else {
+ mtd_parts[i] = static_partitions;
+ mtd_parts_nb[i] = ARRAY_SIZE(static_partitions);
+ part_type = "static";
+ }
+
+ printk(KERN_NOTICE MSG_PREFIX
+ "using %s partition definition\n",
+ part_type);
+ add_mtd_partitions(impa7_mtd[i],
+ mtd_parts[i], mtd_parts_nb[i]);
+#else
+ add_mtd_device(impa7_mtd[i]);
+
+#endif
+ }
+ else
+ iounmap((void *)impa7_map[i].virt);
+ }
+ return devicesfound == 0 ? -ENXIO : 0;
+}
+
+static void __exit cleanup_impa7(void)
+{
+ int i;
+ for (i=0; i<NUM_FLASHBANKS; i++) {
+ if (impa7_mtd[i]) {
+#ifdef CONFIG_MTD_PARTITIONS
+ del_mtd_partitions(impa7_mtd[i]);
+#else
+ del_mtd_device(impa7_mtd[i]);
+#endif
+ map_destroy(impa7_mtd[i]);
+ iounmap((void *)impa7_map[i].virt);
+ impa7_map[i].virt = 0;
+ }
+ }
+}
+
+module_init(init_impa7);
+module_exit(cleanup_impa7);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Bartusek <pba@sysgo.de>");
+MODULE_DESCRIPTION("MTD map driver for implementa impA7");
diff --git a/linux-2.4.x/drivers/mtd/maps/integrator-flash-v24.c b/linux-2.4.x/drivers/mtd/maps/integrator-flash-v24.c
new file mode 100644
index 0000000..404c61b
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/integrator-flash-v24.c
@@ -0,0 +1,258 @@
+/*======================================================================
+
+  drivers/mtd/maps/integrator-flash-v24.c: ARM Flash Layout/Partitioning
+
+ Copyright (C) 2000 ARM Limited
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ This is access code for flashes using ARM's flash partitioning
+ standards.
+
+ $Id: integrator-flash-v24.c,v 1.15 2005/11/07 11:14:27 gleixner Exp $
+
+======================================================================*/
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+// board specific stuff - sorry, it should be in arch/arm/mach-*.
+#ifdef CONFIG_ARCH_INTEGRATOR
+
+#define FLASH_BASE INTEGRATOR_FLASH_BASE
+#define FLASH_SIZE INTEGRATOR_FLASH_SIZE
+
+#define FLASH_PART_SIZE 0x400000
+
+#define SC_CTRLC (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLC_OFFSET)
+#define SC_CTRLS (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLS_OFFSET)
+#define EBI_CSR1 (IO_ADDRESS(INTEGRATOR_EBI_BASE) + INTEGRATOR_EBI_CSR1_OFFSET)
+#define EBI_LOCK (IO_ADDRESS(INTEGRATOR_EBI_BASE) + INTEGRATOR_EBI_LOCK_OFFSET)
+
+/*
+ * Initialise the flash access systems:
+ * - Disable VPP
+ * - Assert WP
+ * - Set write enable bit in EBI reg
+ */
+static void armflash_flash_init(void)
+{
+ unsigned int tmp;
+
+ __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP, SC_CTRLC);
+
+ tmp = __raw_readl(EBI_CSR1) | INTEGRATOR_EBI_WRITE_ENABLE;
+ __raw_writel(tmp, EBI_CSR1);
+
+ if (!(__raw_readl(EBI_CSR1) & INTEGRATOR_EBI_WRITE_ENABLE)) {
+ __raw_writel(0xa05f, EBI_LOCK);
+ __raw_writel(tmp, EBI_CSR1);
+ __raw_writel(0, EBI_LOCK);
+ }
+}
+
+/*
+ * Shutdown the flash access systems:
+ * - Disable VPP
+ * - Assert WP
+ * - Clear write enable bit in EBI reg
+ */
+static void armflash_flash_exit(void)
+{
+ unsigned int tmp;
+
+ __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP, SC_CTRLC);
+
+ /*
+ * Clear the write enable bit in system controller EBI register.
+ */
+ tmp = __raw_readl(EBI_CSR1) & ~INTEGRATOR_EBI_WRITE_ENABLE;
+ __raw_writel(tmp, EBI_CSR1);
+
+ if (__raw_readl(EBI_CSR1) & INTEGRATOR_EBI_WRITE_ENABLE) {
+ __raw_writel(0xa05f, EBI_LOCK);
+ __raw_writel(tmp, EBI_CSR1);
+ __raw_writel(0, EBI_LOCK);
+ }
+}
+
+static void armflash_flash_wp(int on)
+{
+ unsigned int reg;
+
+ if (on)
+ reg = SC_CTRLC;
+ else
+ reg = SC_CTRLS;
+
+ __raw_writel(INTEGRATOR_SC_CTRL_nFLWP, reg);
+}
+
+static void armflash_set_vpp(struct map_info *map, int on)
+{
+ unsigned int reg;
+
+ if (on)
+ reg = SC_CTRLS;
+ else
+ reg = SC_CTRLC;
+
+ __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN, reg);
+}
+#endif
+
+#ifdef CONFIG_ARCH_P720T
+
+#define FLASH_BASE (0x04000000)
+#define FLASH_SIZE (64*1024*1024)
+
+#define FLASH_PART_SIZE (4*1024*1024)
+#define FLASH_BLOCK_SIZE (128*1024)
+
+static void armflash_flash_init(void)
+{
+}
+
+static void armflash_flash_exit(void)
+{
+}
+
+static void armflash_flash_wp(int on)
+{
+}
+
+static void armflash_set_vpp(struct map_info *map, int on)
+{
+}
+#endif
+
+
+static struct map_info armflash_map =
+{
+ .name = "AFS",
+ .set_vpp = armflash_set_vpp,
+ .phys = FLASH_BASE,
+};
+
+static struct mtd_info *mtd;
+static struct mtd_partition *parts;
+static const char *probes[] = { "RedBoot", "afs", NULL };
+
+static int __init armflash_cfi_init(void *base, u_int size)
+{
+ int ret;
+
+ armflash_flash_init();
+ armflash_flash_wp(1);
+
+ /*
+ * look for CFI based flash parts fitted to this board
+ */
+ armflash_map.size = size;
+ armflash_map.bankwidth = 4;
+ armflash_map.virt = (void __iomem *) base;
+
+ simple_map_init(&armflash_map);
+
+ /*
+ * Also, the CFI layer automatically works out what size
+ * of chips we have, and does the necessary identification
+ * for us automatically.
+ */
+ mtd = do_map_probe("cfi_probe", &armflash_map);
+ if (!mtd)
+ return -ENXIO;
+
+ mtd->owner = THIS_MODULE;
+
+ ret = parse_mtd_partitions(mtd, probes, &parts, (void *)0);
+ if (ret > 0) {
+ ret = add_mtd_partitions(mtd, parts, ret);
+ if (ret)
+ printk(KERN_ERR "mtd partition registration "
+ "failed: %d\n", ret);
+ }
+
+ /*
+ * If we got an error, free all resources.
+ */
+ if (ret < 0) {
+ del_mtd_partitions(mtd);
+ map_destroy(mtd);
+ }
+
+ return ret;
+}
+
+static void armflash_cfi_exit(void)
+{
+ if (mtd) {
+ del_mtd_partitions(mtd);
+ map_destroy(mtd);
+ }
+ if (parts)
+ kfree(parts);
+}
+
+static int __init armflash_init(void)
+{
+ int err = -EBUSY;
+ void *base;
+
+ if (request_mem_region(FLASH_BASE, FLASH_SIZE, "flash") == NULL)
+ goto out;
+
+ base = ioremap(FLASH_BASE, FLASH_SIZE);
+ err = -ENOMEM;
+ if (base == NULL)
+ goto release;
+
+ err = armflash_cfi_init(base, FLASH_SIZE);
+ if (err) {
+ iounmap(base);
+release:
+ release_mem_region(FLASH_BASE, FLASH_SIZE);
+ }
+out:
+ return err;
+}
+
+static void __exit armflash_exit(void)
+{
+ armflash_cfi_exit();
+ iounmap((void *)armflash_map.virt);
+ release_mem_region(FLASH_BASE, FLASH_SIZE);
+ armflash_flash_exit();
+}
+
+module_init(armflash_init);
+module_exit(armflash_exit);
+
+MODULE_AUTHOR("ARM Ltd");
+MODULE_DESCRIPTION("ARM Integrator CFI map driver");
+MODULE_LICENSE("GPL");
diff --git a/linux-2.4.x/drivers/mtd/maps/integrator-flash.c b/linux-2.4.x/drivers/mtd/maps/integrator-flash.c
index cd58f8f..4882b6c 100644
--- a/linux-2.4.x/drivers/mtd/maps/integrator-flash.c
+++ b/linux-2.4.x/drivers/mtd/maps/integrator-flash.c
@@ -1,27 +1,28 @@
/*======================================================================
- drivers/mtd/maps/armflash.c: ARM Flash Layout/Partitioning
-
+ drivers/mtd/maps/integrator-flash.c: ARM Integrator flash map driver
+
Copyright (C) 2000 ARM Limited
-
+ Copyright (C) 2003 Deep Blue Solutions Ltd.
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- This is access code for flashes using ARM's flash partitioning
+
+ This is access code for flashes using ARM's flash partitioning
standards.
- $Id: integrator-flash.c,v 1.7 2001/11/01 20:55:47 rmk Exp $
+ $Id: integrator-flash.c,v 1.21 2005/11/29 20:01:28 gleixner Exp $
======================================================================*/
@@ -31,268 +32,178 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ioport.h>
+#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <asm/mach/flash.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/system.h>
-extern int parse_afs_partitions(struct mtd_info *, struct mtd_partition **);
-
-// board specific stuff - sorry, it should be in arch/arm/mach-*.
-#ifdef CONFIG_ARCH_INTEGRATOR
-
-#define FLASH_BASE INTEGRATOR_FLASH_BASE
-#define FLASH_SIZE INTEGRATOR_FLASH_SIZE
-
-#define FLASH_PART_SIZE 0x400000
-
-#define SC_CTRLC (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLC_OFFSET)
-#define SC_CTRLS (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLS_OFFSET)
-#define EBI_CSR1 (IO_ADDRESS(INTEGRATOR_EBI_BASE) + INTEGRATOR_EBI_CSR1_OFFSET)
-#define EBI_LOCK (IO_ADDRESS(INTEGRATOR_EBI_BASE) + INTEGRATOR_EBI_LOCK_OFFSET)
-
-/*
- * Initialise the flash access systems:
- * - Disable VPP
- * - Assert WP
- * - Set write enable bit in EBI reg
- */
-static void armflash_flash_init(void)
-{
- unsigned int tmp;
-
- __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP, SC_CTRLC);
-
- tmp = __raw_readl(EBI_CSR1) | INTEGRATOR_EBI_WRITE_ENABLE;
- __raw_writel(tmp, EBI_CSR1);
-
- if (!(__raw_readl(EBI_CSR1) & INTEGRATOR_EBI_WRITE_ENABLE)) {
- __raw_writel(0xa05f, EBI_LOCK);
- __raw_writel(tmp, EBI_CSR1);
- __raw_writel(0, EBI_LOCK);
- }
-}
-
-/*
- * Shutdown the flash access systems:
- * - Disable VPP
- * - Assert WP
- * - Clear write enable bit in EBI reg
- */
-static void armflash_flash_exit(void)
-{
- unsigned int tmp;
-
- __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP, SC_CTRLC);
-
- /*
- * Clear the write enable bit in system controller EBI register.
- */
- tmp = __raw_readl(EBI_CSR1) & ~INTEGRATOR_EBI_WRITE_ENABLE;
- __raw_writel(tmp, EBI_CSR1);
-
- if (__raw_readl(EBI_CSR1) & INTEGRATOR_EBI_WRITE_ENABLE) {
- __raw_writel(0xa05f, EBI_LOCK);
- __raw_writel(tmp, EBI_CSR1);
- __raw_writel(0, EBI_LOCK);
- }
-}
-
-static void armflash_flash_wp(int on)
-{
- unsigned int reg;
-
- if (on)
- reg = SC_CTRLC;
- else
- reg = SC_CTRLS;
-
- __raw_writel(INTEGRATOR_SC_CTRL_nFLWP, reg);
-}
-
-static void armflash_set_vpp(struct map_info *map, int on)
-{
- unsigned int reg;
-
- if (on)
- reg = SC_CTRLS;
- else
- reg = SC_CTRLC;
-
- __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN, reg);
-}
-#endif
-
#ifdef CONFIG_ARCH_P720T
-
#define FLASH_BASE (0x04000000)
#define FLASH_SIZE (64*1024*1024)
-
-#define FLASH_PART_SIZE (4*1024*1024)
-#define FLASH_BLOCK_SIZE (128*1024)
-
-static void armflash_flash_init(void)
-{
-}
-
-static void armflash_flash_exit(void)
-{
-}
-
-static void armflash_flash_wp(int on)
-{
-}
-
-static void armflash_set_vpp(struct map_info *map, int on)
-{
-}
#endif
-static __u8 armflash_read8(struct map_info *map, unsigned long ofs)
-{
- return readb(ofs + map->map_priv_2);
-}
+struct armflash_info {
+ struct flash_platform_data *plat;
+ struct resource *res;
+ struct mtd_partition *parts;
+ struct mtd_info *mtd;
+ struct map_info map;
+};
-static __u16 armflash_read16(struct map_info *map, unsigned long ofs)
+static void armflash_set_vpp(struct map_info *map, int on)
{
- return readw(ofs + map->map_priv_2);
-}
+ struct armflash_info *info = container_of(map, struct armflash_info, map);
-static __u32 armflash_read32(struct map_info *map, unsigned long ofs)
-{
- return readl(ofs + map->map_priv_2);
+ if (info->plat && info->plat->set_vpp)
+ info->plat->set_vpp(on);
}
-static void armflash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy(to, (void *) (from + map->map_priv_2), len);
-}
+static const char *probes[] = { "cmdlinepart", "RedBoot", "afs", NULL };
-static void armflash_write8(struct map_info *map, __u8 d, unsigned long adr)
+static int armflash_probe(struct platform_device *dev)
{
- writeb(d, adr + map->map_priv_2);
-}
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ struct resource *res = dev->resource;
+ unsigned int size = res->end - res->start + 1;
+ struct armflash_info *info;
+ int err;
+ void __iomem *base;
-static void armflash_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- writew(d, adr + map->map_priv_2);
-}
-
-static void armflash_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- writel(d, adr + map->map_priv_2);
-}
-
-static void armflash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy((void *) (to + map->map_priv_2), from, len);
-}
+ info = kmalloc(sizeof(struct armflash_info), GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto out;
+ }
-static struct map_info armflash_map =
-{
- name: "AFS",
- read8: armflash_read8,
- read16: armflash_read16,
- read32: armflash_read32,
- copy_from: armflash_copy_from,
- write8: armflash_write8,
- write16: armflash_write16,
- write32: armflash_write32,
- copy_to: armflash_copy_to,
- set_vpp: armflash_set_vpp,
-};
+ memset(info, 0, sizeof(struct armflash_info));
-static struct mtd_info *mtd;
-static struct mtd_partition *parts;
+ info->plat = plat;
+ if (plat && plat->init) {
+ err = plat->init();
+ if (err)
+ goto no_resource;
+ }
-static int __init armflash_cfi_init(void *base, u_int size)
-{
- int ret;
+ info->res = request_mem_region(res->start, size, "armflash");
+ if (!info->res) {
+ err = -EBUSY;
+ goto no_resource;
+ }
- armflash_flash_init();
- armflash_flash_wp(1);
+ base = ioremap(res->start, size);
+ if (!base) {
+ err = -ENOMEM;
+ goto no_mem;
+ }
/*
* look for CFI based flash parts fitted to this board
*/
- armflash_map.size = size;
- armflash_map.buswidth = 4;
- armflash_map.map_priv_2 = (unsigned long) base;
+ info->map.size = size;
+ info->map.bankwidth = plat->width;
+ info->map.phys = res->start;
+ info->map.virt = base;
+ info->map.name = dev->dev.bus_id;
+ info->map.set_vpp = armflash_set_vpp;
+
+ simple_map_init(&info->map);
/*
* Also, the CFI layer automatically works out what size
* of chips we have, and does the necessary identification
* for us automatically.
*/
- mtd = do_map_probe("cfi_probe", &armflash_map);
- if (!mtd)
- return -ENXIO;
-
- mtd->module = THIS_MODULE;
-
- ret = parse_afs_partitions(mtd, &parts);
- if (ret > 0) {
- ret = add_mtd_partitions(mtd, parts, ret);
- if (ret)
- printk(KERN_ERR "mtd partition registration "
- "failed: %d\n", ret);
+ info->mtd = do_map_probe(plat->map_name, &info->map);
+ if (!info->mtd) {
+ err = -ENXIO;
+ goto no_device;
}
+ info->mtd->owner = THIS_MODULE;
+
+ err = parse_mtd_partitions(info->mtd, probes, &info->parts, 0);
+ if (err > 0) {
+ err = add_mtd_partitions(info->mtd, info->parts, err);
+ if (err)
+ printk(KERN_ERR
+ "mtd partition registration failed: %d\n", err);
+ }
+
+ if (err == 0)
+ platform_set_drvdata(dev, info);
+
/*
* If we got an error, free all resources.
*/
- if (ret < 0) {
- del_mtd_partitions(mtd);
- map_destroy(mtd);
+ if (err < 0) {
+ if (info->mtd) {
+ del_mtd_partitions(info->mtd);
+ map_destroy(info->mtd);
+ }
+ kfree(info->parts);
+
+ no_device:
+ iounmap(base);
+ no_mem:
+ release_mem_region(res->start, size);
+ no_resource:
+ if (plat && plat->exit)
+ plat->exit();
+ kfree(info);
}
-
- return ret;
+ out:
+ return err;
}
-static void armflash_cfi_exit(void)
+static int armflash_remove(struct platform_device *dev)
{
- if (mtd) {
- del_mtd_partitions(mtd);
- map_destroy(mtd);
- }
- if (parts)
- kfree(parts);
-}
+ struct armflash_info *info = platform_get_drvdata(dev);
-static int __init armflash_init(void)
-{
- int err = -EBUSY;
- void *base;
+ platform_set_drvdata(dev, NULL);
- if (request_mem_region(FLASH_BASE, FLASH_SIZE, "flash") == NULL)
- goto out;
+ if (info) {
+ if (info->mtd) {
+ del_mtd_partitions(info->mtd);
+ map_destroy(info->mtd);
+ }
+ kfree(info->parts);
- base = ioremap(FLASH_BASE, FLASH_SIZE);
- err = -ENOMEM;
- if (base == NULL)
- goto release;
+ iounmap(info->map.virt);
+ release_resource(info->res);
+ kfree(info->res);
- err = armflash_cfi_init(base, FLASH_SIZE);
- if (err) {
- iounmap(base);
-release:
- release_mem_region(FLASH_BASE, FLASH_SIZE);
+ if (info->plat && info->plat->exit)
+ info->plat->exit();
+
+ kfree(info);
}
-out:
- return err;
+
+ return 0;
+}
+
+static struct platform_driver armflash_driver = {
+ .probe = armflash_probe,
+ .remove = armflash_remove,
+ .driver = {
+ .name = "armflash",
+ },
+};
+
+static int __init armflash_init(void)
+{
+ return platform_driver_register(&armflash_driver);
}
static void __exit armflash_exit(void)
{
- armflash_cfi_exit();
- iounmap((void *)armflash_map.map_priv_2);
- release_mem_region(FLASH_BASE, FLASH_SIZE);
- armflash_flash_exit();
+ platform_driver_unregister(&armflash_driver);
}
module_init(armflash_init);
diff --git a/linux-2.4.x/drivers/mtd/maps/ipaq-flash.c b/linux-2.4.x/drivers/mtd/maps/ipaq-flash.c
new file mode 100644
index 0000000..a539727
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/ipaq-flash.c
@@ -0,0 +1,463 @@
+/*
+ * Flash memory access on iPAQ Handhelds (either SA1100 or PXA250 based)
+ *
+ * (C) 2000 Nicolas Pitre <nico@cam.org>
+ * (C) 2002 Hewlett-Packard Company <jamey.hicks@hp.com>
+ * (C) 2003 Christian Pellegrin <chri@ascensit.com>, <chri@infis.univ.ts.it>: concatenation of multiple flashes
+ *
+ * $Id: ipaq-flash.c,v 1.7 2005/11/29 20:01:28 gleixner Exp $
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+#include <asm/mach-types.h>
+#include <asm/system.h>
+#include <asm/errno.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#ifdef CONFIG_MTD_CONCAT
+#include <linux/mtd/concat.h>
+#endif
+
+#include <asm/hardware.h>
+#include <asm/arch-sa1100/h3600.h>
+#include <asm/io.h>
+
+
+#ifndef CONFIG_IPAQ_HANDHELD
+#error This is for iPAQ Handhelds only
+#endif
+#ifdef CONFIG_SA1100_JORNADA56X
+
+static void jornada56x_set_vpp(struct map_info *map, int vpp)
+{
+ if (vpp)
+ GPSR = GPIO_GPIO26;
+ else
+ GPCR = GPIO_GPIO26;
+ GPDR |= GPIO_GPIO26;
+}
+
+#endif
+
+#ifdef CONFIG_SA1100_JORNADA720
+
+static void jornada720_set_vpp(struct map_info *map, int vpp)
+{
+ if (vpp)
+ PPSR |= 0x80;
+ else
+ PPSR &= ~0x80;
+ PPDR |= 0x80;
+}
+
+#endif
+
+#define MAX_IPAQ_CS 2 /* Number of CS we are going to test */
+
+#define IPAQ_MAP_INIT(X) \
+ { \
+ name: "IPAQ flash " X, \
+ }
+
+
+static struct map_info ipaq_map[MAX_IPAQ_CS] = {
+ IPAQ_MAP_INIT("bank 1"),
+ IPAQ_MAP_INIT("bank 2")
+};
+
+static struct mtd_info *my_sub_mtd[MAX_IPAQ_CS] = {
+ NULL,
+ NULL
+};
+
+/*
+ * Here is the partition information for all known IPAQ-based devices.
+ * See include/linux/mtd/partitions.h for definition of the mtd_partition
+ * structure.
+ *
+ * The *_max_flash_size is the maximum possible mapped flash size which
+ * is not necessarily the actual flash size. It must be no more than
+ * the value specified in the "struct map_desc *_io_desc" mapping
+ * definition for the corresponding machine.
+ *
+ * Please keep these in alphabetical order, and formatted as per existing
+ * entries. Thanks.
+ */
+
+#ifdef CONFIG_IPAQ_HANDHELD
+static unsigned long h3xxx_max_flash_size = 0x04000000;
+static struct mtd_partition h3xxx_partitions[] = {
+ {
+ name: "H3XXX boot firmware",
+#ifndef CONFIG_LAB
+ size: 0x00040000,
+#else
+ size: 0x00080000,
+#endif
+ offset: 0,
+#ifndef CONFIG_LAB
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+#endif
+ },
+ {
+ name: "H3XXX root jffs2",
+#ifndef CONFIG_LAB
+ size: 0x2000000 - 2*0x40000, /* Warning, this is fixed later */
+ offset: 0x00040000,
+#else
+ size: 0x2000000 - 0x40000 - 0x80000, /* Warning, this is fixed later */
+ offset: 0x00080000,
+#endif
+ },
+ {
+ name: "asset",
+ size: 0x40000,
+ offset: 0x2000000 - 0x40000, /* Warning, this is fixed later */
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }
+};
+
+#ifndef CONFIG_MTD_CONCAT
+static struct mtd_partition h3xxx_partitions_bank2[] = {
+ /* this is used only on 2 CS machines when concat is not present */
+ {
+ name: "second H3XXX root jffs2",
+ size: 0x1000000 - 0x40000, /* Warning, this is fixed later */
+ offset: 0x00000000,
+ },
+ {
+ name: "second asset",
+ size: 0x40000,
+ offset: 0x1000000 - 0x40000, /* Warning, this is fixed later */
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }
+};
+#endif
+
+static DEFINE_SPINLOCK(ipaq_vpp_lock);
+
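+/*
+ * Both flash banks install h3xxx_set_vpp(), so the nesting count below
+ * keeps VPP asserted until the last caller has released it.
+ */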
+static void h3xxx_set_vpp(struct map_info *map, int vpp)
+{
+ static int nest = 0;
+
+ spin_lock(&ipaq_vpp_lock);
+ if (vpp)
+ nest++;
+ else
+ nest--;
+ if (nest)
+ assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 1);
+ else
+ assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, 0);
+ spin_unlock(&ipaq_vpp_lock);
+}
+
+#endif
+
+#if defined(CONFIG_SA1100_JORNADA56X) || defined(CONFIG_SA1100_JORNADA720)
+static unsigned long jornada_max_flash_size = 0x02000000;
+static struct mtd_partition jornada_partitions[] = {
+ {
+ name: "Jornada boot firmware",
+ size: 0x00040000,
+ offset: 0,
+ mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ name: "Jornada root jffs2",
+ size: MTDPART_SIZ_FULL,
+ offset: 0x00040000,
+ }
+};
+#endif
+
+
+static struct mtd_partition *parsed_parts;
+static struct mtd_info *mymtd;
+
+static unsigned long cs_phys[] = {
+#ifdef CONFIG_ARCH_SA1100
+ SA1100_CS0_PHYS,
+ SA1100_CS1_PHYS,
+ SA1100_CS2_PHYS,
+ SA1100_CS3_PHYS,
+ SA1100_CS4_PHYS,
+ SA1100_CS5_PHYS,
+#else
+ PXA_CS0_PHYS,
+ PXA_CS1_PHYS,
+ PXA_CS2_PHYS,
+ PXA_CS3_PHYS,
+ PXA_CS4_PHYS,
+ PXA_CS5_PHYS,
+#endif
+};
+
+static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+
+static int __init h1900_special_case(void);
+
+int __init ipaq_mtd_init(void)
+{
+ struct mtd_partition *parts = NULL;
+ int nb_parts = 0;
+ int parsed_nr_parts = 0;
+ const char *part_type;
+ int i; /* used when we have >1 flash chips */
+ unsigned long tot_flashsize = 0; /* used when we have >1 flash chips */
+
+ /* Default flash bankwidth */
+ // ipaq_map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
+
+ if (machine_is_h1900())
+ {
+ /* For our intents, the h1900 is not a real iPAQ, so we special-case it. */
+ return h1900_special_case();
+ }
+
+ if (machine_is_h3100() || machine_is_h1900())
+ for(i=0; i<MAX_IPAQ_CS; i++)
+ ipaq_map[i].bankwidth = 2;
+ else
+ for(i=0; i<MAX_IPAQ_CS; i++)
+ ipaq_map[i].bankwidth = 4;
+
+ /*
+ * Static partition definition selection
+ */
+ part_type = "static";
+
+ simple_map_init(&ipaq_map[0]);
+ simple_map_init(&ipaq_map[1]);
+
+#ifdef CONFIG_IPAQ_HANDHELD
+ if (machine_is_ipaq()) {
+ parts = h3xxx_partitions;
+ nb_parts = ARRAY_SIZE(h3xxx_partitions);
+ for(i=0; i<MAX_IPAQ_CS; i++) {
+ ipaq_map[i].size = h3xxx_max_flash_size;
+ ipaq_map[i].set_vpp = h3xxx_set_vpp;
+ ipaq_map[i].phys = cs_phys[i];
+ ipaq_map[i].virt = ioremap(cs_phys[i], 0x04000000);
+ if (machine_is_h3100 () || machine_is_h1900())
+ ipaq_map[i].bankwidth = 2;
+ }
+ if (machine_is_h3600()) {
+ /* No asset partition here */
+ h3xxx_partitions[1].size += 0x40000;
+ nb_parts--;
+ }
+ }
+#endif
+#ifdef CONFIG_ARCH_H5400
+ if (machine_is_h5400()) {
+ ipaq_map[0].size = 0x02000000;
+ ipaq_map[1].size = 0x02000000;
+ ipaq_map[1].phys = 0x02000000;
+ ipaq_map[1].virt = ipaq_map[0].virt + 0x02000000;
+ }
+#endif
+#ifdef CONFIG_ARCH_H1900
+ if (machine_is_h1900()) {
+ ipaq_map[0].size = 0x00400000;
+ ipaq_map[1].size = 0x02000000;
+ ipaq_map[1].phys = 0x00080000;
+ ipaq_map[1].virt = ipaq_map[0].virt + 0x00080000;
+ }
+#endif
+
+#ifdef CONFIG_SA1100_JORNADA56X
+ if (machine_is_jornada56x()) {
+ parts = jornada_partitions;
+ nb_parts = ARRAY_SIZE(jornada_partitions);
+ ipaq_map[0].size = jornada_max_flash_size;
+ ipaq_map[0].set_vpp = jornada56x_set_vpp;
+ ipaq_map[0].virt = (__u32)ioremap(0x0, 0x04000000);
+ }
+#endif
+#ifdef CONFIG_SA1100_JORNADA720
+ if (machine_is_jornada720()) {
+ parts = jornada_partitions;
+ nb_parts = ARRAY_SIZE(jornada_partitions);
+ ipaq_map[0].size = jornada_max_flash_size;
+ ipaq_map[0].set_vpp = jornada720_set_vpp;
+ }
+#endif
+
+
+ if (machine_is_ipaq()) { /* for iPAQs only */
+ for(i=0; i<MAX_IPAQ_CS; i++) {
+ printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with CFI.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
+ my_sub_mtd[i] = do_map_probe("cfi_probe", &ipaq_map[i]);
+ if (!my_sub_mtd[i]) {
+ printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[i].bankwidth*8, ipaq_map[i].virt);
+ my_sub_mtd[i] = do_map_probe("jedec_probe", &ipaq_map[i]);
+ }
+ if (!my_sub_mtd[i]) {
+ printk(KERN_NOTICE "iPAQ flash: failed to find flash.\n");
+ if (i)
+ break;
+ else
+ return -ENXIO;
+ } else
+ printk(KERN_NOTICE "iPAQ flash: found %d bytes\n", my_sub_mtd[i]->size);
+
+ /* do we really need this debugging? --joshua 20030703 */
+ // printk("my_sub_mtd[%d]=%p\n", i, my_sub_mtd[i]);
+ my_sub_mtd[i]->owner = THIS_MODULE;
+ tot_flashsize += my_sub_mtd[i]->size;
+ }
+#ifdef CONFIG_MTD_CONCAT
+ /* fix the asset location */
+# ifdef CONFIG_LAB
+ h3xxx_partitions[1].size = tot_flashsize - 0x40000 - 0x80000 /* extra big boot block */;
+# else
+ h3xxx_partitions[1].size = tot_flashsize - 2 * 0x40000;
+# endif
+ h3xxx_partitions[2].offset = tot_flashsize - 0x40000;
+ /* and concat the devices */
+ mymtd = mtd_concat_create(&my_sub_mtd[0], i,
+ "ipaq");
+ if (!mymtd) {
+ printk("Cannot create iPAQ concat device\n");
+ return -ENXIO;
+ }
+#else
+ mymtd = my_sub_mtd[0];
+
+ /*
+		 * In the very near future, command line partition parsing
+ * will use the device name as 'mtd-id' instead of a value
+ * passed to the parse_cmdline_partitions() routine. Since
+ * the bootldr says 'ipaq', make sure it continues to work.
+ */
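+		/*
+		 * Illustrative cmdlinepart usage with this mtd-id (partition
+		 * layout is hypothetical):  mtdparts=ipaq:256k(boot)ro,-(root)
+		 */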
+ mymtd->name = "ipaq";
+
+ if ((machine_is_h3600())) {
+# ifdef CONFIG_LAB
+ h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x80000;
+# else
+ h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000;
+# endif
+ nb_parts = 2;
+ } else {
+# ifdef CONFIG_LAB
+ h3xxx_partitions[1].size = my_sub_mtd[0]->size - 0x40000 - 0x80000; /* extra big boot block */
+# else
+ h3xxx_partitions[1].size = my_sub_mtd[0]->size - 2*0x40000;
+# endif
+ h3xxx_partitions[2].offset = my_sub_mtd[0]->size - 0x40000;
+ }
+
+ if (my_sub_mtd[1]) {
+# ifdef CONFIG_LAB
+ h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x80000;
+# else
+ h3xxx_partitions_bank2[0].size = my_sub_mtd[1]->size - 0x40000;
+# endif
+ h3xxx_partitions_bank2[1].offset = my_sub_mtd[1]->size - 0x40000;
+ }
+#endif
+ }
+ else {
+ /*
+ * Now let's probe for the actual flash. Do it here since
+ * specific machine settings might have been set above.
+ */
+ printk(KERN_NOTICE "IPAQ flash: probing %d-bit flash bus, window=%lx\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
+ mymtd = do_map_probe("cfi_probe", &ipaq_map[0]);
+ if (!mymtd)
+ return -ENXIO;
+ mymtd->owner = THIS_MODULE;
+ }
+
+
+ /*
+ * Dynamic partition selection stuff (might override the static ones)
+ */
+
+ i = parse_mtd_partitions(mymtd, part_probes, &parsed_parts, 0);
+
+ if (i > 0) {
+ nb_parts = parsed_nr_parts = i;
+ parts = parsed_parts;
+ part_type = "dynamic";
+ }
+
+ if (!parts) {
+ printk(KERN_NOTICE "IPAQ flash: no partition info available, registering whole flash at once\n");
+ add_mtd_device(mymtd);
+#ifndef CONFIG_MTD_CONCAT
+ if (my_sub_mtd[1])
+ add_mtd_device(my_sub_mtd[1]);
+#endif
+ } else {
+ printk(KERN_NOTICE "Using %s partition definition\n", part_type);
+ add_mtd_partitions(mymtd, parts, nb_parts);
+#ifndef CONFIG_MTD_CONCAT
+ if (my_sub_mtd[1])
+ add_mtd_partitions(my_sub_mtd[1], h3xxx_partitions_bank2, ARRAY_SIZE(h3xxx_partitions_bank2));
+#endif
+ }
+
+ return 0;
+}
+
+static void __exit ipaq_mtd_cleanup(void)
+{
+ int i;
+
+ if (mymtd) {
+ del_mtd_partitions(mymtd);
+#ifndef CONFIG_MTD_CONCAT
+ if (my_sub_mtd[1])
+ del_mtd_partitions(my_sub_mtd[1]);
+#endif
+ map_destroy(mymtd);
+#ifdef CONFIG_MTD_CONCAT
+ for(i=0; i<MAX_IPAQ_CS; i++)
+#else
+ for(i=1; i<MAX_IPAQ_CS; i++)
+#endif
+ {
+ if (my_sub_mtd[i])
+ map_destroy(my_sub_mtd[i]);
+ }
+ kfree(parsed_parts);
+ }
+}
+
+static int __init h1900_special_case(void)
+{
+ /* The iPAQ h1900 is a special case - it has weird ROM. */
+ simple_map_init(&ipaq_map[0]);
+ ipaq_map[0].size = 0x80000;
+ ipaq_map[0].set_vpp = h3xxx_set_vpp;
+ ipaq_map[0].phys = 0x0;
+ ipaq_map[0].virt = ioremap(0x0, 0x04000000);
+ ipaq_map[0].bankwidth = 2;
+
+ printk(KERN_NOTICE "iPAQ flash: probing %d-bit flash bus, window=%lx with JEDEC.\n", ipaq_map[0].bankwidth*8, ipaq_map[0].virt);
+ mymtd = do_map_probe("jedec_probe", &ipaq_map[0]);
+ if (!mymtd)
+ return -ENODEV;
+ add_mtd_device(mymtd);
+ printk(KERN_NOTICE "iPAQ flash: registered h1910 flash\n");
+
+ return 0;
+}
+
+module_init(ipaq_mtd_init);
+module_exit(ipaq_mtd_cleanup);
+
+MODULE_AUTHOR("Jamey Hicks");
+MODULE_DESCRIPTION("IPAQ CFI map driver");
+MODULE_LICENSE("MIT");
diff --git a/linux-2.4.x/drivers/mtd/maps/iq80310.c b/linux-2.4.x/drivers/mtd/maps/iq80310.c
index 3a30113..034542b 100644
--- a/linux-2.4.x/drivers/mtd/maps/iq80310.c
+++ b/linux-2.4.x/drivers/mtd/maps/iq80310.c
@@ -1,11 +1,11 @@
/*
- * $Id: iq80310.c,v 1.9 2002/01/01 22:45:02 rmk Exp $
+ * $Id: iq80310.c,v 1.22 2005/11/29 20:01:28 gleixner Exp $
*
* Mapping for the Intel XScale IQ80310 evaluation board
*
* Author: Nicolas Pitre
* Copyright: (C) 2001 MontaVista Software Inc.
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -14,6 +14,8 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -26,127 +28,72 @@
static struct mtd_info *mymtd;
-static __u8 iq80310_read8(struct map_info *map, unsigned long ofs)
-{
- return *(__u8 *)(map->map_priv_1 + ofs);
-}
-
-static __u16 iq80310_read16(struct map_info *map, unsigned long ofs)
-{
- return *(__u16 *)(map->map_priv_1 + ofs);
-}
-
-static __u32 iq80310_read32(struct map_info *map, unsigned long ofs)
-{
- return *(__u32 *)(map->map_priv_1 + ofs);
-}
-
-static void iq80310_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy(to, (void *)(map->map_priv_1 + from), len);
-}
-
-static void iq80310_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- *(__u8 *)(map->map_priv_1 + adr) = d;
-}
-
-static void iq80310_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- *(__u16 *)(map->map_priv_1 + adr) = d;
-}
-
-static void iq80310_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- *(__u32 *)(map->map_priv_1 + adr) = d;
-}
-
-static void iq80310_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy((void *)(map->map_priv_1 + to), from, len);
-}
-
static struct map_info iq80310_map = {
- name: "IQ80310 flash",
- size: WINDOW_SIZE,
- buswidth: BUSWIDTH,
- read8: iq80310_read8,
- read16: iq80310_read16,
- read32: iq80310_read32,
- copy_from: iq80310_copy_from,
- write8: iq80310_write8,
- write16: iq80310_write16,
- write32: iq80310_write32,
- copy_to: iq80310_copy_to
+ .name = "IQ80310 flash",
+ .size = WINDOW_SIZE,
+ .bankwidth = BUSWIDTH,
+ .phys = WINDOW_ADDR
};
static struct mtd_partition iq80310_partitions[4] = {
{
- name: "Firmware",
- size: 0x00080000,
- offset: 0,
- mask_flags: MTD_WRITEABLE /* force read-only */
+ .name = "Firmware",
+ .size = 0x00080000,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE /* force read-only */
},{
- name: "Kernel",
- size: 0x000a0000,
- offset: 0x00080000,
+ .name = "Kernel",
+ .size = 0x000a0000,
+ .offset = 0x00080000,
},{
- name: "Filesystem",
- size: 0x00600000,
- offset: 0x00120000
+ .name = "Filesystem",
+ .size = 0x00600000,
+ .offset = 0x00120000
},{
- name: "RedBoot",
- size: 0x000e0000,
- offset: 0x00720000,
- mask_flags: MTD_WRITEABLE
+ .name = "RedBoot",
+ .size = 0x000e0000,
+ .offset = 0x00720000,
+ .mask_flags = MTD_WRITEABLE
}
};
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
static struct mtd_info *mymtd;
static struct mtd_partition *parsed_parts;
-
-extern int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts);
+static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
static int __init init_iq80310(void)
{
struct mtd_partition *parts;
int nb_parts = 0;
int parsed_nr_parts = 0;
- char *part_type = "static";
+ int ret;
- iq80310_map.map_priv_1 = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE);
- if (!iq80310_map.map_priv_1) {
+ iq80310_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
+ if (!iq80310_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
+ simple_map_init(&iq80310_map);
+
mymtd = do_map_probe("cfi_probe", &iq80310_map);
if (!mymtd) {
- iounmap((void *)iq80310_map.map_priv_1);
+ iounmap((void *)iq80310_map.virt);
return -ENXIO;
}
- mymtd->module = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
-#ifdef CONFIG_MTD_REDBOOT_PARTS
- if (parsed_nr_parts == 0) {
- int ret = parse_redboot_partitions(mymtd, &parsed_parts);
+ ret = parse_mtd_partitions(mymtd, probes, &parsed_parts, 0);
- if (ret > 0) {
- part_type = "RedBoot";
- parsed_nr_parts = ret;
- }
- }
-#endif
+ if (ret > 0)
+ parsed_nr_parts = ret;
if (parsed_nr_parts > 0) {
parts = parsed_parts;
nb_parts = parsed_nr_parts;
} else {
parts = iq80310_partitions;
- nb_parts = NB_OF(iq80310_partitions);
+ nb_parts = ARRAY_SIZE(iq80310_partitions);
}
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
add_mtd_partitions(mymtd, parts, nb_parts);
return 0;
}
@@ -156,11 +103,10 @@ static void __exit cleanup_iq80310(void)
if (mymtd) {
del_mtd_partitions(mymtd);
map_destroy(mymtd);
- if (parsed_parts)
- kfree(parsed_parts);
+ kfree(parsed_parts);
}
- if (iq80310_map.map_priv_1)
- iounmap((void *)iq80310_map.map_priv_1);
+ if (iq80310_map.virt)
+ iounmap((void *)iq80310_map.virt);
}
module_init(init_iq80310);
diff --git a/linux-2.4.x/drivers/mtd/maps/ixp2000.c b/linux-2.4.x/drivers/mtd/maps/ixp2000.c
new file mode 100644
index 0000000..3749231
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/ixp2000.c
@@ -0,0 +1,275 @@
+/*
+ * $Id: ixp2000.c,v 1.11 2005/11/29 20:01:28 gleixner Exp $
+ *
+ * drivers/mtd/maps/ixp2000.c
+ *
+ * Mapping for the Intel XScale IXP2000 based systems
+ *
+ * Copyright (C) 2002 Intel Corp.
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * Original Author: Naeem M Afzal <naeem.m.afzal@intel.com>
+ * Maintainer: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include <asm/mach/flash.h>
+
+#include <linux/reboot.h>
+
+struct ixp2000_flash_info {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct mtd_partition *partitions;
+ struct resource *res;
+ int nr_banks;
+};
+
+static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs)
+{
+ unsigned long (*set_bank)(unsigned long) =
+ (unsigned long(*)(unsigned long))map->map_priv_2;
+
+ return (set_bank ? set_bank(ofs) : ofs);
+}
+
+#ifdef __ARMEB__
+/*
+ * Rev A0 and A1 of IXP2400 silicon have a broken addressing unit which
+ * causes the lower address bits to be XORed with 0x11 on 8 bit accesses
+ * and XORed with 0x10 on 16 bit accesses. See the spec update, erratum 44.
+ */
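+/*
+ * (The 0x11/0x10 values above appear to be binary masks; the 8-bit fix-up
+ * in address_fix8_write() below XORs the two low address bits, addr ^ 3.)
+ */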
+static int erratum44_workaround = 0;
+
+static inline unsigned long address_fix8_write(unsigned long addr)
+{
+ if (erratum44_workaround) {
+ return (addr ^ 3);
+ }
+ return addr;
+}
+#else
+
+#define address_fix8_write(x) (x)
+#endif
+
+static map_word ixp2000_flash_read8(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+
+ val.x[0] = *((u8 *)(map->map_priv_1 + flash_bank_setup(map, ofs)));
+ return val;
+}
+
+/*
+ * We can't use the standard memcpy due to the broken SlowPort
+ * address translation on rev A0 and A1 silicon and the fact that
+ * we have banked flash.
+ */
+static void ixp2000_flash_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ from = flash_bank_setup(map, from);
+ while(len--)
+ *(__u8 *) to++ = *(__u8 *)(map->map_priv_1 + from++);
+}
+
+static void ixp2000_flash_write8(struct map_info *map, map_word d, unsigned long ofs)
+{
+ *(__u8 *) (address_fix8_write(map->map_priv_1 +
+ flash_bank_setup(map, ofs))) = d.x[0];
+}
+
+static void ixp2000_flash_copy_to(struct map_info *map, unsigned long to,
+ const void *from, ssize_t len)
+{
+ to = flash_bank_setup(map, to);
+ while(len--) {
+ unsigned long tmp = address_fix8_write(map->map_priv_1 + to++);
+ *(__u8 *)(tmp) = *(__u8 *)(from++);
+ }
+}
+
+
+static int ixp2000_flash_remove(struct platform_device *dev)
+{
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ struct ixp2000_flash_info *info = platform_get_drvdata(dev);
+
+ platform_set_drvdata(dev, NULL);
+
+ if(!info)
+ return 0;
+
+ if (info->mtd) {
+ del_mtd_partitions(info->mtd);
+ map_destroy(info->mtd);
+ }
+ if (info->map.map_priv_1)
+ iounmap((void *) info->map.map_priv_1);
+
+ kfree(info->partitions);
+
+ if (info->res) {
+ release_resource(info->res);
+ kfree(info->res);
+ }
+
+ if (plat->exit)
+ plat->exit();
+
+ return 0;
+}
+
+
+static int ixp2000_flash_probe(struct platform_device *dev)
+{
+ static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+ struct ixp2000_flash_data *ixp_data = dev->dev.platform_data;
+ struct flash_platform_data *plat;
+ struct ixp2000_flash_info *info;
+ unsigned long window_size;
+ int err = -1;
+
+ if (!ixp_data)
+ return -ENODEV;
+
+ plat = ixp_data->platform_data;
+ if (!plat)
+ return -ENODEV;
+
+ window_size = dev->resource->end - dev->resource->start + 1;
+ dev_info(&dev->dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n",
+ ixp_data->nr_banks, ((u32)window_size >> 20));
+
+ if (plat->width != 1) {
+ dev_err(&dev->dev, "IXP2000 MTD map only supports 8-bit mode, asking for %d\n",
+ plat->width * 8);
+ return -EIO;
+ }
+
+ info = kmalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL);
+ if(!info) {
+ err = -ENOMEM;
+ goto Error;
+ }
+ memzero(info, sizeof(struct ixp2000_flash_info));
+
+ platform_set_drvdata(dev, info);
+
+ /*
+ * Tell the MTD layer we're not 1:1 mapped so that it does
+ * not attempt to do a direct access on us.
+ */
+ info->map.phys = NO_XIP;
+
+ info->nr_banks = ixp_data->nr_banks;
+ info->map.size = ixp_data->nr_banks * window_size;
+ info->map.bankwidth = 1;
+
+ /*
+	 * map_priv_2 is used to store a ptr to the bank_setup routine
+ */
+ info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup;
+
+ info->map.name = dev->dev.bus_id;
+ info->map.read = ixp2000_flash_read8;
+ info->map.write = ixp2000_flash_write8;
+ info->map.copy_from = ixp2000_flash_copy_from;
+ info->map.copy_to = ixp2000_flash_copy_to;
+
+ info->res = request_mem_region(dev->resource->start,
+ dev->resource->end - dev->resource->start + 1,
+ dev->dev.bus_id);
+ if (!info->res) {
+ dev_err(&dev->dev, "Could not reserve memory region\n");
+ err = -ENOMEM;
+ goto Error;
+ }
+
+ info->map.map_priv_1 = (unsigned long) ioremap(dev->resource->start,
+ dev->resource->end - dev->resource->start + 1);
+ if (!info->map.map_priv_1) {
+ dev_err(&dev->dev, "Failed to ioremap flash region\n");
+ err = -EIO;
+ goto Error;
+ }
+
+#if defined(__ARMEB__)
+ /*
+ * Enable erratum 44 workaround for NPUs with broken slowport
+ */
+
+ erratum44_workaround = ixp2000_has_broken_slowport();
+ dev_info(&dev->dev, "Erratum 44 workaround %s\n",
+ erratum44_workaround ? "enabled" : "disabled");
+#endif
+
+ info->mtd = do_map_probe(plat->map_name, &info->map);
+ if (!info->mtd) {
+ dev_err(&dev->dev, "map_probe failed\n");
+ err = -ENXIO;
+ goto Error;
+ }
+ info->mtd->owner = THIS_MODULE;
+
+ err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
+ if (err > 0) {
+ err = add_mtd_partitions(info->mtd, info->partitions, err);
+ if(err)
+ dev_err(&dev->dev, "Could not parse partitions\n");
+ }
+
+ if (err)
+ goto Error;
+
+ return 0;
+
+Error:
+ ixp2000_flash_remove(dev);
+ return err;
+}
+
+static struct platform_driver ixp2000_flash_driver = {
+ .probe = ixp2000_flash_probe,
+ .remove = ixp2000_flash_remove,
+ .driver = {
+ .name = "IXP2000-Flash",
+ },
+};
+
+static int __init ixp2000_flash_init(void)
+{
+ return platform_driver_register(&ixp2000_flash_driver);
+}
+
+static void __exit ixp2000_flash_exit(void)
+{
+ platform_driver_unregister(&ixp2000_flash_driver);
+}
+
+module_init(ixp2000_flash_init);
+module_exit(ixp2000_flash_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
+
diff --git a/linux-2.4.x/drivers/mtd/maps/ixp4xx.c b/linux-2.4.x/drivers/mtd/maps/ixp4xx.c
new file mode 100644
index 0000000..fc072d3
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/ixp4xx.c
@@ -0,0 +1,297 @@
+/*
+ * $Id: ixp4xx.c,v 1.14 2005/11/29 20:01:28 gleixner Exp $
+ *
+ * drivers/mtd/maps/ixp4xx.c
+ *
+ * MTD Map file for IXP4XX based systems. Please do not make per-board
+ * changes in here. If your board needs special setup, do it in your
+ * platform level code in arch/arm/mach-ixp4xx/board-setup.c
+ *
+ * Original Author: Intel Corporation
+ * Maintainer: Deepak Saxena <dsaxena@mvista.com>
+ *
+ * Copyright (C) 2002 Intel Corporation
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/mach/flash.h>
+
+#include <linux/reboot.h>
+
+/*
+ * Read/write a 16 bit word from flash address 'addr'.
+ *
+ * When the cpu is in little-endian mode it swizzles the address lines
+ * ('address coherency') so we need to undo the swizzling to ensure commands
+ * and the like end up on the correct flash address.
+ *
+ * To further complicate matters, due to the way the expansion bus controller
+ * handles 32 bit reads, the byte stream ABCD is stored on the flash as:
+ * D15 D0
+ * +---+---+
+ * | A | B | 0
+ * +---+---+
+ * | C | D | 2
+ * +---+---+
+ * This means that on LE systems each 16 bit word must be swapped. Note that
+ * this requires CONFIG_MTD_CFI_BE_BYTE_SWAP to be enabled to 'unswap' the CFI
+ * data and other flash commands which are always in D7-D0.
+ */
+#ifndef __ARMEB__
+#ifndef CONFIG_MTD_CFI_BE_BYTE_SWAP
+# error CONFIG_MTD_CFI_BE_BYTE_SWAP required
+#endif
+
+static inline u16 flash_read16(void __iomem *addr)
+{
+ return be16_to_cpu(__raw_readw((void __iomem *)((unsigned long)addr ^ 0x2)));
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(cpu_to_be16(d), (void __iomem *)((unsigned long)addr ^ 0x2));
+}
+
+#define BYTE0(h) ((h) & 0xFF)
+#define BYTE1(h) (((h) >> 8) & 0xFF)
+
+#else
+
+static inline u16 flash_read16(const void __iomem *addr)
+{
+ return __raw_readw(addr);
+}
+
+static inline void flash_write16(u16 d, void __iomem *addr)
+{
+ __raw_writew(d, addr);
+}
+
+#define BYTE0(h) (((h) >> 8) & 0xFF)
+#define BYTE1(h) ((h) & 0xFF)
+#endif
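
/*
 * Putting the two little-endian fix-ups above together: a user-space
 * model only, where 'flash' stands in for the ioremapped window and the
 * byte arithmetic mirrors flash_read16()/be16_to_cpu(), not the real
 * expansion-bus access.
 */
#include <stdint.h>

static uint16_t model_read16(const uint8_t *flash, unsigned long ofs)
{
	unsigned long fixed = ofs ^ 0x2;                  /* undo address coherency */
	uint16_t raw = (uint16_t)(flash[fixed] | (flash[fixed + 1] << 8));
	return (uint16_t)((raw >> 8) | (raw << 8));       /* byte swap, as be16_to_cpu() */
}
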
+
+static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs)
+{
+ map_word val;
+ val.x[0] = flash_read16(map->virt + ofs);
+ return val;
+}
+
+/*
+ * The IXP4xx expansion bus only allows 16-bit wide accesses
+ * when attached to a 16-bit wide device (such as the 28F128J3A),
+ * so we can't just memcpy_fromio().
+ */
+static void ixp4xx_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ u8 *dest = (u8 *) to;
+ void __iomem *src = map->virt + from;
+
+ if (len <= 0)
+ return;
+
+ if (from & 1) {
+ *dest++ = BYTE1(flash_read16(src));
+ src++;
+ --len;
+ }
+
+ while (len >= 2) {
+ u16 data = flash_read16(src);
+ *dest++ = BYTE0(data);
+ *dest++ = BYTE1(data);
+ src += 2;
+ len -= 2;
+ }
+
+ if (len > 0)
+ *dest++ = BYTE0(flash_read16(src));
+}
+
+/*
+ * Unaligned writes are ignored, causing the 8-bit
+ * probe to fail and proceed to the 16-bit probe (which succeeds).
+ */
+static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ if (!(adr & 1))
+ flash_write16(d.x[0], map->virt + adr);
+}
+
+/*
+ * Fast write16 function without the probing check above
+ */
+static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ flash_write16(d.x[0], map->virt + adr);
+}
+
+struct ixp4xx_flash_info {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct mtd_partition *partitions;
+ struct resource *res;
+};
+
+static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+static int ixp4xx_flash_remove(struct platform_device *dev)
+{
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ struct ixp4xx_flash_info *info = platform_get_drvdata(dev);
+
+ platform_set_drvdata(dev, NULL);
+
+ if(!info)
+ return 0;
+
+ if (info->mtd) {
+ del_mtd_partitions(info->mtd);
+ map_destroy(info->mtd);
+ }
+ if (info->map.virt)
+ iounmap(info->map.virt);
+
+ kfree(info->partitions);
+
+ if (info->res) {
+ release_resource(info->res);
+ kfree(info->res);
+ }
+
+ if (plat->exit)
+ plat->exit();
+
+ return 0;
+}
+
+static int ixp4xx_flash_probe(struct platform_device *dev)
+{
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ struct ixp4xx_flash_info *info;
+ int err = -1;
+
+ if (!plat)
+ return -ENODEV;
+
+ if (plat->init) {
+ err = plat->init();
+ if (err)
+ return err;
+ }
+
+ info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
+ if(!info) {
+ err = -ENOMEM;
+ goto Error;
+ }
+ memzero(info, sizeof(struct ixp4xx_flash_info));
+
+ platform_set_drvdata(dev, info);
+
+ /*
+ * Tell the MTD layer we're not 1:1 mapped so that it does
+ * not attempt to do a direct access on us.
+ */
+ info->map.phys = NO_XIP;
+ info->map.size = dev->resource->end - dev->resource->start + 1;
+
+ /*
+ * We only support 16-bit accesses for now. If and when
+	 * any board uses 8-bit access, we'll fix up the driver to
+ * handle that.
+ */
+ info->map.bankwidth = 2;
+ info->map.name = dev->dev.bus_id;
+ info->map.read = ixp4xx_read16,
+ info->map.write = ixp4xx_probe_write16,
+ info->map.copy_from = ixp4xx_copy_from,
+
+ info->res = request_mem_region(dev->resource->start,
+ dev->resource->end - dev->resource->start + 1,
+ "IXP4XXFlash");
+ if (!info->res) {
+ printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
+ err = -ENOMEM;
+ goto Error;
+ }
+
+ info->map.virt = ioremap(dev->resource->start,
+ dev->resource->end - dev->resource->start + 1);
+ if (!info->map.virt) {
+ printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
+ err = -EIO;
+ goto Error;
+ }
+
+ info->mtd = do_map_probe(plat->map_name, &info->map);
+ if (!info->mtd) {
+ printk(KERN_ERR "IXP4XXFlash: map_probe failed\n");
+ err = -ENXIO;
+ goto Error;
+ }
+ info->mtd->owner = THIS_MODULE;
+
+ /* Use the fast version */
+ info->map.write = ixp4xx_write16,
+
+ err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
+ if (err > 0) {
+ err = add_mtd_partitions(info->mtd, info->partitions, err);
+ if(err)
+ printk(KERN_ERR "Could not parse partitions\n");
+ }
+
+ if (err)
+ goto Error;
+
+ return 0;
+
+Error:
+ ixp4xx_flash_remove(dev);
+ return err;
+}
+
+static struct platform_driver ixp4xx_flash_driver = {
+ .probe = ixp4xx_flash_probe,
+ .remove = ixp4xx_flash_remove,
+ .driver = {
+ .name = "IXP4XX-Flash",
+ },
+};
+
+static int __init ixp4xx_flash_init(void)
+{
+ return platform_driver_register(&ixp4xx_flash_driver);
+}
+
+static void __exit ixp4xx_flash_exit(void)
+{
+ platform_driver_unregister(&ixp4xx_flash_driver);
+}
+
+
+module_init(ixp4xx_flash_init);
+module_exit(ixp4xx_flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
+MODULE_AUTHOR("Deepak Saxena");
diff --git a/linux-2.4.x/drivers/mtd/maps/l440gx.c b/linux-2.4.x/drivers/mtd/maps/l440gx.c
index a7b906e..851bf95 100644
--- a/linux-2.4.x/drivers/mtd/maps/l440gx.c
+++ b/linux-2.4.x/drivers/mtd/maps/l440gx.c
@@ -1,5 +1,5 @@
/*
- * $Id: l440gx.c,v 1.8 2002/01/10 20:27:40 eric Exp $
+ * $Id: l440gx.c,v 1.18 2005/11/07 11:14:27 gleixner Exp $
*
* BIOS Flash chip on Intel 440GX board.
*
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -27,51 +28,9 @@ static u32 iobase;
static struct mtd_info *mymtd;
-__u8 l440gx_read8(struct map_info *map, unsigned long ofs)
-{
- return __raw_readb(map->map_priv_1 + ofs);
-}
-
-__u16 l440gx_read16(struct map_info *map, unsigned long ofs)
-{
- return __raw_readw(map->map_priv_1 + ofs);
-}
-
-__u32 l440gx_read32(struct map_info *map, unsigned long ofs)
-{
- return __raw_readl(map->map_priv_1 + ofs);
-}
-
-void l440gx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
-
-void l440gx_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- __raw_writeb(d, map->map_priv_1 + adr);
- mb();
-}
-
-void l440gx_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- __raw_writew(d, map->map_priv_1 + adr);
- mb();
-}
-
-void l440gx_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- __raw_writel(d, map->map_priv_1 + adr);
- mb();
-}
-
-void l440gx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio(map->map_priv_1 + to, from, len);
-}
/* Is this really the vpp port? */
-void l440gx_set_vpp(struct map_info *map, int vpp)
+static void l440gx_set_vpp(struct map_info *map, int vpp)
{
unsigned long l;
@@ -84,23 +43,16 @@ void l440gx_set_vpp(struct map_info *map, int vpp)
outl(l, VPP_PORT);
}
-struct map_info l440gx_map = {
- name: "L440GX BIOS",
- size: WINDOW_SIZE,
- buswidth: BUSWIDTH,
- read8: l440gx_read8,
- read16: l440gx_read16,
- read32: l440gx_read32,
- copy_from: l440gx_copy_from,
- write8: l440gx_write8,
- write16: l440gx_write16,
- write32: l440gx_write32,
- copy_to: l440gx_copy_to,
+static struct map_info l440gx_map = {
+ .name = "L440GX BIOS",
+ .size = WINDOW_SIZE,
+ .bankwidth = BUSWIDTH,
+ .phys = WINDOW_ADDR,
#if 0
- /* FIXME verify that this is the
+ /* FIXME verify that this is the
 	 * appropriate code for vpp enable/disable
*/
- set_vpp: l440gx_set_vpp
+ .set_vpp = l440gx_set_vpp
#endif
};
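
/*
 * The struct conversion above follows the tree-wide move, repeated
 * throughout this patch, from the old GCC-specific "field: value"
 * initializers to C99 designated initializers. A minimal before/after
 * sketch of the idiom, using a stand-in struct rather than the real
 * struct map_info:
 */
struct example_map { const char *name; unsigned long size; int bankwidth; };

/* Old GCC extension syntax removed by this patch (note the member was
 * also renamed from 'buswidth' to 'bankwidth'):
 *
 *	static struct example_map m = { name: "BIOS", size: 0x10000 };
 */

/* C99 designated initializers, the form the drivers now use: */
static struct example_map m = {
	.name      = "BIOS",
	.size      = 0x10000,
	.bankwidth = 2,
};
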
@@ -110,11 +62,10 @@ static int __init init_l440gx(void)
struct resource *pm_iobase;
__u16 word;
- dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+ dev = pci_find_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_0, NULL);
-
- pm_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
+ pm_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
if (!dev || !pm_dev) {
@@ -122,20 +73,19 @@ static int __init init_l440gx(void)
return -ENODEV;
}
+ l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
- l440gx_map.map_priv_1 = (unsigned long)ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE);
-
- if (!l440gx_map.map_priv_1) {
+ if (!l440gx_map.virt) {
printk(KERN_WARNING "Failed to ioremap L440GX flash region\n");
return -ENOMEM;
}
+ simple_map_init(&l440gx_map);
+ printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt);
- printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.map_priv_1);
-
- /* Setup the pm iobase resource
+ /* Setup the pm iobase resource
* This code should move into some kind of generic bridge
* driver but for the moment I'm content with getting the
- * allocation correct.
+ * allocation correct.
*/
pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE];
if (!(pm_iobase->flags & IORESOURCE_IO)) {
@@ -153,14 +103,14 @@ static int __init init_l440gx(void)
/* Allocate the resource region */
if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) {
printk(KERN_WARNING "Could not allocate pm iobase resource\n");
- iounmap((void *)l440gx_map.map_priv_1);
+ iounmap(l440gx_map.virt);
return -ENXIO;
}
}
/* Set the iobase */
iobase = pm_iobase->start;
pci_write_config_dword(pm_dev, 0x40, iobase | 1);
-
+
/* Set XBCS# */
pci_read_config_word(dev, 0x4e, &word);
@@ -172,7 +122,7 @@ static int __init init_l440gx(void)
/* Enable the gate on the WE line */
outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT);
-
+
printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n");
mymtd = do_map_probe("jedec_probe", &l440gx_map);
@@ -181,13 +131,13 @@ static int __init init_l440gx(void)
mymtd = do_map_probe("map_rom", &l440gx_map);
}
if (mymtd) {
- mymtd->module = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
add_mtd_device(mymtd);
return 0;
}
- iounmap((void *)l440gx_map.map_priv_1);
+ iounmap(l440gx_map.virt);
return -ENXIO;
}
@@ -195,8 +145,8 @@ static void __exit cleanup_l440gx(void)
{
del_mtd_device(mymtd);
map_destroy(mymtd);
-
- iounmap((void *)l440gx_map.map_priv_1);
+
+ iounmap(l440gx_map.virt);
}
module_init(init_l440gx);
diff --git a/linux-2.4.x/drivers/mtd/maps/lasat.c b/linux-2.4.x/drivers/mtd/maps/lasat.c
new file mode 100644
index 0000000..c658d40
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/lasat.c
@@ -0,0 +1,102 @@
+/*
+ * Flash device on Lasat 100 and 200 boards
+ *
+ * (C) 2002 Brian Murphy <brian@murphy.dk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * $Id: lasat.c,v 1.9 2004/11/04 13:24:15 gleixner Exp $
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/config.h>
+#include <asm/lasat/lasat.h>
+
+static struct mtd_info *lasat_mtd;
+
+static struct mtd_partition partition_info[LASAT_MTD_LAST];
+static char *lasat_mtd_partnames[] = {"Bootloader", "Service", "Normal", "Filesystem", "Config"};
+
+static void lasat_set_vpp(struct map_info *map, int vpp)
+{
+ if (vpp)
+ *lasat_misc->flash_wp_reg |= 1 << lasat_misc->flash_wp_bit;
+ else
+ *lasat_misc->flash_wp_reg &= ~(1 << lasat_misc->flash_wp_bit);
+}
+
+static struct map_info lasat_map = {
+ .name = "LASAT flash",
+ .bankwidth = 4,
+ .set_vpp = lasat_set_vpp
+};
+
+static int __init init_lasat(void)
+{
+ int i;
+	/* since we use AMD chips and set_vpp is not implemented
+ * for these (yet) we still have to permanently enable flash write */
+ printk(KERN_NOTICE "Unprotecting flash\n");
+ ENABLE_VPP((&lasat_map));
+
+ lasat_map.phys = lasat_flash_partition_start(LASAT_MTD_BOOTLOADER);
+ lasat_map.virt = ioremap_nocache(
+ lasat_map.phys, lasat_board_info.li_flash_size);
+ lasat_map.size = lasat_board_info.li_flash_size;
+
+ simple_map_init(&lasat_map);
+
+ for (i=0; i < LASAT_MTD_LAST; i++)
+ partition_info[i].name = lasat_mtd_partnames[i];
+
+ lasat_mtd = do_map_probe("cfi_probe", &lasat_map);
+
+ if (!lasat_mtd)
+ lasat_mtd = do_map_probe("jedec_probe", &lasat_map);
+
+ if (lasat_mtd) {
+ u32 size, offset = 0;
+
+ lasat_mtd->owner = THIS_MODULE;
+
+ for (i=0; i < LASAT_MTD_LAST; i++) {
+ size = lasat_flash_partition_size(i);
+ partition_info[i].size = size;
+ partition_info[i].offset = offset;
+ offset += size;
+ }
+
+ add_mtd_partitions( lasat_mtd, partition_info, LASAT_MTD_LAST );
+ return 0;
+ }
+
+ return -ENXIO;
+}
+
+static void __exit cleanup_lasat(void)
+{
+ if (lasat_mtd) {
+ del_mtd_partitions(lasat_mtd);
+ map_destroy(lasat_mtd);
+ }
+	if (lasat_map.virt) {
+		iounmap(lasat_map.virt);
+		lasat_map.virt = NULL;
+	}
+}
+
+module_init(init_lasat);
+module_exit(cleanup_lasat);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Brian Murphy <brian@murphy.dk>");
+MODULE_DESCRIPTION("Lasat Safepipe/Masquerade MTD map driver");
diff --git a/linux-2.4.x/drivers/mtd/maps/map_funcs.c b/linux-2.4.x/drivers/mtd/maps/map_funcs.c
new file mode 100644
index 0000000..9105e6c
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/map_funcs.c
@@ -0,0 +1,45 @@
+/*
+ * $Id: map_funcs.c,v 1.10 2005/06/06 23:04:36 tpoynor Exp $
+ *
+ * Out-of-line map I/O functions for simple maps when CONFIG_COMPLEX_MAPPINGS
+ * is enabled.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
+
+static map_word __xipram simple_map_read(struct map_info *map, unsigned long ofs)
+{
+ return inline_map_read(map, ofs);
+}
+
+static void __xipram simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
+{
+ inline_map_write(map, datum, ofs);
+}
+
+static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ inline_map_copy_from(map, to, from, len);
+}
+
+static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ inline_map_copy_to(map, to, from, len);
+}
+
+void simple_map_init(struct map_info *map)
+{
+ BUG_ON(!map_bankwidth_supported(map->bankwidth));
+
+ map->read = simple_map_read;
+ map->write = simple_map_write;
+ map->copy_from = simple_map_copy_from;
+ map->copy_to = simple_map_copy_to;
+}
+
+EXPORT_SYMBOL(simple_map_init);
+MODULE_LICENSE("GPL");
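
/*
 * simple_map_init() above is what most of the converted map drivers in
 * this patch now call instead of carrying their own read/write/copy
 * handlers. A condensed sketch of the usual calling sequence; the
 * "example" names and the window base/size are placeholders, not values
 * taken from this patch.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <asm/io.h>

static struct mtd_info *example_mtd;

static struct map_info example_map = {
	.name      = "example flash",
	.phys      = 0x1e000000,	/* assumed window base */
	.size      = 0x00200000,	/* assumed window size */
	.bankwidth = 2,
};

static int __init example_init(void)
{
	example_map.virt = ioremap(example_map.phys, example_map.size);
	if (!example_map.virt)
		return -EIO;

	simple_map_init(&example_map);	/* installs read/write/copy_from/copy_to */

	example_mtd = do_map_probe("cfi_probe", &example_map);
	if (!example_mtd) {
		iounmap(example_map.virt);
		return -ENXIO;
	}
	example_mtd->owner = THIS_MODULE;
	add_mtd_device(example_mtd);
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");
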
diff --git a/linux-2.4.x/drivers/mtd/maps/mbx860.c b/linux-2.4.x/drivers/mtd/maps/mbx860.c
index d4ace26..06b1187 100644
--- a/linux-2.4.x/drivers/mtd/maps/mbx860.c
+++ b/linux-2.4.x/drivers/mtd/maps/mbx860.c
@@ -1,11 +1,11 @@
/*
- * $Id: mbx860.c,v 1.1 2001/11/18 19:43:09 dwmw2 Exp $
+ * $Id: mbx860.c,v 1.9 2005/11/07 11:14:27 gleixner Exp $
*
* Handle mapping of the flash on MBX860 boards
*
* Author: Anton Todorov
* Copyright: (C) 2001 Emness Technology
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -36,91 +37,46 @@
 * single flash device into. If the size is zero we use up to the end of the
* device. */
static struct mtd_partition partition_info[]={
- { name: "MBX flash BOOT partition",
- offset: 0,
- size: BOOT_PARTITION_SIZE_KiB*1024 },
- { name: "MBX flash DATA partition",
- offset: BOOT_PARTITION_SIZE_KiB*1024,
- size: (KERNEL_PARTITION_SIZE_KiB)*1024 },
- { name: "MBX flash APPLICATION partition",
- offset: (BOOT_PARTITION_SIZE_KiB+KERNEL_PARTITION_SIZE_KiB)*1024 }
+ { .name = "MBX flash BOOT partition",
+ .offset = 0,
+ .size = BOOT_PARTITION_SIZE_KiB*1024 },
+ { .name = "MBX flash DATA partition",
+ .offset = BOOT_PARTITION_SIZE_KiB*1024,
+ .size = (KERNEL_PARTITION_SIZE_KiB)*1024 },
+ { .name = "MBX flash APPLICATION partition",
+ .offset = (BOOT_PARTITION_SIZE_KiB+KERNEL_PARTITION_SIZE_KiB)*1024 }
};
-
-
-static struct mtd_info *mymtd;
-
-__u8 mbx_read8(struct map_info *map, unsigned long ofs)
-{
- return readb(map->map_priv_1 + ofs);
-}
-
-__u16 mbx_read16(struct map_info *map, unsigned long ofs)
-{
- return readw(map->map_priv_1 + ofs);
-}
-__u32 mbx_read32(struct map_info *map, unsigned long ofs)
-{
- return readl(map->map_priv_1 + ofs);
-}
-
-void mbx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
-}
-void mbx_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- writeb(d, map->map_priv_1 + adr);
-}
-
-void mbx_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- writew(d, map->map_priv_1 + adr);
-}
-
-void mbx_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- writel(d, map->map_priv_1 + adr);
-}
-
-void mbx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio((void *)(map->map_priv_1 + to), from, len);
-}
+static struct mtd_info *mymtd;
struct map_info mbx_map = {
- name: "MBX flash",
- size: WINDOW_SIZE,
- buswidth: 4,
- read8: mbx_read8,
- read16: mbx_read16,
- read32: mbx_read32,
- copy_from: mbx_copy_from,
- write8: mbx_write8,
- write16: mbx_write16,
- write32: mbx_write32,
- copy_to: mbx_copy_to
+ .name = "MBX flash",
+ .size = WINDOW_SIZE,
+ .phys = WINDOW_ADDR,
+ .bankwidth = 4,
};
int __init init_mbx(void)
{
- printk(KERN_NOTICE "Motorola MBX flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
- mbx_map.map_priv_1 = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
+ printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR);
+ mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
- if (!mbx_map.map_priv_1) {
+ if (!mbx_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
+ simple_map_init(&mbx_map);
+
mymtd = do_map_probe("jedec_probe", &mbx_map);
if (mymtd) {
- mymtd->module = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
add_mtd_device(mymtd);
add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
return 0;
}
- iounmap((void *)mbx_map.map_priv_1);
+ iounmap((void *)mbx_map.virt);
return -ENXIO;
}
@@ -130,9 +86,9 @@ static void __exit cleanup_mbx(void)
del_mtd_device(mymtd);
map_destroy(mymtd);
}
- if (mbx_map.map_priv_1) {
- iounmap((void *)mbx_map.map_priv_1);
- mbx_map.map_priv_1 = 0;
+ if (mbx_map.virt) {
+ iounmap((void *)mbx_map.virt);
+ mbx_map.virt = 0;
}
}
diff --git a/linux-2.4.x/drivers/mtd/maps/mpc1211.c b/linux-2.4.x/drivers/mtd/maps/mpc1211.c
new file mode 100644
index 0000000..4685e8e
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/mpc1211.c
@@ -0,0 +1,81 @@
+/*
+ * Flash on MPC-1211
+ *
+ * $Id: mpc1211.c,v 1.4 2004/09/16 23:27:13 gleixner Exp $
+ *
+ * (C) 2002 Interface, Saito.K & Jeanne
+ *
+ * GPL'd
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/config.h>
+
+static struct mtd_info *flash_mtd;
+static struct mtd_partition *parsed_parts;
+
+struct map_info mpc1211_flash_map = {
+ .name = "MPC-1211 FLASH",
+ .size = 0x80000,
+ .bankwidth = 1,
+};
+
+static struct mtd_partition mpc1211_partitions[] = {
+ {
+ .name = "IPL & ETH-BOOT",
+ .offset = 0x00000000,
+ .size = 0x10000,
+ },
+ {
+ .name = "Flash FS",
+ .offset = 0x00010000,
+ .size = MTDPART_SIZ_FULL,
+ }
+};
+
+static int __init init_mpc1211_maps(void)
+{
+ int nr_parts;
+
+ mpc1211_flash_map.phys = 0;
+ mpc1211_flash_map.virt = (void __iomem *)P2SEGADDR(0);
+
+ simple_map_init(&mpc1211_flash_map);
+
+ printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n");
+ flash_mtd = do_map_probe("jedec_probe", &mpc1211_flash_map);
+ if (!flash_mtd) {
+ printk(KERN_NOTICE "Flash chips not detected at either possible location.\n");
+ return -ENXIO;
+ }
+ printk(KERN_NOTICE "MPC-1211: Flash at 0x%08lx\n", mpc1211_flash_map.virt & 0x1fffffff);
+ flash_mtd->module = THIS_MODULE;
+
+ parsed_parts = mpc1211_partitions;
+ nr_parts = ARRAY_SIZE(mpc1211_partitions);
+
+ add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
+ return 0;
+}
+
+static void __exit cleanup_mpc1211_maps(void)
+{
+ if (parsed_parts)
+ del_mtd_partitions(flash_mtd);
+ else
+ del_mtd_device(flash_mtd);
+ map_destroy(flash_mtd);
+}
+
+module_init(init_mpc1211_maps);
+module_exit(cleanup_mpc1211_maps);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Saito.K & Jeanne <ksaito@interface.co.jp>");
+MODULE_DESCRIPTION("MTD map driver for MPC-1211 boards. Interface");
diff --git a/linux-2.4.x/drivers/mtd/maps/mphysmap.c b/linux-2.4.x/drivers/mtd/maps/mphysmap.c
new file mode 100644
index 0000000..05969ec
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/mphysmap.c
@@ -0,0 +1,186 @@
+/*
+ * $Id: mphysmap.c,v 1.4 2005/11/07 11:14:27 gleixner Exp $
+ *
+ * Several mappings of NOR chips
+ *
+ * Copyright (c) 2001-2005 Jörn Engel <joern@wh.fh-wedel.de>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
+static struct map_info mphysmap_static_maps[] = {
+#if CONFIG_MTD_MULTI_PHYSMAP_1_WIDTH
+ {
+ .name = CONFIG_MTD_MULTI_PHYSMAP_1_NAME,
+ .phys = CONFIG_MTD_MULTI_PHYSMAP_1_START,
+ .size = CONFIG_MTD_MULTI_PHYSMAP_1_LEN,
+ .bankwidth = CONFIG_MTD_MULTI_PHYSMAP_1_WIDTH,
+ },
+#endif
+#if CONFIG_MTD_MULTI_PHYSMAP_2_WIDTH
+ {
+ .name = CONFIG_MTD_MULTI_PHYSMAP_2_NAME,
+ .phys = CONFIG_MTD_MULTI_PHYSMAP_2_START,
+ .size = CONFIG_MTD_MULTI_PHYSMAP_2_LEN,
+ .bankwidth = CONFIG_MTD_MULTI_PHYSMAP_2_WIDTH,
+ },
+#endif
+#if CONFIG_MTD_MULTI_PHYSMAP_3_WIDTH
+ {
+ .name = CONFIG_MTD_MULTI_PHYSMAP_3_NAME,
+ .phys = CONFIG_MTD_MULTI_PHYSMAP_3_START,
+ .size = CONFIG_MTD_MULTI_PHYSMAP_3_LEN,
+ .bankwidth = CONFIG_MTD_MULTI_PHYSMAP_3_WIDTH,
+ },
+#endif
+#if CONFIG_MTD_MULTI_PHYSMAP_4_WIDTH
+ {
+ .name = CONFIG_MTD_MULTI_PHYSMAP_4_NAME,
+ .phys = CONFIG_MTD_MULTI_PHYSMAP_4_START,
+ .size = CONFIG_MTD_MULTI_PHYSMAP_4_LEN,
+ .bankwidth = CONFIG_MTD_MULTI_PHYSMAP_4_WIDTH,
+ },
+#endif
+};
+
+DECLARE_MUTEX(map_mutex);
+
+
+static int mphysmap_map_device(struct map_info *map)
+{
+ static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
+ const char **type;
+ struct mtd_info* mtd;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition* mtd_parts;
+ int mtd_parts_nb;
+ static const char *part_probes[] __initdata = {
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ "cmdlinepart",
+#endif
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+ "RedBoot",
+#endif
+ NULL};
+#endif
+ map->virt = ioremap(map->phys, map->size);
+ if (!map->virt)
+ return -EIO;
+
+ simple_map_init(map);
+ mtd = NULL;
+ type = rom_probe_types;
+ for(; !mtd && *type; type++) {
+ mtd = do_map_probe(*type, map);
+ }
+
+ if (!mtd) {
+ iounmap(map->virt);
+ return -ENXIO;
+ }
+
+ map->map_priv_1 = (unsigned long)mtd;
+ mtd->owner = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ mtd_parts_nb = parse_mtd_partitions(mtd, part_probes,
+ &mtd_parts, 0);
+ if (mtd_parts_nb > 0)
+ {
+ add_mtd_partitions (mtd, mtd_parts, mtd_parts_nb);
+ map->map_priv_2=(unsigned long)mtd_parts;
+ }
+ else
+ {
+ add_mtd_device(mtd);
+ map->map_priv_2=(unsigned long)NULL;
+ };
+#else
+ add_mtd_device(mtd);
+#endif
+ return 0;
+}
+
+
+static void mphysmap_unmap_device(struct map_info *map)
+{
+ struct mtd_info* mtd = (struct mtd_info*)map->map_priv_1;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition* mtd_parts=(struct mtd_partition*)map->map_priv_2;
+#endif
+ BUG_ON(!mtd);
+ if (!map->virt)
+ return;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ if (mtd_parts)
+ {
+ del_mtd_partitions(mtd);
+ kfree(mtd_parts);
+ }
+ else
+ del_mtd_device(mtd);
+#else
+ del_mtd_device(mtd);
+#endif
+ map_destroy(mtd);
+ iounmap(map->virt);
+
+ map->map_priv_1 = 0;
+ map->map_priv_2 = 0;
+ map->virt = NULL;
+}
+
+
+
+
+static int __init mphysmap_init(void)
+{
+ int i;
+ down(&map_mutex);
+ for (i=0;
+ i<sizeof(mphysmap_static_maps)/sizeof(mphysmap_static_maps[0]);
+ i++)
+ {
+ if (strcmp(mphysmap_static_maps[i].name,"")!=0 &&
+ mphysmap_static_maps[i].size!=0 &&
+ mphysmap_static_maps[i].bankwidth!=0)
+ {
+ mphysmap_map_device(&mphysmap_static_maps[i]);
+ };
+ };
+ up(&map_mutex);
+ return 0;
+}
+
+
+static void __exit mphysmap_exit(void)
+{
+ int i;
+ down(&map_mutex);
+ for (i=0;
+ i<sizeof(mphysmap_static_maps)/sizeof(mphysmap_static_maps[0]);
+ i++)
+ {
+ if (strcmp(mphysmap_static_maps[i].name,"")!=0 &&
+ mphysmap_static_maps[i].size!=0 &&
+ mphysmap_static_maps[i].bankwidth!=0)
+ {
+ mphysmap_unmap_device(&mphysmap_static_maps[i]);
+ };
+ };
+ up(&map_mutex);
+}
+
+
+module_init(mphysmap_init);
+module_exit(mphysmap_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jörn Engel <joern@wh.fh-wedelde>");
+MODULE_DESCRIPTION("Generic configurable extensible MTD map driver");
diff --git a/linux-2.4.x/drivers/mtd/maps/mtx-1_flash.c b/linux-2.4.x/drivers/mtd/maps/mtx-1_flash.c
new file mode 100644
index 0000000..d1e66e1
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/mtx-1_flash.c
@@ -0,0 +1,96 @@
+/*
+ * Flash memory access on 4G Systems MTX-1 boards
+ *
+ * $Id: mtx-1_flash.c,v 1.2 2005/11/07 11:14:27 gleixner Exp $
+ *
+ * (C) 2005 Bruno Randolf <bruno.randolf@4g-systems.biz>
+ * (C) 2005 Jörn Engel <joern@wohnheim.fh-wedel.de>
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+
+static struct map_info mtx1_map = {
+ .name = "MTX-1 flash",
+ .bankwidth = 4,
+ .size = 0x2000000,
+ .phys = 0x1E000000,
+};
+
+static struct mtd_partition mtx1_partitions[] = {
+ {
+ .name = "filesystem",
+ .size = 0x01C00000,
+ .offset = 0,
+ },{
+ .name = "yamon",
+ .size = 0x00100000,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = MTD_WRITEABLE,
+ },{
+ .name = "kernel",
+ .size = 0x002c0000,
+ .offset = MTDPART_OFS_APPEND,
+ },{
+ .name = "yamon env",
+ .size = 0x00040000,
+ .offset = MTDPART_OFS_APPEND,
+ }
+};
+
+static struct mtd_info *mtx1_mtd;
+
+int __init mtx1_mtd_init(void)
+{
+ int ret = -ENXIO;
+
+ simple_map_init(&mtx1_map);
+
+ mtx1_map.virt = ioremap(mtx1_map.phys, mtx1_map.size);
+ if (!mtx1_map.virt)
+ return -EIO;
+
+ mtx1_mtd = do_map_probe("cfi_probe", &mtx1_map);
+ if (!mtx1_mtd)
+ goto err;
+
+ mtx1_mtd->owner = THIS_MODULE;
+
+ ret = add_mtd_partitions(mtx1_mtd, mtx1_partitions,
+ ARRAY_SIZE(mtx1_partitions));
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ iounmap(mtx1_map.virt);
+ return ret;
+}
+
+static void __exit mtx1_mtd_cleanup(void)
+{
+ if (mtx1_mtd) {
+ del_mtd_partitions(mtx1_mtd);
+ map_destroy(mtx1_mtd);
+ }
+ if (mtx1_map.virt)
+ iounmap(mtx1_map.virt);
+}
+
+module_init(mtx1_mtd_init);
+module_exit(mtx1_mtd_cleanup);
+
+MODULE_AUTHOR("Bruno Randolf <bruno.randolf@4g-systems.biz>");
+MODULE_DESCRIPTION("MTX-1 flash map");
+MODULE_LICENSE("GPL");
diff --git a/linux-2.4.x/drivers/mtd/maps/netsc520.c b/linux-2.4.x/drivers/mtd/maps/netsc520.c
index e587f82..9b210a3 100644
--- a/linux-2.4.x/drivers/mtd/maps/netsc520.c
+++ b/linux-2.4.x/drivers/mtd/maps/netsc520.c
@@ -3,7 +3,7 @@
* Copyright (C) 2001 Mark Langsdorf (mark.langsdorf@amd.com)
* based on sc520cdp.c by Sysgo Real-Time Solutions GmbH
*
- * $Id: netsc520.c,v 1.5 2001/10/02 15:05:14 dwmw2 Exp $
+ * $Id: netsc520.c,v 1.15 2006/03/29 08:31:11 dwmw2 Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -37,7 +38,7 @@
** The single, 16 megabyte flash bank is divided into four virtual
** partitions. The first partition is 768 KiB and is intended to
** store the kernel image loaded by the bootstrap loader. The second
-** partition is 256 KiB and holds the BIOS image. The third
+** partition is 256 KiB and holds the BIOS image. The third
** partition is 14.5 MiB and is intended for the flash file system
** image. The last partition is 512 KiB and contains another copy
** of the BIOS image and the reset vector.
@@ -50,110 +51,59 @@
** recoverable afterwards.
*/
-static __u8 netsc520_read8(struct map_info *map, unsigned long ofs)
-{
- return readb(map->map_priv_1 + ofs);
-}
-
-static __u16 netsc520_read16(struct map_info *map, unsigned long ofs)
-{
- return readw(map->map_priv_1 + ofs);
-}
-
-static __u32 netsc520_read32(struct map_info *map, unsigned long ofs)
-{
- return readl(map->map_priv_1 + ofs);
-}
-
-static void netsc520_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
-}
-
-static void netsc520_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- writeb(d, map->map_priv_1 + adr);
-}
-
-static void netsc520_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- writew(d, map->map_priv_1 + adr);
-}
-
-static void netsc520_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- writel(d, map->map_priv_1 + adr);
-}
-
-static void netsc520_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio((void *)(map->map_priv_1 + to), from, len);
-}
-
-/* partition_info gives details on the logical partitions that the split the
+/* partition_info gives details on the logical partitions that the single
 * flash device is split into. If the size is zero we use up to the end of the
* device. */
static struct mtd_partition partition_info[]={
- {
- name: "NetSc520 boot kernel",
- offset: 0,
- size: 0xc0000
+ {
+ .name = "NetSc520 boot kernel",
+ .offset = 0,
+ .size = 0xc0000
},
- {
- name: "NetSc520 Low BIOS",
- offset: 0xc0000,
- size: 0x40000
+ {
+ .name = "NetSc520 Low BIOS",
+ .offset = 0xc0000,
+ .size = 0x40000
},
- {
- name: "NetSc520 file system",
- offset: 0x100000,
- size: 0xe80000
+ {
+ .name = "NetSc520 file system",
+ .offset = 0x100000,
+ .size = 0xe80000
},
- {
- name: "NetSc520 High BIOS",
- offset: 0xf80000,
- size: 0x80000
+ {
+ .name = "NetSc520 High BIOS",
+ .offset = 0xf80000,
+ .size = 0x80000
},
};
-#define NUM_PARTITIONS (sizeof(partition_info)/sizeof(partition_info[0]))
-
-/*
- * If no idea what is going on here. This is taken from the FlashFX stuff.
- */
-#define ROMCS 1
-
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
#define WINDOW_SIZE 0x00100000
#define WINDOW_ADDR 0x00200000
static struct map_info netsc520_map = {
- name: "netsc520 Flash Bank",
- size: WINDOW_SIZE,
- buswidth: 4,
- read8: netsc520_read8,
- read16: netsc520_read16,
- read32: netsc520_read32,
- copy_from: netsc520_copy_from,
- write8: netsc520_write8,
- write16: netsc520_write16,
- write32: netsc520_write32,
- copy_to: netsc520_copy_to,
- map_priv_2: WINDOW_ADDR
+ .name = "netsc520 Flash Bank",
+ .size = WINDOW_SIZE,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR,
};
-#define NUM_FLASH_BANKS (sizeof(netsc520_map)/sizeof(struct map_info))
+#define NUM_FLASH_BANKS ARRAY_SIZE(netsc520_map)
static struct mtd_info *mymtd;
static int __init init_netsc520(void)
{
- printk(KERN_NOTICE "NetSc520 flash device: %lx at %lx\n", netsc520_map.size, netsc520_map.map_priv_2);
- netsc520_map.map_priv_1 = (unsigned long)ioremap_nocache(netsc520_map.map_priv_2, netsc520_map.size);
+ printk(KERN_NOTICE "NetSc520 flash device: 0x%lx at 0x%lx\n", netsc520_map.size, netsc520_map.phys);
+ netsc520_map.virt = ioremap_nocache(netsc520_map.phys, netsc520_map.size);
- if (!netsc520_map.map_priv_1) {
+ if (!netsc520_map.virt) {
printk("Failed to ioremap_nocache\n");
return -EIO;
}
+
+ simple_map_init(&netsc520_map);
+
mymtd = do_map_probe("cfi_probe", &netsc520_map);
if(!mymtd)
mymtd = do_map_probe("map_ram", &netsc520_map);
@@ -161,11 +111,11 @@ static int __init init_netsc520(void)
mymtd = do_map_probe("map_rom", &netsc520_map);
if (!mymtd) {
- iounmap((void *)netsc520_map.map_priv_1);
+ iounmap(netsc520_map.virt);
return -ENXIO;
}
-
- mymtd->module = THIS_MODULE;
+
+ mymtd->owner = THIS_MODULE;
add_mtd_partitions( mymtd, partition_info, NUM_PARTITIONS );
return 0;
}
@@ -176,9 +126,9 @@ static void __exit cleanup_netsc520(void)
del_mtd_partitions(mymtd);
map_destroy(mymtd);
}
- if (netsc520_map.map_priv_1) {
- iounmap((void *)netsc520_map.map_priv_1);
- netsc520_map.map_priv_1 = 0;
+ if (netsc520_map.virt) {
+ iounmap(netsc520_map.virt);
+ netsc520_map.virt = NULL;
}
}
diff --git a/linux-2.4.x/drivers/mtd/maps/nettel.c b/linux-2.4.x/drivers/mtd/maps/nettel.c
index 799a58f..0db96f2 100644
--- a/linux-2.4.x/drivers/mtd/maps/nettel.c
+++ b/linux-2.4.x/drivers/mtd/maps/nettel.c
@@ -5,6 +5,8 @@
*
* (C) Copyright 2000-2001, Greg Ungerer (gerg@snapgear.com)
* (C) Copyright 2001-2002, SnapGear (www.snapgear.com)
+ *
+ * $Id: nettel.c,v 1.13 2006/03/29 08:31:11 dwmw2 Exp $
*/
/****************************************************************************/
@@ -57,133 +59,76 @@ static struct mtd_info *amd_mtd;
/****************************************************************************/
-static __u8 nettel_read8(struct map_info *map, unsigned long ofs)
-{
- return(readb(map->map_priv_1 + ofs));
-}
-
-static __u16 nettel_read16(struct map_info *map, unsigned long ofs)
-{
- return(readw(map->map_priv_1 + ofs));
-}
-
-static __u32 nettel_read32(struct map_info *map, unsigned long ofs)
-{
- return(readl(map->map_priv_1 + ofs));
-}
-
-static void nettel_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
-
-static void nettel_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- writeb(d, map->map_priv_1 + adr);
-}
-
-static void nettel_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- writew(d, map->map_priv_1 + adr);
-}
-
-static void nettel_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- writel(d, map->map_priv_1 + adr);
-}
-
-static void nettel_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio(map->map_priv_1 + to, from, len);
-}
-
/****************************************************************************/
#ifdef CONFIG_MTD_CFI_INTELEXT
static struct map_info nettel_intel_map = {
- name: "SnapGear Intel",
- size: 0,
- buswidth: INTEL_BUSWIDTH,
- read8: nettel_read8,
- read16: nettel_read16,
- read32: nettel_read32,
- copy_from: nettel_copy_from,
- write8: nettel_write8,
- write16: nettel_write16,
- write32: nettel_write32,
- copy_to: nettel_copy_to
+ .name = "SnapGear Intel",
+ .size = 0,
+ .bankwidth = INTEL_BUSWIDTH,
};
static struct mtd_partition nettel_intel_partitions[] = {
{
- name: "SnapGear kernel",
- offset: 0,
- size: 0x000e0000
+ .name = "SnapGear kernel",
+ .offset = 0,
+ .size = 0x000e0000
},
{
- name: "SnapGear filesystem",
- offset: 0x00100000,
+ .name = "SnapGear filesystem",
+ .offset = 0x00100000,
},
{
- name: "SnapGear config",
- offset: 0x000e0000,
- size: 0x00020000
+ .name = "SnapGear config",
+ .offset = 0x000e0000,
+ .size = 0x00020000
},
{
- name: "SnapGear Intel",
- offset: 0
+ .name = "SnapGear Intel",
+ .offset = 0
},
{
- name: "SnapGear BIOS Config",
- offset: 0x007e0000,
- size: 0x00020000
+ .name = "SnapGear BIOS Config",
+ .offset = 0x007e0000,
+ .size = 0x00020000
},
{
- name: "SnapGear BIOS",
- offset: 0x007e0000,
- size: 0x00020000
+ .name = "SnapGear BIOS",
+ .offset = 0x007e0000,
+ .size = 0x00020000
},
};
#endif
static struct map_info nettel_amd_map = {
- name: "SnapGear AMD",
- size: AMD_WINDOW_MAXSIZE,
- buswidth: AMD_BUSWIDTH,
- read8: nettel_read8,
- read16: nettel_read16,
- read32: nettel_read32,
- copy_from: nettel_copy_from,
- write8: nettel_write8,
- write16: nettel_write16,
- write32: nettel_write32,
- copy_to: nettel_copy_to
+ .name = "SnapGear AMD",
+ .size = AMD_WINDOW_MAXSIZE,
+ .bankwidth = AMD_BUSWIDTH,
};
static struct mtd_partition nettel_amd_partitions[] = {
{
- name: "SnapGear BIOS config",
- offset: 0x000e0000,
- size: 0x00010000
+ .name = "SnapGear BIOS config",
+ .offset = 0x000e0000,
+ .size = 0x00010000
},
{
- name: "SnapGear BIOS",
- offset: 0x000f0000,
- size: 0x00010000
+ .name = "SnapGear BIOS",
+ .offset = 0x000f0000,
+ .size = 0x00010000
},
{
- name: "SnapGear AMD",
- offset: 0
+ .name = "SnapGear AMD",
+ .offset = 0
},
{
- name: "SnapGear high BIOS",
- offset: 0x001f0000,
- size: 0x00010000
+ .name = "SnapGear high BIOS",
+ .offset = 0x001f0000,
+ .size = 0x00010000
}
};
-#define NUM_AMD_PARTITIONS \
- (sizeof(nettel_amd_partitions)/sizeof(nettel_amd_partitions[0]))
+#define NUM_AMD_PARTITIONS ARRAY_SIZE(nettel_amd_partitions)
/****************************************************************************/
@@ -197,7 +142,7 @@ static int nettel_reboot_notifier(struct notifier_block *nb, unsigned long val,
{
struct cfi_private *cfi = nettel_intel_map.fldrv_priv;
unsigned long b;
-
+
/* Make sure all FLASH chips are put back into read mode */
for (b = 0; (b < nettel_intel_partitions[3].size); b += 0x100000) {
cfi_send_gen_cmd(0xff, 0x55, b, &nettel_intel_map, cfi,
@@ -234,7 +179,7 @@ int nettel_eraseconfig(void)
if (mtd) {
nettel_erase.mtd = mtd;
nettel_erase.callback = nettel_erasecallback;
- nettel_erase.callback = 0;
+ nettel_erase.callback = NULL;
nettel_erase.addr = 0;
nettel_erase.len = mtd->size;
nettel_erase.priv = (u_long) &wait_q;
@@ -253,7 +198,7 @@ int nettel_eraseconfig(void)
schedule(); /* Wait for erase to finish. */
remove_wait_queue(&wait_q, &wait);
-
+
put_mtd_device(mtd);
}
@@ -271,11 +216,6 @@ int nettel_eraseconfig(void)
/****************************************************************************/
-#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
-#define nettel_init init_module
-#define nettel_cleanup cleanup_module
-#endif
-
int __init nettel_init(void)
{
volatile unsigned long *amdpar;
@@ -331,18 +271,19 @@ int __init nettel_init(void)
*amdpar = SC520_PAR(SC520_PAR_BOOTCS, amdaddr, maxsize);
__asm__ ("wbinvd");
- nettel_amd_map.map_priv_1 = (unsigned long)
- ioremap_nocache(amdaddr, maxsize);
- if (!nettel_amd_map.map_priv_1) {
+ nettel_amd_map.phys = amdaddr;
+ nettel_amd_map.virt = ioremap_nocache(amdaddr, maxsize);
+ if (!nettel_amd_map.virt) {
printk("SNAPGEAR: failed to ioremap() BOOTCS\n");
return(-EIO);
}
+ simple_map_init(&nettel_amd_map);
if ((amd_mtd = do_map_probe("jedec_probe", &nettel_amd_map))) {
printk(KERN_NOTICE "SNAPGEAR: AMD flash device size = %dK\n",
amd_mtd->size>>10);
- amd_mtd->module = THIS_MODULE;
+ amd_mtd->owner = THIS_MODULE;
/* The high BIOS partition is only present for 2MB units */
num_amd_partitions = NUM_AMD_PARTITIONS;
@@ -390,8 +331,8 @@ int __init nettel_init(void)
/* Destroy useless AMD MTD mapping */
amd_mtd = NULL;
- iounmap((void *) nettel_amd_map.map_priv_1);
- nettel_amd_map.map_priv_1 = (unsigned long) NULL;
+ iounmap(nettel_amd_map.virt);
+ nettel_amd_map.virt = NULL;
#else
/* Only AMD flash supported */
return(-ENXIO);
@@ -414,16 +355,17 @@ int __init nettel_init(void)
 	/* Probe for the size of the first Intel flash */
nettel_intel_map.size = maxsize;
- nettel_intel_map.map_priv_1 = (unsigned long)
- ioremap_nocache(intel0addr, maxsize);
- if (!nettel_intel_map.map_priv_1) {
+ nettel_intel_map.phys = intel0addr;
+ nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+ if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1\n");
return(-EIO);
}
+ simple_map_init(&nettel_intel_map);
intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
- if (! intel_mtd) {
- iounmap((void *) nettel_intel_map.map_priv_1);
+ if (!intel_mtd) {
+ iounmap(nettel_intel_map.virt);
return(-ENXIO);
}
@@ -444,19 +386,18 @@ int __init nettel_init(void)
/* Delete the old map and probe again to do both chips */
map_destroy(intel_mtd);
intel_mtd = NULL;
- iounmap((void *) nettel_intel_map.map_priv_1);
+ iounmap(nettel_intel_map.virt);
nettel_intel_map.size = maxsize;
- nettel_intel_map.map_priv_1 = (unsigned long)
- ioremap_nocache(intel0addr, maxsize);
- if (!nettel_intel_map.map_priv_1) {
+ nettel_intel_map.virt = ioremap_nocache(intel0addr, maxsize);
+ if (!nettel_intel_map.virt) {
printk("SNAPGEAR: failed to ioremap() ROMCS1/2\n");
return(-EIO);
}
intel_mtd = do_map_probe("cfi_probe", &nettel_intel_map);
if (! intel_mtd) {
- iounmap((void *) nettel_intel_map.map_priv_1);
+ iounmap((void *) nettel_intel_map.virt);
return(-ENXIO);
}
@@ -471,7 +412,7 @@ int __init nettel_init(void)
printk(KERN_NOTICE "SNAPGEAR: Intel flash device size = %dK\n",
(intel_mtd->size >> 10));
- intel_mtd->module = THIS_MODULE;
+ intel_mtd->owner = THIS_MODULE;
#ifndef CONFIG_BLK_DEV_INITRD
ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, 1);
@@ -488,7 +429,7 @@ int __init nettel_init(void)
nettel_intel_partitions[1].size = (intel0size + intel1size) -
(1024*1024 + intel_mtd->erasesize);
nettel_intel_partitions[3].size = intel0size + intel1size;
- nettel_intel_partitions[4].offset =
+ nettel_intel_partitions[4].offset =
(intel0size + intel1size) - intel_mtd->erasesize;
nettel_intel_partitions[4].size = intel_mtd->erasesize;
nettel_intel_partitions[5].offset =
@@ -526,18 +467,18 @@ void __exit nettel_cleanup(void)
del_mtd_partitions(amd_mtd);
map_destroy(amd_mtd);
}
- if (nettel_amd_map.map_priv_1) {
- iounmap((void *)nettel_amd_map.map_priv_1);
- nettel_amd_map.map_priv_1 = 0;
+ if (nettel_amd_map.virt) {
+ iounmap(nettel_amd_map.virt);
+ nettel_amd_map.virt = NULL;
}
#ifdef CONFIG_MTD_CFI_INTELEXT
if (intel_mtd) {
del_mtd_partitions(intel_mtd);
map_destroy(intel_mtd);
}
- if (nettel_intel_map.map_priv_1) {
- iounmap((void *)nettel_intel_map.map_priv_1);
- nettel_intel_map.map_priv_1 = 0;
+ if (nettel_intel_map.virt) {
+ iounmap(nettel_intel_map.virt);
+ nettel_intel_map.virt = NULL;
}
#endif
}
diff --git a/linux-2.4.x/drivers/mtd/maps/ocelot.c b/linux-2.4.x/drivers/mtd/maps/ocelot.c
index ceec5fb..6977963 100644
--- a/linux-2.4.x/drivers/mtd/maps/ocelot.c
+++ b/linux-2.4.x/drivers/mtd/maps/ocelot.c
@@ -1,5 +1,5 @@
/*
- * $Id: ocelot.c,v 1.6 2001/10/02 15:05:14 dwmw2 Exp $
+ * $Id: ocelot.c,v 1.17 2005/11/07 11:14:27 gleixner Exp $
*
* Flash on Momenco Ocelot
*/
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -20,47 +21,23 @@
#define NVRAM_WINDOW_SIZE 0x00007FF0
#define NVRAM_BUSWIDTH 1
-extern int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts);
-
static unsigned int cacheflush = 0;
static struct mtd_info *flash_mtd;
static struct mtd_info *nvram_mtd;
-__u8 ocelot_read8(struct map_info *map, unsigned long ofs)
+static void ocelot_ram_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
- return __raw_readb(map->map_priv_1 + ofs);
-}
-
-void ocelot_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- cacheflush = 1;
- __raw_writeb(d, map->map_priv_1 + adr);
- mb();
-}
-
-void ocelot_copy_from_cache(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- if (cacheflush) {
- dma_cache_inv(map->map_priv_2, map->size);
- cacheflush = 0;
- }
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
+ struct map_info *map = mtd->priv;
+ size_t done = 0;
-void ocelot_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
-
-void ocelot_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- /* If we use memcpy, it does word-wide writes. Even though we told the
+ /* If we use memcpy, it does word-wide writes. Even though we told the
GT64120A that it's an 8-bit wide region, word-wide writes don't work.
We end up just writing the first byte of the four to all four bytes.
So we have this loop instead */
+ *retlen = len;
while(len) {
- __raw_writeb(*(unsigned char *) from, map->map_priv_1 + to);
+ __raw_writeb(*(unsigned char *) from, map->virt + to);
from++;
to++;
len--;
@@ -70,31 +47,28 @@ void ocelot_copy_to(struct map_info *map, unsigned long to, const void *from, ss
static struct mtd_partition *parsed_parts;
struct map_info ocelot_flash_map = {
- name: "Ocelot boot flash",
- size: FLASH_WINDOW_SIZE,
- buswidth: FLASH_BUSWIDTH,
- read8: ocelot_read8,
- copy_from: ocelot_copy_from_cache,
- write8: ocelot_write8,
+ .name = "Ocelot boot flash",
+ .size = FLASH_WINDOW_SIZE,
+ .bankwidth = FLASH_BUSWIDTH,
+ .phys = FLASH_WINDOW_ADDR,
};
struct map_info ocelot_nvram_map = {
- name: "Ocelot NVRAM",
- size: NVRAM_WINDOW_SIZE,
- buswidth: NVRAM_BUSWIDTH,
- read8: ocelot_read8,
- copy_from: ocelot_copy_from,
- write8: ocelot_write8,
- copy_to: ocelot_copy_to
+ .name = "Ocelot NVRAM",
+ .size = NVRAM_WINDOW_SIZE,
+ .bankwidth = NVRAM_BUSWIDTH,
+ .phys = NVRAM_WINDOW_ADDR,
};
+static const char *probes[] = { "RedBoot", NULL };
+
static int __init init_ocelot_maps(void)
{
void *pld;
int nr_parts;
unsigned char brd_status;
- printk(KERN_INFO "Momenco Ocelot MTD mappings: Flash 0x%x at 0x%x, NVRAM 0x%x at 0x%x\n",
+ printk(KERN_INFO "Momenco Ocelot MTD mappings: Flash 0x%x at 0x%x, NVRAM 0x%x at 0x%x\n",
FLASH_WINDOW_SIZE, FLASH_WINDOW_ADDR, NVRAM_WINDOW_SIZE, NVRAM_WINDOW_ADDR);
/* First check whether the flash jumper is present */
@@ -107,12 +81,13 @@ static int __init init_ocelot_maps(void)
iounmap(pld);
/* Now ioremap the NVRAM space */
- ocelot_nvram_map.map_priv_1 = (unsigned long)ioremap_nocache(NVRAM_WINDOW_ADDR, NVRAM_WINDOW_SIZE);
- if (!ocelot_nvram_map.map_priv_1) {
+ ocelot_nvram_map.virt = ioremap_nocache(NVRAM_WINDOW_ADDR, NVRAM_WINDOW_SIZE);
+ if (!ocelot_nvram_map.virt) {
printk(KERN_NOTICE "Failed to ioremap Ocelot NVRAM space\n");
return -EIO;
}
- // ocelot_nvram_map.map_priv_2 = ocelot_nvram_map.map_priv_1;
+
+ simple_map_init(&ocelot_nvram_map);
/* And do the RAM probe on it to get an MTD device */
nvram_mtd = do_map_probe("map_ram", &ocelot_nvram_map);
@@ -120,22 +95,21 @@ static int __init init_ocelot_maps(void)
printk("NVRAM probe failed\n");
goto fail_1;
}
- nvram_mtd->module = THIS_MODULE;
+ nvram_mtd->owner = THIS_MODULE;
nvram_mtd->erasesize = 16;
+ /* Override the write() method */
+ nvram_mtd->write = ocelot_ram_write;
/* Now map the flash space */
- ocelot_flash_map.map_priv_1 = (unsigned long)ioremap_nocache(FLASH_WINDOW_ADDR, FLASH_WINDOW_SIZE);
- if (!ocelot_flash_map.map_priv_1) {
+ ocelot_flash_map.virt = ioremap_nocache(FLASH_WINDOW_ADDR, FLASH_WINDOW_SIZE);
+ if (!ocelot_flash_map.virt) {
printk(KERN_NOTICE "Failed to ioremap Ocelot flash space\n");
goto fail_2;
}
/* Now the cached version */
- ocelot_flash_map.map_priv_2 = (unsigned long)__ioremap(FLASH_WINDOW_ADDR, FLASH_WINDOW_SIZE, 0);
+ ocelot_flash_map.cached = (unsigned long)__ioremap(FLASH_WINDOW_ADDR, FLASH_WINDOW_SIZE, 0);
- if (!ocelot_flash_map.map_priv_2) {
- /* Doesn't matter if it failed. Just use the uncached version */
- ocelot_flash_map.map_priv_2 = ocelot_flash_map.map_priv_1;
- }
+ simple_map_init(&ocelot_flash_map);
/* Only probe for flash if the write jumper is present */
if (brd_status & 0x40) {
@@ -155,25 +129,24 @@ static int __init init_ocelot_maps(void)
add_mtd_device(nvram_mtd);
- flash_mtd->module = THIS_MODULE;
- nr_parts = parse_redboot_partitions(flash_mtd, &parsed_parts);
+ flash_mtd->owner = THIS_MODULE;
+ nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
- if (nr_parts)
+ if (nr_parts > 0)
add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
else
add_mtd_device(flash_mtd);
return 0;
-
- fail3:
- iounmap((void *)ocelot_flash_map.map_priv_1);
- if (ocelot_flash_map.map_priv_2 &&
- ocelot_flash_map.map_priv_2 != ocelot_flash_map.map_priv_1)
- iounmap((void *)ocelot_flash_map.map_priv_2);
+
+ fail3:
+ iounmap((void *)ocelot_flash_map.virt);
+ if (ocelot_flash_map.cached)
+ iounmap((void *)ocelot_flash_map.cached);
fail_2:
map_destroy(nvram_mtd);
fail_1:
- iounmap((void *)ocelot_nvram_map.map_priv_1);
+ iounmap((void *)ocelot_nvram_map.virt);
return -ENXIO;
}
@@ -182,16 +155,16 @@ static void __exit cleanup_ocelot_maps(void)
{
del_mtd_device(nvram_mtd);
map_destroy(nvram_mtd);
- iounmap((void *)ocelot_nvram_map.map_priv_1);
+ iounmap((void *)ocelot_nvram_map.virt);
if (parsed_parts)
del_mtd_partitions(flash_mtd);
else
del_mtd_device(flash_mtd);
map_destroy(flash_mtd);
- iounmap((void *)ocelot_flash_map.map_priv_1);
- if (ocelot_flash_map.map_priv_2 != ocelot_flash_map.map_priv_1)
- iounmap((void *)ocelot_flash_map.map_priv_2);
+ iounmap((void *)ocelot_flash_map.virt);
+ if (ocelot_flash_map.cached)
+ iounmap((void *)ocelot_flash_map.cached);
}
module_init(init_ocelot_maps);
diff --git a/linux-2.4.x/drivers/mtd/maps/ocotea.c b/linux-2.4.x/drivers/mtd/maps/ocotea.c
new file mode 100644
index 0000000..a21fcd1
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/ocotea.c
@@ -0,0 +1,151 @@
+/*
+ * Mapping for Ocotea user flash
+ *
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Copyright 2002-2004 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/config.h>
+#include <asm/io.h>
+#include <asm/ibm44x.h>
+#include <platforms/4xx/ocotea.h>
+
+static struct mtd_info *flash;
+
+static struct map_info ocotea_small_map = {
+ .name = "Ocotea small flash",
+ .size = OCOTEA_SMALL_FLASH_SIZE,
+ .buswidth = 1,
+};
+
+static struct map_info ocotea_large_map = {
+ .name = "Ocotea large flash",
+ .size = OCOTEA_LARGE_FLASH_SIZE,
+ .buswidth = 1,
+};
+
+static struct mtd_partition ocotea_small_partitions[] = {
+ {
+ .name = "pibs",
+ .offset = 0x0,
+ .size = 0x100000,
+ }
+};
+
+static struct mtd_partition ocotea_large_partitions[] = {
+ {
+ .name = "fs",
+ .offset = 0,
+ .size = 0x300000,
+ },
+ {
+ .name = "firmware",
+ .offset = 0x300000,
+ .size = 0x100000,
+ }
+};
+
+int __init init_ocotea(void)
+{
+ u8 fpga0_reg;
+ u8 *fpga0_adr;
+ unsigned long long small_flash_base, large_flash_base;
+
+ fpga0_adr = ioremap64(OCOTEA_FPGA_ADDR, 16);
+ if (!fpga0_adr)
+ return -ENOMEM;
+
+ fpga0_reg = readb((unsigned long)fpga0_adr);
+ iounmap(fpga0_adr);
+
+ if (OCOTEA_BOOT_LARGE_FLASH(fpga0_reg)) {
+ small_flash_base = OCOTEA_SMALL_FLASH_HIGH;
+ large_flash_base = OCOTEA_LARGE_FLASH_LOW;
+ }
+ else {
+ small_flash_base = OCOTEA_SMALL_FLASH_LOW;
+ large_flash_base = OCOTEA_LARGE_FLASH_HIGH;
+ }
+
+ ocotea_small_map.phys = small_flash_base;
+ ocotea_small_map.virt = ioremap64(small_flash_base,
+ ocotea_small_map.size);
+
+ if (!ocotea_small_map.virt) {
+ printk("Failed to ioremap flash\n");
+ return -EIO;
+ }
+
+ simple_map_init(&ocotea_small_map);
+
+ flash = do_map_probe("map_rom", &ocotea_small_map);
+ if (flash) {
+ flash->owner = THIS_MODULE;
+ add_mtd_partitions(flash, ocotea_small_partitions,
+ ARRAY_SIZE(ocotea_small_partitions));
+ } else {
+ printk("map probe failed for flash\n");
+ return -ENXIO;
+ }
+
+ ocotea_large_map.phys = large_flash_base;
+ ocotea_large_map.virt = ioremap64(large_flash_base,
+ ocotea_large_map.size);
+
+ if (!ocotea_large_map.virt) {
+ printk("Failed to ioremap flash\n");
+ return -EIO;
+ }
+
+ simple_map_init(&ocotea_large_map);
+
+ flash = do_map_probe("cfi_probe", &ocotea_large_map);
+ if (flash) {
+ flash->owner = THIS_MODULE;
+ add_mtd_partitions(flash, ocotea_large_partitions,
+ ARRAY_SIZE(ocotea_large_partitions));
+ } else {
+ printk("map probe failed for flash\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void __exit cleanup_ocotea(void)
+{
+ if (flash) {
+ del_mtd_partitions(flash);
+ map_destroy(flash);
+ }
+
+ if (ocotea_small_map.virt) {
+ iounmap((void *)ocotea_small_map.virt);
+ ocotea_small_map.virt = 0;
+ }
+
+ if (ocotea_large_map.virt) {
+ iounmap((void *)ocotea_large_map.virt);
+ ocotea_large_map.virt = 0;
+ }
+}
+
+module_init(init_ocotea);
+module_exit(cleanup_ocotea);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
+MODULE_DESCRIPTION("MTD map and partitions for IBM 440GX Ocotea boards");
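For orientation, the new board maps added by this patch (ocotea.c above, and omap-toto-flash.c, omap_nor.c and p3p440.c below) all follow the same 2.6-style map-driver pattern: fill in a struct map_info, ioremap the window, call simple_map_init(), probe with do_map_probe() and register the result. A minimal sketch of that pattern follows; it is illustrative only and not part of the patch, and the window address, size and names are hypothetical rather than taken from any of these boards.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <asm/io.h>

/* Hypothetical window -- the real drivers take these from board headers. */
#define EX_FLASH_BASE	0xff000000
#define EX_FLASH_SIZE	0x00400000

static struct mtd_info *ex_mtd;

static struct map_info ex_map = {
	.name      = "example-flash",
	.size      = EX_FLASH_SIZE,
	.bankwidth = 2,
};

static int __init ex_init(void)
{
	ex_map.phys = EX_FLASH_BASE;
	ex_map.virt = ioremap(EX_FLASH_BASE, EX_FLASH_SIZE);
	if (!ex_map.virt)
		return -EIO;

	simple_map_init(&ex_map);	/* installs the default read/write/copy hooks */

	ex_mtd = do_map_probe("cfi_probe", &ex_map);
	if (!ex_mtd) {
		iounmap(ex_map.virt);
		return -ENXIO;
	}
	ex_mtd->owner = THIS_MODULE;

	if (add_mtd_device(ex_mtd)) {	/* or add_mtd_partitions() with a partition table */
		map_destroy(ex_mtd);
		iounmap(ex_map.virt);
		return -ENXIO;
	}
	return 0;
}

static void __exit ex_exit(void)
{
	del_mtd_device(ex_mtd);
	map_destroy(ex_mtd);
	iounmap(ex_map.virt);
}

module_init(ex_init);
module_exit(ex_exit);
MODULE_LICENSE("GPL");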
diff --git a/linux-2.4.x/drivers/mtd/maps/octagon-5066.c b/linux-2.4.x/drivers/mtd/maps/octagon-5066.c
index 16cfd9a..a6642db 100644
--- a/linux-2.4.x/drivers/mtd/maps/octagon-5066.c
+++ b/linux-2.4.x/drivers/mtd/maps/octagon-5066.c
@@ -1,12 +1,12 @@
-// $Id: octagon-5066.c,v 1.19 2001/10/02 15:05:14 dwmw2 Exp $
+// $Id: octagon-5066.c,v 1.28 2005/11/07 11:14:27 gleixner Exp $
/* ######################################################################
- Octagon 5066 MTD Driver.
-
+ Octagon 5066 MTD Driver.
+
The Octagon 5066 is a SBC based on AMD's 586-WB running at 133 MHZ. It
comes with a builtin AMD 29F016 flash chip and a socketed EEPROM that
is replacable by flash. Both units are mapped through a multiplexer
- into a 32k memory window at 0xe8000. The control register for the
+ into a 32k memory window at 0xe8000. The control register for the
multiplexing unit is located at IO 0x208 with a bit map of
0-5 Page Selection in 32k increments
6-7 Device selection:
@@ -14,14 +14,14 @@
01 SSD 0 (Socket)
10 SSD 1 (Flash chip)
11 undefined
-
+
On each SSD, the first 128k is reserved for use by the bios
- (actually it IS the bios..) This only matters if you are booting off the
+ (actually it IS the bios..) This only matters if you are booting off the
flash, you must not put a file system starting there.
-
+
The driver tries to do a detection algorithm to guess what sort of devices
are plugged into the sockets.
-
+
##################################################################### */
#include <linux/module.h>
@@ -31,6 +31,7 @@
#include <asm/io.h>
#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
#define WINDOW_START 0xe8000
#define WINDOW_LENGTH 0x8000
@@ -40,7 +41,7 @@
static volatile char page_n_dev = 0;
static unsigned long iomapadr;
-static spinlock_t oct5066_spin = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(oct5066_spin);
/*
* We use map_priv_1 to identify which device we are.
@@ -55,38 +56,18 @@ static void __oct5066_page(struct map_info *map, __u8 byte)
static inline void oct5066_page(struct map_info *map, unsigned long ofs)
{
__u8 byte = map->map_priv_1 | (ofs >> WINDOW_SHIFT);
-
+
if (page_n_dev != byte)
__oct5066_page(map, byte);
}
-static __u8 oct5066_read8(struct map_info *map, unsigned long ofs)
-{
- __u8 ret;
- spin_lock(&oct5066_spin);
- oct5066_page(map, ofs);
- ret = readb(iomapadr + (ofs & WINDOW_MASK));
- spin_unlock(&oct5066_spin);
- return ret;
-}
-
-static __u16 oct5066_read16(struct map_info *map, unsigned long ofs)
+static map_word oct5066_read8(struct map_info *map, unsigned long ofs)
{
- __u16 ret;
+ map_word ret;
spin_lock(&oct5066_spin);
oct5066_page(map, ofs);
- ret = readw(iomapadr + (ofs & WINDOW_MASK));
- spin_unlock(&oct5066_spin);
- return ret;
-}
-
-static __u32 oct5066_read32(struct map_info *map, unsigned long ofs)
-{
- __u32 ret;
- spin_lock(&oct5066_spin);
- oct5066_page(map, ofs);
- ret = readl(iomapadr + (ofs & WINDOW_MASK));
+ ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
spin_unlock(&oct5066_spin);
return ret;
}
@@ -97,7 +78,7 @@ static void oct5066_copy_from(struct map_info *map, void *to, unsigned long from
unsigned long thislen = len;
if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
-
+
spin_lock(&oct5066_spin);
oct5066_page(map, from);
memcpy_fromio(to, iomapadr + from, thislen);
@@ -108,27 +89,11 @@ static void oct5066_copy_from(struct map_info *map, void *to, unsigned long from
}
}
-static void oct5066_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- spin_lock(&oct5066_spin);
- oct5066_page(map, adr);
- writeb(d, iomapadr + (adr & WINDOW_MASK));
- spin_unlock(&oct5066_spin);
-}
-
-static void oct5066_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- spin_lock(&oct5066_spin);
- oct5066_page(map, adr);
- writew(d, iomapadr + (adr & WINDOW_MASK));
- spin_unlock(&oct5066_spin);
-}
-
-static void oct5066_write32(struct map_info *map, __u32 d, unsigned long adr)
+static void oct5066_write8(struct map_info *map, map_word d, unsigned long adr)
{
spin_lock(&oct5066_spin);
oct5066_page(map, adr);
- writel(d, iomapadr + (adr & WINDOW_MASK));
+ writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
spin_unlock(&oct5066_spin);
}
@@ -138,7 +103,7 @@ static void oct5066_copy_to(struct map_info *map, unsigned long to, const void *
unsigned long thislen = len;
if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
-
+
spin_lock(&oct5066_spin);
oct5066_page(map, to);
memcpy_toio(iomapadr + to, from, thislen);
@@ -151,32 +116,26 @@ static void oct5066_copy_to(struct map_info *map, unsigned long to, const void *
static struct map_info oct5066_map[2] = {
{
- name: "Octagon 5066 Socket",
- size: 512 * 1024,
- buswidth: 1,
- read8: oct5066_read8,
- read16: oct5066_read16,
- read32: oct5066_read32,
- copy_from: oct5066_copy_from,
- write8: oct5066_write8,
- write16: oct5066_write16,
- write32: oct5066_write32,
- copy_to: oct5066_copy_to,
- map_priv_1: 1<<6
+ .name = "Octagon 5066 Socket",
+ .phys = NO_XIP,
+ .size = 512 * 1024,
+ .bankwidth = 1,
+ .read = oct5066_read8,
+ .copy_from = oct5066_copy_from,
+ .write = oct5066_write8,
+ .copy_to = oct5066_copy_to,
+ .map_priv_1 = 1<<6
},
{
- name: "Octagon 5066 Internal Flash",
- size: 2 * 1024 * 1024,
- buswidth: 1,
- read8: oct5066_read8,
- read16: oct5066_read16,
- read32: oct5066_read32,
- copy_from: oct5066_copy_from,
- write8: oct5066_write8,
- write16: oct5066_write16,
- write32: oct5066_write32,
- copy_to: oct5066_copy_to,
- map_priv_1: 2<<6
+ .name = "Octagon 5066 Internal Flash",
+ .phys = NO_XIP,
+ .size = 2 * 1024 * 1024,
+ .bankwidth = 1,
+ .read = oct5066_read8,
+ .copy_from = oct5066_copy_from,
+ .write = oct5066_write8,
+ .copy_to = oct5066_copy_to,
+ .map_priv_1 = 2<<6
}
};
@@ -185,7 +144,7 @@ static struct mtd_info *oct5066_mtd[2] = {NULL, NULL};
// OctProbe - Sense if this is an octagon card
// ---------------------------------------------------------------------
/* Perform a simple validity test, we map the window select SSD0 and
- change pages while monitoring the window. A change in the window,
+ change pages while monitoring the window. A change in the window,
controlled by the PAGE_IO port is a functioning 5066 board. This will
fail if the thing in the socket is set to a uniform value. */
static int __init OctProbe(void)
@@ -202,13 +161,13 @@ static int __init OctProbe(void)
Values[I%10] = readl(iomapadr);
if (I > 0 && Values[I%10] == Values[0])
return -EAGAIN;
- }
+ }
else
{
// Make sure we get the same values on the second pass
if (Values[I%10] != readl(iomapadr))
return -EAGAIN;
- }
+ }
}
return 0;
}
@@ -223,37 +182,36 @@ void cleanup_oct5066(void)
}
}
iounmap((void *)iomapadr);
- release_region(PAGE_IO,1);
+ release_region(PAGE_IO, 1);
}
int __init init_oct5066(void)
{
int i;
-
+ int ret = 0;
+
// Do an autoprobe sequence
- if (check_region(PAGE_IO,1) != 0)
- {
- printk("5066: Page Register in Use\n");
- return -EAGAIN;
- }
+ if (!request_region(PAGE_IO,1,"Octagon SSD")) {
+ printk(KERN_NOTICE "5066: Page Register in Use\n");
+ return -EAGAIN;
+ }
iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH);
if (!iomapadr) {
- printk("Failed to ioremap memory region\n");
- return -EIO;
+ printk(KERN_NOTICE "Failed to ioremap memory region\n");
+ ret = -EIO;
+ goto out_rel;
}
- if (OctProbe() != 0)
- {
- printk("5066: Octagon Probe Failed, is this an Octagon 5066 SBC?\n");
- iounmap((void *)iomapadr);
- return -EAGAIN;
- }
-
- request_region(PAGE_IO,1,"Octagon SSD");
-
+ if (OctProbe() != 0) {
+ printk(KERN_NOTICE "5066: Octagon Probe Failed, is this an Octagon 5066 SBC?\n");
+ iounmap((void *)iomapadr);
+ ret = -EAGAIN;
+ goto out_unmap;
+ }
+
// Print out our little header..
printk("Octagon 5066 SSD IO:0x%x MEM:0x%x-0x%x\n",PAGE_IO,WINDOW_START,
WINDOW_START+WINDOW_LENGTH);
-
+
for (i=0; i<2; i++) {
oct5066_mtd[i] = do_map_probe("cfi_probe", &oct5066_map[i]);
if (!oct5066_mtd[i])
@@ -263,17 +221,23 @@ int __init init_oct5066(void)
if (!oct5066_mtd[i])
oct5066_mtd[i] = do_map_probe("map_rom", &oct5066_map[i]);
if (oct5066_mtd[i]) {
- oct5066_mtd[i]->module = THIS_MODULE;
+ oct5066_mtd[i]->owner = THIS_MODULE;
add_mtd_device(oct5066_mtd[i]);
}
}
-
+
if (!oct5066_mtd[0] && !oct5066_mtd[1]) {
cleanup_oct5066();
return -ENXIO;
- }
-
+ }
+
return 0;
+
+ out_unmap:
+ iounmap((void *)iomapadr);
+ out_rel:
+ release_region(PAGE_IO, 1);
+ return ret;
}
module_init(init_oct5066);
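The conversion above replaces the old per-width read8/read16/read32 and write8/write16/write32 hooks with a single read/write pair operating on map_word, selected by bankwidth. For a plain bankwidth-1 memory window the accessors reduce to roughly the following shape (an illustrative sketch, not part of the patch; example_base is a hypothetical ioremap()ed window):

#include <linux/mtd/map.h>
#include <asm/io.h>

static void __iomem *example_base;	/* hypothetical ioremap()ed window */

static map_word example_read(struct map_info *map, unsigned long ofs)
{
	map_word val;

	val.x[0] = readb(example_base + ofs);	/* one device word per call */
	return val;
}

static void example_write(struct map_info *map, map_word d, unsigned long adr)
{
	writeb(d.x[0], example_base + adr);
}

static struct map_info example_map = {
	.name      = "example",
	.phys      = NO_XIP,	/* window is not directly XIP-addressable */
	.size      = 512 * 1024,
	.bankwidth = 1,		/* replaces the old "buswidth" field */
	.read      = example_read,
	.write     = example_write,
};

Drivers without a paging scheme like the Octagon's would normally not open-code these at all and simply call simple_map_init() on the map instead.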
diff --git a/linux-2.4.x/drivers/mtd/maps/omap-toto-flash.c b/linux-2.4.x/drivers/mtd/maps/omap-toto-flash.c
new file mode 100644
index 0000000..e655c08
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/omap-toto-flash.c
@@ -0,0 +1,136 @@
+/*
+ * NOR Flash memory access on TI Toto board
+ *
+ * jzhang@ti.com (C) 2003 Texas Instruments.
+ *
+ * (C) 2002 MontaVista Software, Inc.
+ *
+ * $Id: omap-toto-flash.c,v 1.6 2005/11/29 20:01:28 gleixner Exp $
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+
+
+#ifndef CONFIG_ARCH_OMAP
+#error This is for OMAP architecture only
+#endif
+
+// these lines need to be moved to a hardware header file
+#define OMAP_TOTO_FLASH_BASE 0xd8000000
+#define OMAP_TOTO_FLASH_SIZE 0x80000
+
+static struct map_info omap_toto_map_flash = {
+ .name = "OMAP Toto flash",
+ .bankwidth = 2,
+ .virt = (void __iomem *)OMAP_TOTO_FLASH_BASE,
+};
+
+
+static struct mtd_partition toto_flash_partitions[] = {
+ {
+ .name = "BootLoader",
+ .size = 0x00040000, /* hopefully u-boot will stay 128k + 128k */
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ }, {
+ .name = "ReservedSpace",
+ .size = 0x00030000,
+ .offset = MTDPART_OFS_APPEND,
+ //mask_flags: MTD_WRITEABLE, /* force read-only */
+ }, {
+ .name = "EnvArea", /* bottom 64KiB for env vars */
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ }
+};
+
+static struct mtd_partition *parsed_parts;
+
+static struct mtd_info *flash_mtd;
+
+static int __init init_flash (void)
+{
+
+ struct mtd_partition *parts;
+ int nb_parts = 0;
+ int parsed_nr_parts = 0;
+ const char *part_type;
+
+ /*
+ * Static partition definition selection
+ */
+ part_type = "static";
+
+ parts = toto_flash_partitions;
+ nb_parts = ARRAY_SIZE(toto_flash_partitions);
+ omap_toto_map_flash.size = OMAP_TOTO_FLASH_SIZE;
+ omap_toto_map_flash.phys = virt_to_phys(OMAP_TOTO_FLASH_BASE);
+
+ simple_map_init(&omap_toto_map_flash);
+ /*
+ * Now let's probe for the actual flash. Do it here since
+ * specific machine settings might have been set above.
+ */
+ printk(KERN_NOTICE "OMAP toto flash: probing %d-bit flash bus\n",
+ omap_toto_map_flash.bankwidth*8);
+ flash_mtd = do_map_probe("jedec_probe", &omap_toto_map_flash);
+ if (!flash_mtd)
+ return -ENXIO;
+
+ if (parsed_nr_parts > 0) {
+ parts = parsed_parts;
+ nb_parts = parsed_nr_parts;
+ }
+
+ if (nb_parts == 0) {
+ printk(KERN_NOTICE "OMAP toto flash: no partition info available,"
+ "registering whole flash at once\n");
+ if (add_mtd_device(flash_mtd)){
+ return -ENXIO;
+ }
+ } else {
+ printk(KERN_NOTICE "Using %s partition definition\n",
+ part_type);
+ return add_mtd_partitions(flash_mtd, parts, nb_parts);
+ }
+ return 0;
+}
+
+int __init omap_toto_mtd_init(void)
+{
+ int status;
+
+ if ((status = init_flash()) != 0) {
+ printk(KERN_ERR "OMAP Toto Flash: unable to init map for toto flash\n");
+ }
+ return status;
+}
+
+static void __exit omap_toto_mtd_cleanup(void)
+{
+ if (flash_mtd) {
+ del_mtd_partitions(flash_mtd);
+ map_destroy(flash_mtd);
+ kfree(parsed_parts);
+ }
+}
+
+module_init(omap_toto_mtd_init);
+module_exit(omap_toto_mtd_cleanup);
+
+MODULE_AUTHOR("Jian Zhang");
+MODULE_DESCRIPTION("OMAP Toto board map driver");
+MODULE_LICENSE("GPL");
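For reference, with OMAP_TOTO_FLASH_SIZE = 0x80000 (512 KiB) the MTDPART_OFS_APPEND and MTDPART_SIZ_FULL entries in toto_flash_partitions above resolve to the following concrete layout:

/*
 * BootLoader     offset 0x00000  size 0x40000  (256 KiB, forced read-only)
 * ReservedSpace  offset 0x40000  size 0x30000  (192 KiB)
 * EnvArea        offset 0x70000  size 0x10000  (64 KiB, remainder of the chip)
 */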
diff --git a/linux-2.4.x/drivers/mtd/maps/omap_nor.c b/linux-2.4.x/drivers/mtd/maps/omap_nor.c
new file mode 100644
index 0000000..418afff
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/omap_nor.c
@@ -0,0 +1,179 @@
+/*
+ * Flash memory support for various TI OMAP boards
+ *
+ * Copyright (C) 2001-2002 MontaVista Software Inc.
+ * Copyright (C) 2003-2004 Texas Instruments
+ * Copyright (C) 2004 Nokia Corporation
+ *
+ * Assembled using driver code copyright the companies above
+ * and written by David Brownell, Jian Zhang <jzhang@ti.com>,
+ * Tony Lindgren <tony@atomide.com> and others.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include <asm/mach/flash.h>
+#include <asm/arch/tc.h>
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { /* "RedBoot", */ "cmdlinepart", NULL };
+#endif
+
+struct omapflash_info {
+ struct mtd_partition *parts;
+ struct mtd_info *mtd;
+ struct map_info map;
+};
+
+static void omap_set_vpp(struct map_info *map, int enable)
+{
+ static int count;
+
+ if (enable) {
+ if (count++ == 0)
+ OMAP_EMIFS_CONFIG_REG |= OMAP_EMIFS_CONFIG_WP;
+ } else {
+ if (count && (--count == 0))
+ OMAP_EMIFS_CONFIG_REG &= ~OMAP_EMIFS_CONFIG_WP;
+ }
+}
+
+static int __devinit omapflash_probe(struct platform_device *pdev)
+{
+ int err;
+ struct omapflash_info *info;
+ struct flash_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *res = pdev->resource;
+ unsigned long size = res->end - res->start + 1;
+
+ info = kmalloc(sizeof(struct omapflash_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ memset(info, 0, sizeof(struct omapflash_info));
+
+ if (!request_mem_region(res->start, size, "flash")) {
+ err = -EBUSY;
+ goto out_free_info;
+ }
+
+ info->map.virt = ioremap(res->start, size);
+ if (!info->map.virt) {
+ err = -ENOMEM;
+ goto out_release_mem_region;
+ }
+ info->map.name = pdev->dev.bus_id;
+ info->map.phys = res->start;
+ info->map.size = size;
+ info->map.bankwidth = pdata->width;
+ info->map.set_vpp = omap_set_vpp;
+
+ simple_map_init(&info->map);
+ info->mtd = do_map_probe(pdata->map_name, &info->map);
+ if (!info->mtd) {
+ err = -EIO;
+ goto out_iounmap;
+ }
+ info->mtd->owner = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(info->mtd, part_probes, &info->parts, 0);
+ if (err > 0)
+ add_mtd_partitions(info->mtd, info->parts, err);
+ else if (err < 0 && pdata->parts)
+ add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
+ else
+#endif
+ add_mtd_device(info->mtd);
+
+ platform_set_drvdata(pdev, info);
+
+ return 0;
+
+out_iounmap:
+ iounmap(info->map.virt);
+out_release_mem_region:
+ release_mem_region(res->start, size);
+out_free_info:
+ kfree(info);
+
+ return err;
+}
+
+static int __devexit omapflash_remove(struct platform_device *pdev)
+{
+ struct omapflash_info *info = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (info) {
+ if (info->parts) {
+ del_mtd_partitions(info->mtd);
+ kfree(info->parts);
+ } else
+ del_mtd_device(info->mtd);
+ map_destroy(info->mtd);
+ release_mem_region(info->map.phys, info->map.size);
+ iounmap((void __iomem *) info->map.virt);
+ kfree(info);
+ }
+
+ return 0;
+}
+
+static struct platform_driver omapflash_driver = {
+ .probe = omapflash_probe,
+ .remove = __devexit_p(omapflash_remove),
+ .driver = {
+ .name = "omapflash",
+ },
+};
+
+static int __init omapflash_init(void)
+{
+ return platform_driver_register(&omapflash_driver);
+}
+
+static void __exit omapflash_exit(void)
+{
+ platform_driver_unregister(&omapflash_driver);
+}
+
+module_init(omapflash_init);
+module_exit(omapflash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MTD NOR map driver for TI OMAP boards");
+
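Because omap_nor.c lists "cmdlinepart" in part_probes, partitions can also be supplied on the kernel command line instead of through platform data. An illustrative example follows; the partition names and sizes are hypothetical, and the mtd-id before the colon must match the map name, which this driver takes from the platform device's bus id.

/*
 * mtdparts=omapflash.0:256k(bootloader)ro,64k(params),2m(kernel),-(rootfs)
 *
 * Each <size>(<name>) entry is laid out in order; "-" takes the remainder
 * of the device and a trailing "ro" marks the partition read-only.
 */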
diff --git a/linux-2.4.x/drivers/mtd/maps/p3p440.c b/linux-2.4.x/drivers/mtd/maps/p3p440.c
new file mode 100644
index 0000000..ea63527
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/p3p440.c
@@ -0,0 +1,133 @@
+/*
+ * $Id: p3p440.c,v 1.2 2005/11/29 14:34:38 gleixner Exp $
+ *
+ * drivers/mtd/maps/p3p440.c
+ *
+ * Mapping for Prodrive P3P440 flash
+ *
+ * Copyright (c) 2005 DENX Software Engineering
+ * Stefan Roese <sr@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/ibm4xx.h>
+#include <asm/ppcboot.h>
+
+extern bd_t __res;
+
+#define RW_PART0_OF 0
+#define RW_PART0_SZ 0x180000
+#define RW_PART1_SZ 0x280000
+/* Partition 2 will be autosized dynamically... */
+#define RW_PART3_SZ 0x40000
+#define RW_PART4_SZ 0x40000
+
+static struct mtd_partition p3p440_flash_partitions[] = {
+ {
+ .name = "kernel",
+ .offset = RW_PART0_OF,
+ .size = RW_PART0_SZ
+ },
+ {
+ .name = "root",
+ .offset = MTDPART_OFS_APPEND,
+ .size = RW_PART1_SZ,
+ },
+ {
+ .name = "user",
+ .offset = MTDPART_OFS_APPEND,
+ /*
+ * The size of the partition is adjusted at
+ * runtime depending on the real flash size
+ */
+ },
+ {
+ .name = "env",
+ .offset = MTDPART_OFS_APPEND,
+ .size = RW_PART3_SZ,
+ },
+ {
+ .name = "u-boot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = RW_PART4_SZ,
+ }
+};
+
+static struct map_info p3p440_flash_map = {
+ .name = "p3p440-flash",
+ .bankwidth = 2,
+};
+
+static struct mtd_info *p3p440_mtd;
+
+static int __init init_p3p440_flash(void)
+{
+ unsigned long long flash_base;
+ unsigned long flash_size;
+ int res;
+
+ flash_base = __res.bi_flashstart | 0x0000000100000000LL;
+ flash_size = __res.bi_flashsize;
+
+ p3p440_flash_map.size = flash_size;
+ p3p440_flash_map.phys = flash_base;
+ p3p440_flash_map.virt = ioremap64(flash_base, p3p440_flash_map.size);
+
+ if (!p3p440_flash_map.virt) {
+ printk(KERN_NOTICE "init_p3p440_flash: failed to ioremap\n");
+ return -EIO;
+ }
+
+ /*
+ * Adjust partitions to flash size
+ */
+ p3p440_flash_partitions[2].size = p3p440_flash_map.size -
+ RW_PART0_SZ - RW_PART1_SZ - RW_PART3_SZ - RW_PART4_SZ;
+
+ simple_map_init(&p3p440_flash_map);
+
+ p3p440_mtd = do_map_probe("cfi_probe", &p3p440_flash_map);
+
+ if (p3p440_mtd) {
+ p3p440_mtd->owner = THIS_MODULE;
+ if(!add_mtd_partitions(p3p440_mtd, p3p440_flash_partitions,
+ ARRAY_SIZE(p3p440_flash_partitions)))
+ return 0;
+
+ map_destroy(p3p440_mtd);
+ }
+
+ iounmap(p3p440_flash_map.virt);
+ return -ENXIO;
+}
+
+static void __exit cleanup_p3p440_flash(void)
+{
+ if (p3p440_mtd) {
+ del_mtd_partitions(p3p440_mtd);
+ map_destroy(p3p440_mtd);
+ iounmap(p3p440_flash_map.virt);
+ }
+}
+
+module_init(init_p3p440_flash);
+module_exit(cleanup_p3p440_flash);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
+MODULE_DESCRIPTION("MTD map and partitions for Prodrive P3P440 board");
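To make the runtime sizing above concrete: assuming, hypothetically, an 8 MiB flash reported in __res.bi_flashsize, the auto-sized "user" partition works out as follows.

/*
 * user size = flash size - kernel - root - env - u-boot
 *           = 0x800000 - 0x180000 - 0x280000 - 0x40000 - 0x40000
 *           = 0x380000  (3.5 MiB)
 */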
diff --git a/linux-2.4.x/drivers/mtd/maps/pci.c b/linux-2.4.x/drivers/mtd/maps/pci.c
index 89107ad..2705620 100644
--- a/linux-2.4.x/drivers/mtd/maps/pci.c
+++ b/linux-2.4.x/drivers/mtd/maps/pci.c
@@ -7,8 +7,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * $Id: pci.c,v 1.1 2001/09/27 20:28:45 rmk Exp $
- *
+ * $Id: pci.c,v 1.15 2006/03/29 08:41:41 dwmw2 Exp $
+ *
* Generic PCI memory map driver. We support the following boards:
* - Intel IQ80310 ATU.
* - Intel EBSA285 (blank rom programming mode). Tested working 27/09/2001
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -33,11 +34,79 @@ struct mtd_pci_info {
struct map_pci_info {
struct map_info map;
- void *base;
+ void __iomem *base;
void (*exit)(struct pci_dev *dev, struct map_pci_info *map);
unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs);
struct pci_dev *dev;
-};
+};
+
+static map_word mtd_pci_read8(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ map_word val;
+ val.x[0]= readb(map->base + map->translate(map, ofs));
+// printk("read8 : %08lx => %02x\n", ofs, val.x[0]);
+ return val;
+}
+
+#if 0
+static map_word mtd_pci_read16(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ map_word val;
+ val.x[0] = readw(map->base + map->translate(map, ofs));
+// printk("read16: %08lx => %04x\n", ofs, val.x[0]);
+ return val;
+}
+#endif
+static map_word mtd_pci_read32(struct map_info *_map, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ map_word val;
+ val.x[0] = readl(map->base + map->translate(map, ofs));
+// printk("read32: %08lx => %08x\n", ofs, val.x[0]);
+ return val;
+}
+
+static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ memcpy_fromio(to, map->base + map->translate(map, from), len);
+}
+
+static void mtd_pci_write8(struct map_info *_map, map_word val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+// printk("write8 : %08lx <= %02x\n", ofs, val.x[0]);
+ writeb(val.x[0], map->base + map->translate(map, ofs));
+}
+
+#if 0
+static void mtd_pci_write16(struct map_info *_map, map_word val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+// printk("write16: %08lx <= %04x\n", ofs, val.x[0]);
+ writew(val.x[0], map->base + map->translate(map, ofs));
+}
+#endif
+static void mtd_pci_write32(struct map_info *_map, map_word val, unsigned long ofs)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+// printk("write32: %08lx <= %08x\n", ofs, val.x[0]);
+ writel(val.x[0], map->base + map->translate(map, ofs));
+}
+
+static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len)
+{
+ struct map_pci_info *map = (struct map_pci_info *)_map;
+ memcpy_toio(map->base + map->translate(map, to), from, len);
+}
+
+static const struct map_info mtd_pci_map = {
+ .phys = NO_XIP,
+ .copy_from = mtd_pci_copyfrom,
+ .copy_to = mtd_pci_copyto,
+};
/*
* Intel IOP80310 Flash driver
@@ -48,7 +117,10 @@ intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map)
{
u32 win_base;
- map->map.buswidth = 1;
+ map->map.bankwidth = 1;
+ map->map.read = mtd_pci_read8;
+ map->map.write = mtd_pci_write8;
+
map->map.size = 0x00800000;
map->base = ioremap_nocache(pci_resource_start(dev, 0),
pci_resource_len(dev, 0));
@@ -72,7 +144,7 @@ static void
intel_iq80310_exit(struct pci_dev *dev, struct map_pci_info *map)
{
if (map->base)
- iounmap((void *)map->base);
+ iounmap(map->base);
pci_write_config_dword(dev, 0x44, map->map.map_priv_2);
}
@@ -98,10 +170,10 @@ intel_iq80310_translate(struct map_pci_info *map, unsigned long ofs)
}
static struct mtd_pci_info intel_iq80310_info = {
- init: intel_iq80310_init,
- exit: intel_iq80310_exit,
- translate: intel_iq80310_translate,
- map_name: "cfi_probe",
+ .init = intel_iq80310_init,
+ .exit = intel_iq80310_exit,
+ .translate = intel_iq80310_translate,
+ .map_name = "cfi_probe",
};
/*
@@ -134,20 +206,22 @@ intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map)
* or simply enabling it?
*/
if (!(pci_resource_flags(dev, PCI_ROM_RESOURCE) &
- PCI_ROM_ADDRESS_ENABLE)) {
+ IORESOURCE_ROM_ENABLE)) {
u32 val;
- pci_resource_flags(dev, PCI_ROM_RESOURCE) |= PCI_ROM_ADDRESS_ENABLE;
+ pci_resource_flags(dev, PCI_ROM_RESOURCE) |= IORESOURCE_ROM_ENABLE;
pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
val |= PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(dev, PCI_ROM_ADDRESS, val);
- printk("%s: enabling expansion ROM\n", dev->slot_name);
+ printk("%s: enabling expansion ROM\n", pci_name(dev));
}
}
if (!len || !base)
return -ENXIO;
- map->map.buswidth = 4;
+ map->map.bankwidth = 4;
+ map->map.read = mtd_pci_read32;
+ map->map.write = mtd_pci_write32;
map->map.size = len;
map->base = ioremap_nocache(base, len);
@@ -163,12 +237,12 @@ intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map)
u32 val;
if (map->base)
- iounmap((void *)map->base);
+ iounmap(map->base);
/*
* We need to undo the PCI BAR2/PCI ROM BAR address alteration.
*/
- pci_resource_flags(dev, PCI_ROM_RESOURCE) &= ~PCI_ROM_ADDRESS_ENABLE;
+ pci_resource_flags(dev, PCI_ROM_RESOURCE) &= ~IORESOURCE_ROM_ENABLE;
pci_read_config_dword(dev, PCI_ROM_ADDRESS, &val);
val &= ~PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(dev, PCI_ROM_ADDRESS, val);
@@ -181,34 +255,32 @@ intel_dc21285_translate(struct map_pci_info *map, unsigned long ofs)
}
static struct mtd_pci_info intel_dc21285_info = {
- init: intel_dc21285_init,
- exit: intel_dc21285_exit,
- translate: intel_dc21285_translate,
- map_name: "jedec_probe",
+ .init = intel_dc21285_init,
+ .exit = intel_dc21285_exit,
+ .translate = intel_dc21285_translate,
+ .map_name = "jedec_probe",
};
/*
* PCI device ID table
*/
-static struct pci_device_id mtd_pci_ids[] __devinitdata = {
+static struct pci_device_id mtd_pci_ids[] = {
{
- vendor: PCI_VENDOR_ID_INTEL,
- device: 0x530d,
- subvendor: PCI_ANY_ID,
- subdevice: PCI_ANY_ID,
- class: PCI_CLASS_MEMORY_OTHER << 8,
- class_mask: 0xffff00,
- driver_data: (unsigned long)&intel_iq80310_info,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = 0x530d,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = PCI_CLASS_MEMORY_OTHER << 8,
+ .class_mask = 0xffff00,
+ .driver_data = (unsigned long)&intel_iq80310_info,
},
{
- vendor: PCI_VENDOR_ID_DEC,
- device: PCI_DEVICE_ID_DEC_21285,
- subvendor: 0, /* DC21285 defaults to 0 on reset */
- subdevice: 0, /* DC21285 defaults to 0 on reset */
- class: 0,
- class_mask: 0,
- driver_data: (unsigned long)&intel_dc21285_info,
+ .vendor = PCI_VENDOR_ID_DEC,
+ .device = PCI_DEVICE_ID_DEC_21285,
+ .subvendor = 0, /* DC21285 defaults to 0 on reset */
+ .subdevice = 0, /* DC21285 defaults to 0 on reset */
+ .driver_data = (unsigned long)&intel_dc21285_info,
},
{ 0, }
};
@@ -217,74 +289,6 @@ static struct pci_device_id mtd_pci_ids[] __devinitdata = {
* Generic code follows.
*/
-static u8 mtd_pci_read8(struct map_info *_map, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- u8 val = readb(map->base + map->translate(map, ofs));
-// printk("read8 : %08lx => %02x\n", ofs, val);
- return val;
-}
-
-static u16 mtd_pci_read16(struct map_info *_map, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- u16 val = readw(map->base + map->translate(map, ofs));
-// printk("read16: %08lx => %04x\n", ofs, val);
- return val;
-}
-
-static u32 mtd_pci_read32(struct map_info *_map, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- u32 val = readl(map->base + map->translate(map, ofs));
-// printk("read32: %08lx => %08x\n", ofs, val);
- return val;
-}
-
-static void mtd_pci_copyfrom(struct map_info *_map, void *to, unsigned long from, ssize_t len)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- memcpy_fromio(to, map->base + map->translate(map, from), len);
-}
-
-static void mtd_pci_write8(struct map_info *_map, u8 val, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write8 : %08lx <= %02x\n", ofs, val);
- writeb(val, map->base + map->translate(map, ofs));
-}
-
-static void mtd_pci_write16(struct map_info *_map, u16 val, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write16: %08lx <= %04x\n", ofs, val);
- writew(val, map->base + map->translate(map, ofs));
-}
-
-static void mtd_pci_write32(struct map_info *_map, u32 val, unsigned long ofs)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
-// printk("write32: %08lx <= %08x\n", ofs, val);
- writel(val, map->base + map->translate(map, ofs));
-}
-
-static void mtd_pci_copyto(struct map_info *_map, unsigned long to, const void *from, ssize_t len)
-{
- struct map_pci_info *map = (struct map_pci_info *)_map;
- memcpy_toio(map->base + map->translate(map, to), from, len);
-}
-
-static struct map_info mtd_pci_map = {
- read8: mtd_pci_read8,
- read16: mtd_pci_read16,
- read32: mtd_pci_read32,
- copy_from: mtd_pci_copyfrom,
- write8: mtd_pci_write8,
- write16: mtd_pci_write16,
- write32: mtd_pci_write32,
- copy_to: mtd_pci_copyto,
-};
-
static int __devinit
mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
@@ -307,7 +311,7 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto release;
map->map = mtd_pci_map;
- map->map.name = dev->slot_name;
+ map->map.name = pci_name(dev);
map->dev = dev;
map->exit = info->exit;
map->translate = info->translate;
@@ -322,7 +326,7 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!mtd)
goto release;
- mtd->module = THIS_MODULE;
+ mtd->owner = THIS_MODULE;
add_mtd_device(mtd);
pci_set_drvdata(dev, mtd);
@@ -330,9 +334,6 @@ mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return 0;
release:
- if (mtd)
- map_destroy(mtd);
-
if (map) {
map->exit(dev, map);
kfree(map);
@@ -359,15 +360,15 @@ mtd_pci_remove(struct pci_dev *dev)
}
static struct pci_driver mtd_pci_driver = {
- name: "MTD PCI",
- probe: mtd_pci_probe,
- remove: __devexit_p(mtd_pci_remove),
- id_table: mtd_pci_ids,
+ .name = "MTD PCI",
+ .probe = mtd_pci_probe,
+ .remove = __devexit_p(mtd_pci_remove),
+ .id_table = mtd_pci_ids,
};
static int __init mtd_pci_maps_init(void)
{
- return pci_module_init(&mtd_pci_driver);
+ return pci_register_driver(&mtd_pci_driver);
}
static void __exit mtd_pci_maps_exit(void)
diff --git a/linux-2.4.x/drivers/mtd/maps/pcmciamtd.c b/linux-2.4.x/drivers/mtd/maps/pcmciamtd.c
new file mode 100644
index 0000000..cce14f3
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/pcmciamtd.c
@@ -0,0 +1,883 @@
+/*
+ * $Id: pcmciamtd.c,v 1.59 2006/03/29 08:31:11 dwmw2 Exp $
+ *
+ * pcmciamtd.c - MTD driver for PCMCIA flash memory cards
+ *
+ * Author: Simon Evans <spse@secret.org.uk>
+ *
+ * Copyright (C) 2002 Simon Evans
+ *
+ * Licence: GPL
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+
+#ifdef CONFIG_MTD_DEBUG
+static int debug = CONFIG_MTD_DEBUG_VERBOSE;
+MODULE_PARM(debug, "i");
+MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
+#undef DEBUG
+#define DEBUG(n, format, arg...) \
+ if (n <= debug) { \
+ printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __FUNCTION__ , ## arg); \
+ }
+
+#else
+#undef DEBUG
+#define DEBUG(n, arg...)
+static const int debug = 0;
+#endif
+
+#define err(format, arg...) printk(KERN_ERR "pcmciamtd: " format "\n" , ## arg)
+#define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "pcmciamtd: " format "\n" , ## arg)
+
+
+#define DRIVER_DESC "PCMCIA Flash memory card driver"
+#define DRIVER_VERSION "$Revision: 1.59 $"
+
+/* Size of the PCMCIA address space: 26 bits = 64 MB */
+#define MAX_PCMCIA_ADDR 0x4000000
+
+struct pcmciamtd_dev {
+ dev_link_t link; /* PCMCIA link */
+ dev_node_t node; /* device node */
+ caddr_t win_base; /* ioremapped address of PCMCIA window */
+ unsigned int win_size; /* size of window */
+ unsigned int offset; /* offset into card the window currently points at */
+ struct map_info pcmcia_map;
+ struct mtd_info *mtd_info;
+ int vpp;
+ char mtd_name[sizeof(struct cistpl_vers_1_t)];
+};
+
+
+static dev_info_t dev_info = "pcmciamtd";
+static dev_link_t *dev_list;
+
+/* Module parameters */
+
+/* 2 = do 16-bit transfers, 1 = do 8-bit transfers */
+static int bankwidth = 2;
+
+/* Speed of memory accesses, in ns */
+static int mem_speed;
+
+/* Force the size of an SRAM card */
+static int force_size;
+
+/* Force Vpp */
+static int vpp;
+
+/* Set Vpp */
+static int setvpp;
+
+/* Force card to be treated as FLASH, ROM or RAM */
+static int mem_type;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_PARM(bankwidth, "i");
+MODULE_PARM_DESC(bankwidth, "Set bankwidth (1=8 bit, 2=16 bit, default=2)");
+MODULE_PARM(mem_speed, "i");
+MODULE_PARM_DESC(mem_speed, "Set memory access speed in ns");
+MODULE_PARM(force_size, "i");
+MODULE_PARM_DESC(force_size, "Force size of card in MiB (1-64)");
+MODULE_PARM(setvpp, "i");
+MODULE_PARM_DESC(setvpp, "Set Vpp (0=Never, 1=On writes, 2=Always on, default=0)");
+MODULE_PARM(vpp, "i");
+MODULE_PARM_DESC(vpp, "Vpp value in 1/10ths eg 33=3.3V 120=12V (Dangerous)");
+MODULE_PARM(mem_type, "i");
+MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)");
+
+
+/* read/write{8,16} copy_{from,to} routines with window remapping to access whole card */
+static caddr_t remap_window(struct map_info *map, unsigned long to)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ window_handle_t win = (window_handle_t)map->map_priv_2;
+ memreq_t mrq;
+ int ret;
+
+ if(!(dev->link.state & DEV_PRESENT)) {
+ DEBUG(1, "device removed state = 0x%4.4X", dev->link.state);
+ return 0;
+ }
+
+ mrq.CardOffset = to & ~(dev->win_size-1);
+ if(mrq.CardOffset != dev->offset) {
+ DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x",
+ dev->offset, mrq.CardOffset);
+ mrq.Page = 0;
+ if( (ret = pcmcia_map_mem_page(win, &mrq)) != CS_SUCCESS) {
+ cs_error(dev->link.handle, MapMemPage, ret);
+ return NULL;
+ }
+ dev->offset = mrq.CardOffset;
+ }
+ return dev->win_base + (to & (dev->win_size-1));
+}
+
+
+static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs)
+{
+ caddr_t addr;
+ map_word d = {{0}};
+
+ addr = remap_window(map, ofs);
+ if(!addr)
+ return d;
+
+ d.x[0] = readb(addr);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, addr, d.x[0]);
+ return d;
+}
+
+
+static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs)
+{
+ caddr_t addr;
+ map_word d = {{0}};
+
+ addr = remap_window(map, ofs);
+ if(!addr)
+ return d;
+
+ d.x[0] = readw(addr);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, addr, d.x[0]);
+ return d;
+}
+
+
+static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ unsigned long win_size = dev->win_size;
+
+ DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
+ while(len) {
+ int toread = win_size - (from & (win_size-1));
+ caddr_t addr;
+
+ if(toread > len)
+ toread = len;
+
+ addr = remap_window(map, from);
+ if(!addr)
+ return;
+
+ DEBUG(4, "memcpy from %p to %p len = %d", addr, to, toread);
+ memcpy_fromio(to, addr, toread);
+ len -= toread;
+ to += toread;
+ from += toread;
+ }
+}
+
+
+static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr)
+{
+ caddr_t addr = remap_window(map, adr);
+
+ if(!addr)
+ return;
+
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, addr, d.x[0]);
+ writeb(d.x[0], addr);
+}
+
+
+static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr)
+{
+ caddr_t addr = remap_window(map, adr);
+ if(!addr)
+ return;
+
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, addr, d.x[0]);
+ writew(d.x[0], addr);
+}
+
+
+static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ unsigned long win_size = dev->win_size;
+
+ DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
+ while(len) {
+ int towrite = win_size - (to & (win_size-1));
+ caddr_t addr;
+
+ if(towrite > len)
+ towrite = len;
+
+ addr = remap_window(map, to);
+ if(!addr)
+ return;
+
+ DEBUG(4, "memcpy from %p to %p len = %d", from, addr, towrite);
+ memcpy_toio(addr, from, towrite);
+ len -= towrite;
+ to += towrite;
+ from += towrite;
+ }
+}
+
+
+/* read/write{8,16} copy_{from,to} routines with direct access */
+
+#define DEV_REMOVED(x) (!(*(u_int *)x->map_priv_1 & DEV_PRESENT))
+
+static map_word pcmcia_read8(struct map_info *map, unsigned long ofs)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+ map_word d = {{0}};
+
+ if(DEV_REMOVED(map))
+ return d;
+
+ d.x[0] = readb(win_base + ofs);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02x", ofs, win_base + ofs, d.x[0]);
+ return d;
+}
+
+
+static map_word pcmcia_read16(struct map_info *map, unsigned long ofs)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+ map_word d = {{0}};
+
+ if(DEV_REMOVED(map))
+ return d;
+
+ d.x[0] = readw(win_base + ofs);
+ DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04x", ofs, win_base + ofs, d.x[0]);
+ return d;
+}
+
+
+static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ DEBUG(3, "to = %p from = %lu len = %u", to, from, len);
+ memcpy_fromio(to, win_base + from, len);
+}
+
+
+static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%02x", adr, win_base + adr, d.x[0]);
+ writeb(d.x[0], win_base + adr);
+}
+
+
+static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ DEBUG(3, "adr = 0x%08lx (%p) data = 0x%04x", adr, win_base + adr, d.x[0]);
+ writew(d.x[0], win_base + adr);
+}
+
+
+static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ caddr_t win_base = (caddr_t)map->map_priv_2;
+
+ if(DEV_REMOVED(map))
+ return;
+
+ DEBUG(3, "to = %lu from = %p len = %u", to, from, len);
+ memcpy_toio(win_base + to, from, len);
+}
+
+
+static void pcmciamtd_set_vpp(struct map_info *map, int on)
+{
+ struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
+ dev_link_t *link = &dev->link;
+ modconf_t mod;
+ int ret;
+
+ mod.Attributes = CONF_VPP1_CHANGE_VALID | CONF_VPP2_CHANGE_VALID;
+ mod.Vcc = 0;
+ mod.Vpp1 = mod.Vpp2 = on ? dev->vpp : 0;
+
+ DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp);
+ ret = pcmcia_modify_configuration(link->handle, &mod);
+ if(ret != CS_SUCCESS) {
+ cs_error(link->handle, ModifyConfiguration, ret);
+ }
+}
+
+
+/* After a card is removed, pcmciamtd_release() will unregister the
+ * device, and release the PCMCIA configuration. If the device is
+ * still open, this will be postponed until it is closed.
+ */
+
+static void pcmciamtd_release(dev_link_t *link)
+{
+ struct pcmciamtd_dev *dev = link->priv;
+
+ DEBUG(3, "link = 0x%p", link);
+
+ if (link->win) {
+ if(dev->win_base) {
+ iounmap(dev->win_base);
+ dev->win_base = NULL;
+ }
+ pcmcia_release_window(link->win);
+ }
+ pcmcia_release_configuration(link->handle);
+ link->state &= ~DEV_CONFIG;
+}
+
+
+static void card_settings(struct pcmciamtd_dev *dev, dev_link_t *link, int *new_name)
+{
+ int rc;
+ tuple_t tuple;
+ cisparse_t parse;
+ u_char buf[64];
+
+ tuple.Attributes = 0;
+ tuple.TupleData = (cisdata_t *)buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+ tuple.DesiredTuple = RETURN_FIRST_TUPLE;
+
+ rc = pcmcia_get_first_tuple(link->handle, &tuple);
+ while(rc == CS_SUCCESS) {
+ rc = pcmcia_get_tuple_data(link->handle, &tuple);
+ if(rc != CS_SUCCESS) {
+ cs_error(link->handle, GetTupleData, rc);
+ break;
+ }
+ rc = pcmcia_parse_tuple(link->handle, &tuple, &parse);
+ if(rc != CS_SUCCESS) {
+ cs_error(link->handle, ParseTuple, rc);
+ break;
+ }
+
+ switch(tuple.TupleCode) {
+ case CISTPL_FORMAT: {
+ cistpl_format_t *t = &parse.format;
+ (void)t; /* Shut up, gcc */
+ DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u",
+ t->type, t->edc, t->offset, t->length);
+ break;
+
+ }
+
+ case CISTPL_DEVICE: {
+ cistpl_device_t *t = &parse.device;
+ int i;
+ DEBUG(2, "Common memory:");
+ dev->pcmcia_map.size = t->dev[0].size;
+ for(i = 0; i < t->ndev; i++) {
+ DEBUG(2, "Region %d, type = %u", i, t->dev[i].type);
+ DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp);
+ DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
+ DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
+ }
+ break;
+ }
+
+ case CISTPL_VERS_1: {
+ cistpl_vers_1_t *t = &parse.version_1;
+ int i;
+ if(t->ns) {
+ dev->mtd_name[0] = '\0';
+ for(i = 0; i < t->ns; i++) {
+ if(i)
+ strcat(dev->mtd_name, " ");
+ strcat(dev->mtd_name, t->str+t->ofs[i]);
+ }
+ }
+ DEBUG(2, "Found name: %s", dev->mtd_name);
+ break;
+ }
+
+ case CISTPL_JEDEC_C: {
+ cistpl_jedec_t *t = &parse.jedec;
+ int i;
+ for(i = 0; i < t->nid; i++) {
+ DEBUG(2, "JEDEC: 0x%02x 0x%02x", t->id[i].mfr, t->id[i].info);
+ }
+ break;
+ }
+
+ case CISTPL_DEVICE_GEO: {
+ cistpl_device_geo_t *t = &parse.device_geo;
+ int i;
+ dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
+ for(i = 0; i < t->ngeo; i++) {
+ DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth);
+ DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
+ DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
+ DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
+ DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
+ DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
+ }
+ break;
+ }
+
+ default:
+ DEBUG(2, "Unknown tuple code %d", tuple.TupleCode);
+ }
+
+ rc = pcmcia_get_next_tuple(link->handle, &tuple);
+ }
+ if(!dev->pcmcia_map.size)
+ dev->pcmcia_map.size = MAX_PCMCIA_ADDR;
+
+ if(!dev->pcmcia_map.bankwidth)
+ dev->pcmcia_map.bankwidth = 2;
+
+ if(force_size) {
+ dev->pcmcia_map.size = force_size << 20;
+ DEBUG(2, "size forced to %dM", force_size);
+ }
+
+ if(bankwidth) {
+ dev->pcmcia_map.bankwidth = bankwidth;
+ DEBUG(2, "bankwidth forced to %d", bankwidth);
+ }
+
+ dev->pcmcia_map.name = dev->mtd_name;
+ if(!dev->mtd_name[0]) {
+ strcpy(dev->mtd_name, "PCMCIA Memory card");
+ *new_name = 1;
+ }
+
+ DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
+ dev->pcmcia_map.size, dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
+}
+
+
+/* pcmciamtd_config() is scheduled to run after a CARD_INSERTION event
+ * is received, to configure the PCMCIA socket, and to make the
+ * MTD device available to the system.
+ */
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+static void pcmciamtd_config(dev_link_t *link)
+{
+ struct pcmciamtd_dev *dev = link->priv;
+ struct mtd_info *mtd = NULL;
+ cs_status_t status;
+ win_req_t req;
+ int last_ret = 0, last_fn = 0;
+ int ret;
+ int i;
+ config_info_t t;
+ static char *probes[] = { "jedec_probe", "cfi_probe" };
+ cisinfo_t cisinfo;
+ int new_name = 0;
+
+ DEBUG(3, "link=0x%p", link);
+
+ /* Configure card */
+ link->state |= DEV_CONFIG;
+
+ DEBUG(2, "Validating CIS");
+ ret = pcmcia_validate_cis(link->handle, &cisinfo);
+ if(ret != CS_SUCCESS) {
+ cs_error(link->handle, GetTupleData, ret);
+ } else {
+ DEBUG(2, "ValidateCIS found %d chains", cisinfo.Chains);
+ }
+
+ card_settings(dev, link, &new_name);
+
+ dev->pcmcia_map.phys = NO_XIP;
+ dev->pcmcia_map.copy_from = pcmcia_copy_from_remap;
+ dev->pcmcia_map.copy_to = pcmcia_copy_to_remap;
+ if (dev->pcmcia_map.bankwidth == 1) {
+ dev->pcmcia_map.read = pcmcia_read8_remap;
+ dev->pcmcia_map.write = pcmcia_write8_remap;
+ } else {
+ dev->pcmcia_map.read = pcmcia_read16_remap;
+ dev->pcmcia_map.write = pcmcia_write16_remap;
+ }
+ if(setvpp == 1)
+ dev->pcmcia_map.set_vpp = pcmciamtd_set_vpp;
+
+ /* Request a memory window for PCMCIA. Some architectures can map windows up to the maximum
+ that PCMCIA can support (64MiB) - this is ideal and we aim for a window the size of the
+ whole card - otherwise we try smaller windows until we succeed */
+
+ req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+ req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16;
+ req.Base = 0;
+ req.AccessSpeed = mem_speed;
+ link->win = (window_handle_t)link->handle;
+ req.Size = (force_size) ? force_size << 20 : MAX_PCMCIA_ADDR;
+ dev->win_size = 0;
+
+ do {
+ int ret;
+ DEBUG(2, "requesting window with size = %dKiB memspeed = %d",
+ req.Size >> 10, req.AccessSpeed);
+ ret = pcmcia_request_window(&link->handle, &req, &link->win);
+ DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
+ if(ret) {
+ req.Size >>= 1;
+ } else {
+ DEBUG(2, "Got window of size %dKiB", req.Size >> 10);
+ dev->win_size = req.Size;
+ break;
+ }
+ } while(req.Size >= 0x1000);
+
+ DEBUG(2, "dev->win_size = %d", dev->win_size);
+
+ if(!dev->win_size) {
+ err("Cant allocate memory window");
+ pcmciamtd_release(link);
+ return;
+ }
+ DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10);
+
+ /* Get write protect status */
+ CS_CHECK(GetStatus, pcmcia_get_status(link->handle, &status));
+ DEBUG(2, "status value: 0x%x window handle = 0x%8.8lx",
+ status.CardState, (unsigned long)link->win);
+ dev->win_base = ioremap(req.Base, req.Size);
+ if(!dev->win_base) {
+ err("ioremap(%lu, %u) failed", req.Base, req.Size);
+ pcmciamtd_release(link);
+ return;
+ }
+ DEBUG(1, "mapped window dev = %p req.base = 0x%lx base = %p size = 0x%x",
+ dev, req.Base, dev->win_base, req.Size);
+
+ dev->offset = 0;
+ dev->pcmcia_map.map_priv_1 = (unsigned long)dev;
+ dev->pcmcia_map.map_priv_2 = (unsigned long)link->win;
+
+ DEBUG(2, "Getting configuration");
+ CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(link->handle, &t));
+ DEBUG(2, "Vcc = %d Vpp1 = %d Vpp2 = %d", t.Vcc, t.Vpp1, t.Vpp2);
+ dev->vpp = (vpp) ? vpp : t.Vpp1;
+ link->conf.Attributes = 0;
+ link->conf.Vcc = t.Vcc;
+ if(setvpp == 2) {
+ link->conf.Vpp1 = dev->vpp;
+ link->conf.Vpp2 = dev->vpp;
+ } else {
+ link->conf.Vpp1 = 0;
+ link->conf.Vpp2 = 0;
+ }
+
+ link->conf.IntType = INT_MEMORY;
+ link->conf.ConfigBase = t.ConfigBase;
+ link->conf.Status = t.Status;
+ link->conf.Pin = t.Pin;
+ link->conf.Copy = t.Copy;
+ link->conf.ExtStatus = t.ExtStatus;
+ link->conf.ConfigIndex = 0;
+ link->conf.Present = t.Present;
+ DEBUG(2, "Setting Configuration");
+ ret = pcmcia_request_configuration(link->handle, &link->conf);
+ if(ret != CS_SUCCESS) {
+ cs_error(link->handle, RequestConfiguration, ret);
+ }
+
+ if(mem_type == 1) {
+ mtd = do_map_probe("map_ram", &dev->pcmcia_map);
+ } else if(mem_type == 2) {
+ mtd = do_map_probe("map_rom", &dev->pcmcia_map);
+ } else {
+ for(i = 0; i < ARRAY_SIZE(probes); i++) {
+ DEBUG(1, "Trying %s", probes[i]);
+ mtd = do_map_probe(probes[i], &dev->pcmcia_map);
+ if(mtd)
+ break;
+
+ DEBUG(1, "FAILED: %s", probes[i]);
+ }
+ }
+
+ if(!mtd) {
+ DEBUG(1, "Cant find an MTD");
+ pcmciamtd_release(link);
+ return;
+ }
+
+ dev->mtd_info = mtd;
+ mtd->owner = THIS_MODULE;
+
+ if(new_name) {
+ int size = 0;
+ char unit = ' ';
+ /* Since we are using a default name, make it better by adding in the
+ size */
+ if(mtd->size < 1048576) { /* <1MiB in size, show size in KiB */
+ size = mtd->size >> 10;
+ unit = 'K';
+ } else {
+ size = mtd->size >> 20;
+ unit = 'M';
+ }
+ snprintf(dev->mtd_name, sizeof(dev->mtd_name), "%d%ciB %s", size, unit, "PCMCIA Memory card");
+ }
+
+ /* If the memory found fits completely into the mapped PCMCIA window,
+ use the faster non-remapping read/write functions */
+ if(mtd->size <= dev->win_size) {
+ DEBUG(1, "Using non remapping memory functions");
+ dev->pcmcia_map.map_priv_1 = (unsigned long)&(dev->link.state);
+ dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
+ if (dev->pcmcia_map.bankwidth == 1) {
+ dev->pcmcia_map.read = pcmcia_read8;
+ dev->pcmcia_map.write = pcmcia_write8;
+ } else {
+ dev->pcmcia_map.read = pcmcia_read16;
+ dev->pcmcia_map.write = pcmcia_write16;
+ }
+ dev->pcmcia_map.copy_from = pcmcia_copy_from;
+ dev->pcmcia_map.copy_to = pcmcia_copy_to;
+ }
+
+ if(add_mtd_device(mtd)) {
+ map_destroy(mtd);
+ dev->mtd_info = NULL;
+ err("Couldnt register MTD device");
+ pcmciamtd_release(link);
+ return;
+ }
+ snprintf(dev->node.dev_name, sizeof(dev->node.dev_name), "mtd%d", mtd->index);
+ info("mtd%d: %s", mtd->index, mtd->name);
+ link->state &= ~DEV_CONFIG_PENDING;
+ link->dev = &dev->node;
+ return;
+
+ cs_failed:
+ cs_error(link->handle, last_fn, last_ret);
+ err("CS Error, exiting");
+ pcmciamtd_release(link);
+ return;
+}
+
+
+/* The card status event handler. Mostly, this schedules other
+ * stuff to run after an event is received. A CARD_REMOVAL event
+ * also sets some flags to discourage the driver from trying
+ * to talk to the card any more.
+ */
+
+static int pcmciamtd_event(event_t event, int priority,
+ event_callback_args_t *args)
+{
+ dev_link_t *link = args->client_data;
+
+ DEBUG(1, "event=0x%06x", event);
+ switch (event) {
+ case CS_EVENT_CARD_REMOVAL:
+ DEBUG(2, "EVENT_CARD_REMOVAL");
+ link->state &= ~DEV_PRESENT;
+ if (link->state & DEV_CONFIG) {
+ struct pcmciamtd_dev *dev = link->priv;
+ if(dev->mtd_info) {
+ del_mtd_device(dev->mtd_info);
+ map_destroy(dev->mtd_info);
+ info("mtd%d: Removed", dev->mtd_info->index);
+ }
+ pcmciamtd_release(link);
+ }
+ break;
+ case CS_EVENT_CARD_INSERTION:
+ DEBUG(2, "EVENT_CARD_INSERTION");
+ link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+ pcmciamtd_config(link);
+ break;
+ case CS_EVENT_PM_SUSPEND:
+ DEBUG(2, "EVENT_PM_SUSPEND");
+ link->state |= DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_RESET_PHYSICAL:
+ DEBUG(2, "EVENT_RESET_PHYSICAL");
+ /* get_lock(link); */
+ break;
+ case CS_EVENT_PM_RESUME:
+ DEBUG(2, "EVENT_PM_RESUME");
+ link->state &= ~DEV_SUSPEND;
+ /* Fall through... */
+ case CS_EVENT_CARD_RESET:
+ DEBUG(2, "EVENT_CARD_RESET");
+ /* free_lock(link); */
+ break;
+ default:
+ DEBUG(2, "Unknown event %d", event);
+ }
+ return 0;
+}
+
+
+/* This deletes a driver "instance". The device is de-registered
+ * with Card Services. If it has been released, all local data
+ * structures are freed. Otherwise, the structures will be freed
+ * when the device is released.
+ */
+
+static void pcmciamtd_detach(dev_link_t *link)
+{
+ DEBUG(3, "link=0x%p", link);
+
+ if(link->state & DEV_CONFIG) {
+ pcmciamtd_release(link);
+ }
+
+ if (link->handle) {
+ int ret;
+ DEBUG(2, "Deregistering with card services");
+ ret = pcmcia_deregister_client(link->handle);
+ if (ret != CS_SUCCESS)
+ cs_error(link->handle, DeregisterClient, ret);
+ }
+
+ link->state |= DEV_STALE_LINK;
+}
+
+
+/* pcmciamtd_attach() creates an "instance" of the driver, allocating
+ * local data structures for one device. The device is registered
+ * with Card Services.
+ */
+
+static dev_link_t *pcmciamtd_attach(void)
+{
+ struct pcmciamtd_dev *dev;
+ dev_link_t *link;
+ client_reg_t client_reg;
+ int ret;
+
+ /* Create new memory card device */
+ dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) return NULL;
+ DEBUG(1, "dev=0x%p", dev);
+
+ memset(dev, 0, sizeof(*dev));
+ link = &dev->link;
+ link->priv = dev;
+
+ link->conf.Attributes = 0;
+ link->conf.IntType = INT_MEMORY;
+
+ link->next = dev_list;
+ dev_list = link;
+
+ /* Register with Card Services */
+ client_reg.dev_info = &dev_info;
+ client_reg.Version = 0x0210;
+ client_reg.event_callback_args.client_data = link;
+ DEBUG(2, "Calling RegisterClient");
+ ret = pcmcia_register_client(&link->handle, &client_reg);
+ if (ret != 0) {
+ cs_error(link->handle, RegisterClient, ret);
+ pcmciamtd_detach(link);
+ return NULL;
+ }
+ DEBUG(2, "link = %p", link);
+ return link;
+}
+
+static struct pcmcia_device_id pcmciamtd_ids[] = {
+ PCMCIA_DEVICE_FUNC_ID(1),
+ PCMCIA_DEVICE_PROD_ID123("IO DATA", "PCS-2M", "2MB SRAM", 0x547e66dc, 0x1fed36cd, 0x36eadd21),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "2MB SRAM", 0xb569a6e5, 0x36eadd21),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "4MB FLASH", 0xb569a6e5, 0x8bc54d2a),
+ PCMCIA_DEVICE_PROD_ID12("IBM", "8MB FLASH", 0xb569a6e5, 0x6df1be3e),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "S2E20SW", 0x816cc815, 0xd14c9dcf),
+ PCMCIA_DEVICE_PROD_ID12("Intel", "S2E8 SW", 0x816cc815, 0xa2d7dedb),
+ PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-02 ", 0x40ade711, 0x145cea5c),
+ PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-04 ", 0x40ade711, 0x42064dda),
+ PCMCIA_DEVICE_PROD_ID12("intel", "SERIES2-20 ", 0x40ade711, 0x25ee5cb0),
+ PCMCIA_DEVICE_PROD_ID12("intel", "VALUE SERIES 100 ", 0x40ade711, 0xdf8506d8),
+ PCMCIA_DEVICE_PROD_ID12("KINGMAX TECHNOLOGY INC.", "SRAM 256K Bytes", 0x54d0c69c, 0xad12c29c),
+ PCMCIA_DEVICE_PROD_ID12("Maxtor", "MAXFL MobileMax Flash Memory Card", 0xb68968c8, 0x2dfb47b0),
+ PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB101EN20", 0xf9876baf, 0xad0b207b),
+ PCMCIA_DEVICE_PROD_ID12("SEIKO EPSON", "WWB513EN20", 0xf9876baf, 0xe8d884ad),
+ PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-3000", 0x05ddca47, 0xe7d67bca),
+ PCMCIA_DEVICE_PROD_ID12("Starfish, Inc.", "REX-4100", 0x05ddca47, 0x7bc32944),
+ /* the following was commented out in pcmcia-cs-3.2.7 */
+ /* PCMCIA_DEVICE_PROD_ID12("RATOC Systems,Inc.", "SmartMedia ADAPTER PC Card", 0xf4a2fefe, 0x5885b2ae), */
+#ifdef CONFIG_MTD_PCMCIA_ANONYMOUS
+ { .match_flags = PCMCIA_DEV_ID_MATCH_ANONYMOUS, },
+#endif
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, pcmciamtd_ids);
+
+static struct pcmcia_driver pcmciamtd_driver = {
+ .drv = {
+ .name = "pcmciamtd"
+ },
+ .attach = pcmciamtd_attach,
+ .event = pcmciamtd_event,
+ .detach = pcmciamtd_detach,
+ .owner = THIS_MODULE,
+ .id_table = pcmciamtd_ids,
+};
+
+
+static int __init init_pcmciamtd(void)
+{
+ info(DRIVER_DESC " " DRIVER_VERSION);
+
+ if(bankwidth && bankwidth != 1 && bankwidth != 2) {
+ info("bad bankwidth (%d), using default", bankwidth);
+ bankwidth = 2;
+ }
+ if(force_size && (force_size < 1 || force_size > 64)) {
+ info("bad force_size (%d), using default", force_size);
+ force_size = 0;
+ }
+ if(mem_type && mem_type != 1 && mem_type != 2) {
+ info("bad mem_type (%d), using default", mem_type);
+ mem_type = 0;
+ }
+ return pcmcia_register_driver(&pcmciamtd_driver);
+}
+
+
+static void __exit exit_pcmciamtd(void)
+{
+ DEBUG(1, DRIVER_DESC " unloading");
+ pcmcia_unregister_driver(&pcmciamtd_driver);
+ BUG_ON(dev_list != NULL);
+}
+
+module_init(init_pcmciamtd);
+module_exit(exit_pcmciamtd);
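The window-remapping accessors in pcmciamtd.c split each card offset into a window-aligned page (passed to pcmcia_map_mem_page()) and an offset inside the window. A worked example, assuming a hypothetical 64 KiB window:

/*
 * win_size = 0x10000 (64 KiB), access at card offset to = 0x12345:
 *
 *   page base     = to & ~(win_size - 1) = 0x10000  -> remap window here
 *   window offset = to &  (win_size - 1) = 0x02345  -> access win_base + 0x2345
 *
 * remap_window() only issues MapMemPage when the page base changes, so
 * sequential accesses within one 64 KiB page need a single remap.
 */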
diff --git a/linux-2.4.x/drivers/mtd/maps/physmap.c b/linux-2.4.x/drivers/mtd/maps/physmap.c
index 182429b..f49ebc3 100644
--- a/linux-2.4.x/drivers/mtd/maps/physmap.c
+++ b/linux-2.4.x/drivers/mtd/maps/physmap.c
@@ -1,139 +1,120 @@
/*
- * $Id: physmap.c,v 1.15 2001/10/02 15:05:14 dwmw2 Exp $
+ * $Id: physmap.c,v 1.39 2005/11/29 14:49:36 gleixner Exp $
*
* Normal mappings of chips in physical memory
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * 031022 - [jsun] add run-time configure and partition setup
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/config.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
-
-#define WINDOW_ADDR CONFIG_MTD_PHYSMAP_START
-#define WINDOW_SIZE CONFIG_MTD_PHYSMAP_LEN
-#define BUSWIDTH CONFIG_MTD_PHYSMAP_BUSWIDTH
-
-static struct mtd_partition *parsed_parts;
static struct mtd_info *mymtd;
-__u8 physmap_read8(struct map_info *map, unsigned long ofs)
-{
- return __raw_readb(map->map_priv_1 + ofs);
-}
-
-__u16 physmap_read16(struct map_info *map, unsigned long ofs)
-{
- return __raw_readw(map->map_priv_1 + ofs);
-}
-
-__u32 physmap_read32(struct map_info *map, unsigned long ofs)
-{
- return __raw_readl(map->map_priv_1 + ofs);
-}
-
-void physmap_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
+struct map_info physmap_map = {
+ .name = "phys_mapped_flash",
+ .phys = CONFIG_MTD_PHYSMAP_START,
+ .size = CONFIG_MTD_PHYSMAP_LEN,
+ .bankwidth = CONFIG_MTD_PHYSMAP_BANKWIDTH,
+};
-void physmap_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- __raw_writeb(d, map->map_priv_1 + adr);
- mb();
-}
+#ifdef CONFIG_MTD_PARTITIONS
+static struct mtd_partition *mtd_parts;
+static int mtd_parts_nb;
-void physmap_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- __raw_writew(d, map->map_priv_1 + adr);
- mb();
-}
+static int num_physmap_partitions;
+static struct mtd_partition *physmap_partitions;
-void physmap_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- __raw_writel(d, map->map_priv_1 + adr);
- mb();
-}
+static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
-void physmap_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
{
- memcpy_toio(map->map_priv_1 + to, from, len);
+ physmap_partitions=parts;
+ num_physmap_partitions=num_parts;
}
+#endif /* CONFIG_MTD_PARTITIONS */
-struct map_info physmap_map = {
- name: "Physically mapped flash",
- size: WINDOW_SIZE,
- buswidth: BUSWIDTH,
- read8: physmap_read8,
- read16: physmap_read16,
- read32: physmap_read32,
- copy_from: physmap_copy_from,
- write8: physmap_write8,
- write16: physmap_write16,
- write32: physmap_write32,
- copy_to: physmap_copy_to
-};
-
-int __init init_physmap(void)
+static int __init init_physmap(void)
{
- struct mtd_partition *parts;
- int nb_parts = 0, ret;
- int parsed_nr_parts = 0;
- const char *part_type;
+ static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
+ const char **type;
- printk(KERN_NOTICE "physmap flash device: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
- physmap_map.map_priv_1 = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE);
+ printk(KERN_NOTICE "physmap flash device: %lx at %lx\n", physmap_map.size, physmap_map.phys);
+ physmap_map.virt = ioremap(physmap_map.phys, physmap_map.size);
- if (!physmap_map.map_priv_1) {
+ if (!physmap_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
- mymtd = do_map_probe("jedec_probe", &physmap_map);
- if (!mymtd) {
- iounmap((void *)physmap_map.map_priv_1);
- return -ENXIO;
- }
- mymtd->module = THIS_MODULE;
+ simple_map_init(&physmap_map);
+
+ mymtd = NULL;
+ type = rom_probe_types;
+ for(; !mymtd && *type; type++) {
+ mymtd = do_map_probe(*type, &physmap_map);
+ }
+ if (mymtd) {
+ mymtd->owner = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ mtd_parts_nb = parse_mtd_partitions(mymtd, part_probes,
+ &mtd_parts, 0);
+
+ if (mtd_parts_nb > 0)
+ {
+ add_mtd_partitions (mymtd, mtd_parts, mtd_parts_nb);
+ return 0;
+ }
+
+ if (num_physmap_partitions != 0)
+ {
+ printk(KERN_NOTICE
+ "Using physmap partition definition\n");
+ add_mtd_partitions (mymtd, physmap_partitions, num_physmap_partitions);
+ return 0;
+ }
-#ifdef CONFIG_MTD_CMDLINE_PARTS
- if (parsed_nr_parts == 0) {
- int ret = parse_cmdline_partitions(mymtd, &parsed_parts, "physmap");
- if (ret > 0) {
- part_type = "Command Line";
- parsed_nr_parts = ret;
- }
- }
#endif
+ add_mtd_device(mymtd);
+
+ return 0;
+ }
- if (parsed_nr_parts > 0) {
- parts = parsed_parts;
- nb_parts = parsed_nr_parts;
- }
-
- if (nb_parts == 0) {
- printk(KERN_NOTICE "physmap: no partition info available, registering whole flash at once\n");
- add_mtd_device(mymtd);
- } else {
- printk(KERN_NOTICE "physmap: Using %s partition definition\n", part_type);
- add_mtd_partitions(mymtd, parts, nb_parts);
- }
- return 0;
+ iounmap(physmap_map.virt);
+ return -ENXIO;
}
static void __exit cleanup_physmap(void)
{
- if (mymtd) {
+#ifdef CONFIG_MTD_PARTITIONS
+ if (mtd_parts_nb) {
+ del_mtd_partitions(mymtd);
+ kfree(mtd_parts);
+ } else if (num_physmap_partitions) {
+ del_mtd_partitions(mymtd);
+ } else {
del_mtd_device(mymtd);
- map_destroy(mymtd);
- }
- if (physmap_map.map_priv_1) {
- iounmap((void *)physmap_map.map_priv_1);
- physmap_map.map_priv_1 = 0;
}
+#else
+ del_mtd_device(mymtd);
+#endif
+ map_destroy(mymtd);
+
+ iounmap(physmap_map.virt);
+ physmap_map.virt = NULL;
}
module_init(init_physmap);
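The rewritten init_physmap() above shows the probe sequence every converted map driver in this patch follows: ioremap the window, simple_map_init() it, try the chip probes in order, then register the resulting MTD. A condensed stand-alone sketch of that flow, with an invented window address and size and no partition handling:

/*
 * Illustrative sketch only, not part of the patch.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <asm/io.h>

#define DEMO_WINDOW_ADDR 0x10000000UL	/* hypothetical flash base */
#define DEMO_WINDOW_SIZE 0x00400000UL	/* hypothetical 4 MiB window */

static struct map_info demo_map = {
	.name      = "demo_flash",
	.phys      = DEMO_WINDOW_ADDR,
	.size      = DEMO_WINDOW_SIZE,
	.bankwidth = 2,
};

static struct mtd_info *demo_mtd;

static int __init demo_map_init(void)
{
	static const char *probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
	const char **type;

	demo_map.virt = ioremap(demo_map.phys, demo_map.size);
	if (!demo_map.virt)
		return -EIO;

	simple_map_init(&demo_map);	/* fill in the read/write/copy hooks */

	for (type = probe_types; !demo_mtd && *type; type++)
		demo_mtd = do_map_probe(*type, &demo_map);

	if (!demo_mtd) {
		iounmap(demo_map.virt);
		return -ENXIO;
	}

	demo_mtd->owner = THIS_MODULE;
	add_mtd_device(demo_mtd);	/* register the whole window as one device */
	return 0;
}

module_init(demo_map_init);
MODULE_LICENSE("GPL");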
diff --git a/linux-2.4.x/drivers/mtd/maps/plat-ram.c b/linux-2.4.x/drivers/mtd/maps/plat-ram.c
new file mode 100644
index 0000000..5b6419d
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/plat-ram.c
@@ -0,0 +1,281 @@
+/* drivers/mtd/maps/plat-ram.c
+ *
+ * (c) 2004-2005 Simtec Electronics
+ * http://www.simtec.co.uk/products/SWLINUX/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Generic platform device based RAM map
+ *
+ * $Id: plat-ram.c,v 1.8 2005/11/29 20:01:28 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/plat-ram.h>
+
+#include <asm/io.h>
+
+/* private structure for each mtd platform ram device created */
+
+struct platram_info {
+ struct device *dev;
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct mtd_partition *partitions;
+ struct resource *area;
+ struct platdata_mtd_ram *pdata;
+};
+
+/* to_platram_info()
+ *
+ * device private data to struct platram_info conversion
+*/
+
+static inline struct platram_info *to_platram_info(struct platform_device *dev)
+{
+ return (struct platram_info *)platform_get_drvdata(dev);
+}
+
+/* platram_setrw
+ *
+ * call the platform device's set rw/ro control
+ *
+ * to = 0 => read-only
+ * = 1 => read-write
+*/
+
+static inline void platram_setrw(struct platram_info *info, int to)
+{
+ if (info->pdata == NULL)
+ return;
+
+ if (info->pdata->set_rw != NULL)
+ (info->pdata->set_rw)(info->dev, to);
+}
+
+/* platram_remove
+ *
+ * called to remove the device from the driver's control
+*/
+
+static int platram_remove(struct platform_device *pdev)
+{
+ struct platram_info *info = to_platram_info(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ dev_dbg(&pdev->dev, "removing device\n");
+
+ if (info == NULL)
+ return 0;
+
+ if (info->mtd) {
+#ifdef CONFIG_MTD_PARTITIONS
+ if (info->partitions) {
+ del_mtd_partitions(info->mtd);
+ kfree(info->partitions);
+ }
+#endif
+ del_mtd_device(info->mtd);
+ map_destroy(info->mtd);
+ }
+
+ /* ensure ram is left read-only */
+
+ platram_setrw(info, PLATRAM_RO);
+
+ /* release resources */
+
+ if (info->area) {
+ release_resource(info->area);
+ kfree(info->area);
+ }
+
+ if (info->map.virt != NULL)
+ iounmap(info->map.virt);
+
+ kfree(info);
+
+ return 0;
+}
+
+/* platram_probe
+ *
+ * called from the device driver system when a device matching our
+ * driver is found.
+*/
+
+static int platram_probe(struct platform_device *pdev)
+{
+ struct platdata_mtd_ram *pdata;
+ struct platram_info *info;
+ struct resource *res;
+ int err = 0;
+
+ dev_dbg(&pdev->dev, "probe entered\n");
+
+ if (pdev->dev.platform_data == NULL) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ pdata = pdev->dev.platform_data;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ dev_err(&pdev->dev, "no memory for flash info\n");
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ memset(info, 0, sizeof(*info));
+ platform_set_drvdata(pdev, info);
+
+ info->dev = &pdev->dev;
+ info->pdata = pdata;
+
+ /* get the resource for the memory mapping */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (res == NULL) {
+ dev_err(&pdev->dev, "no memory resource specified\n");
+ err = -ENOENT;
+ goto exit_free;
+ }
+
+ dev_dbg(&pdev->dev, "got platform resource %p (0x%lx)\n", res, res->start);
+
+ /* setup map parameters */
+
+ info->map.phys = res->start;
+ info->map.size = (res->end - res->start) + 1;
+ info->map.name = pdata->mapname != NULL ? pdata->mapname : (char *)pdev->name;
+ info->map.bankwidth = pdata->bankwidth;
+
+ /* register our usage of the memory area */
+
+ info->area = request_mem_region(res->start, info->map.size, pdev->name);
+ if (info->area == NULL) {
+ dev_err(&pdev->dev, "failed to request memory region\n");
+ err = -EIO;
+ goto exit_free;
+ }
+
+ /* remap the memory area */
+
+ info->map.virt = ioremap(res->start, info->map.size);
+ dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size);
+
+ if (info->map.virt == NULL) {
+ dev_err(&pdev->dev, "failed to ioremap() region\n");
+ err = -EIO;
+ goto exit_free;
+ }
+
+ simple_map_init(&info->map);
+
+ dev_dbg(&pdev->dev, "initialised map, probing for mtd\n");
+
+ /* probe for the right mtd map driver */
+
+ info->mtd = do_map_probe("map_ram" , &info->map);
+ if (info->mtd == NULL) {
+ dev_err(&pdev->dev, "failed to probe for map_ram\n");
+ err = -ENOMEM;
+ goto exit_free;
+ }
+
+ info->mtd->owner = THIS_MODULE;
+
+ platram_setrw(info, PLATRAM_RW);
+
+ * check to see if there are any available partitions, or whether
+ * to add this device whole */
+
+#ifdef CONFIG_MTD_PARTITIONS
+ if (pdata->nr_partitions > 0) {
+ const char **probes = { NULL };
+
+ if (pdata->probes)
+ probes = (const char **)pdata->probes;
+
+ err = parse_mtd_partitions(info->mtd, probes,
+ &info->partitions, 0);
+ if (err > 0) {
+ err = add_mtd_partitions(info->mtd, info->partitions,
+ err);
+ }
+ }
+#endif /* CONFIG_MTD_PARTITIONS */
+
+ if (add_mtd_device(info->mtd)) {
+ dev_err(&pdev->dev, "add_mtd_device() failed\n");
+ err = -ENOMEM;
+ }
+
+ dev_info(&pdev->dev, "registered mtd device\n");
+ return err;
+
+ exit_free:
+ platram_remove(pdev);
+ exit_error:
+ return err;
+}
+
+/* device driver info */
+
+static struct platform_driver platram_driver = {
+ .probe = platram_probe,
+ .remove = platram_remove,
+ .driver = {
+ .name = "mtd-ram",
+ .owner = THIS_MODULE,
+ },
+};
+
+/* module init/exit */
+
+static int __init platram_init(void)
+{
+ printk("Generic platform RAM MTD, (c) 2004 Simtec Electronics\n");
+ return platform_driver_register(&platram_driver);
+}
+
+static void __exit platram_exit(void)
+{
+ platform_driver_unregister(&platram_driver);
+}
+
+module_init(platram_init);
+module_exit(platram_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("MTD platform RAM map driver");
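The plat-ram driver binds by platform device name ("mtd-ram"), so a board file only has to describe the RAM window and hand over a platdata_mtd_ram. A sketch of that board-side wiring; the base address, size and "demo-sram" names are invented, and the optional set_rw hook is omitted:

/*
 * Illustrative sketch only, not part of the patch.
 */
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mtd/plat-ram.h>

static struct platdata_mtd_ram demo_sram_pdata = {
	.mapname   = "demo-sram",
	.bankwidth = 2,			/* 16-bit wide RAM */
	/* .set_rw = demo_sram_set_rw,	   optional write-protect hook */
};

static struct resource demo_sram_resource = {
	.start = 0x0c000000,		/* hypothetical base address */
	.end   = 0x0c000000 + 0x00100000 - 1,
	.flags = IORESOURCE_MEM,
};

static struct platform_device demo_sram_device = {
	.name          = "mtd-ram",	/* matches platram_driver.driver.name */
	.id            = 0,
	.resource      = &demo_sram_resource,
	.num_resources = 1,
	.dev           = {
		.platform_data = &demo_sram_pdata,
	},
};

/* in the board init code:  platform_device_register(&demo_sram_device); */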
diff --git a/linux-2.4.x/drivers/mtd/maps/pnc2000.c b/linux-2.4.x/drivers/mtd/maps/pnc2000.c
index 17741e2..d7e16c2 100644
--- a/linux-2.4.x/drivers/mtd/maps/pnc2000.c
+++ b/linux-2.4.x/drivers/mtd/maps/pnc2000.c
@@ -5,12 +5,13 @@
*
* This code is GPL
*
- * $Id: pnc2000.c,v 1.10 2001/10/02 15:05:14 dwmw2 Exp $
+ * $Id: pnc2000.c,v 1.18 2005/11/07 11:14:28 gleixner Exp $
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -20,99 +21,56 @@
#define WINDOW_ADDR 0xbf000000
#define WINDOW_SIZE 0x00400000
-/*
+/*
* MAP DRIVER STUFF
*/
-__u8 pnc_read8(struct map_info *map, unsigned long ofs)
-{
- return *(__u8 *)(WINDOW_ADDR + ofs);
-}
-
-__u16 pnc_read16(struct map_info *map, unsigned long ofs)
-{
- return *(__u16 *)(WINDOW_ADDR + ofs);
-}
-
-__u32 pnc_read32(struct map_info *map, unsigned long ofs)
-{
- return *(volatile unsigned int *)(WINDOW_ADDR + ofs);
-}
-
-void pnc_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy(to, (void *)(WINDOW_ADDR + from), len);
-}
-
-void pnc_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- *(__u8 *)(WINDOW_ADDR + adr) = d;
-}
-void pnc_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- *(__u16 *)(WINDOW_ADDR + adr) = d;
-}
-
-void pnc_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- *(__u32 *)(WINDOW_ADDR + adr) = d;
-}
-
-void pnc_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy((void *)(WINDOW_ADDR + to), from, len);
-}
-
-struct map_info pnc_map = {
- name: "PNC-2000",
- size: WINDOW_SIZE,
- buswidth: 4,
- read8: pnc_read8,
- read16: pnc_read16,
- read32: pnc_read32,
- copy_from: pnc_copy_from,
- write8: pnc_write8,
- write16: pnc_write16,
- write32: pnc_write32,
- copy_to: pnc_copy_to
+static struct map_info pnc_map = {
+ .name = "PNC-2000",
+ .size = WINDOW_SIZE,
+ .bankwidth = 4,
+ .phys = 0xFFFFFFFF,
+ .virt = (void __iomem *)WINDOW_ADDR,
};
/*
- * MTD 'PARTITIONING' STUFF
+ * MTD 'PARTITIONING' STUFF
*/
static struct mtd_partition pnc_partitions[3] = {
{
- name: "PNC-2000 boot firmware",
- size: 0x20000,
- offset: 0
+ .name = "PNC-2000 boot firmware",
+ .size = 0x20000,
+ .offset = 0
},
{
- name: "PNC-2000 kernel",
- size: 0x1a0000,
- offset: 0x20000
+ .name = "PNC-2000 kernel",
+ .size = 0x1a0000,
+ .offset = 0x20000
},
{
- name: "PNC-2000 filesystem",
- size: 0x240000,
- offset: 0x1c0000
+ .name = "PNC-2000 filesystem",
+ .size = 0x240000,
+ .offset = 0x1c0000
}
};
-/*
+/*
* This is the master MTD device for which all the others are just
* auto-relocating aliases.
*/
static struct mtd_info *mymtd;
-int __init init_pnc2000(void)
+static int __init init_pnc2000(void)
{
printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
+ simple_map_init(&pnc_map);
+
mymtd = do_map_probe("cfi_probe", &pnc_map);
if (mymtd) {
- mymtd->module = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
return add_mtd_partitions(mymtd, pnc_partitions, 3);
}
diff --git a/linux-2.4.x/drivers/mtd/maps/pxa2xx-flash.c b/linux-2.4.x/drivers/mtd/maps/pxa2xx-flash.c
new file mode 100644
index 0000000..2f01ff3
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/pxa2xx-flash.c
@@ -0,0 +1,202 @@
+/*
+ * $Id: $
+ *
+ * Map driver for the Intel XScale PXA2xx developer platforms.
+ *
+ * Author: Nicolas Pitre
+ * Copyright: (C) 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+#include <asm/mach/flash.h>
+
+static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
+ ssize_t len)
+{
+ consistent_sync((char *)map->cached + from, len, DMA_FROM_DEVICE);
+}
+
+struct pxa2xx_flash_info {
+ struct mtd_partition *parts;
+ int nr_parts;
+ struct mtd_info *mtd;
+ struct map_info map;
+};
+
+
+static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+
+static int __init pxa2xx_flash_probe(struct platform_device *pdev)
+{
+ struct flash_platform_data *flash = pdev->dev.platform_data;
+ struct pxa2xx_flash_info *info;
+ struct mtd_partition *parts;
+ struct resource *res;
+ int ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ info = kmalloc(sizeof(struct pxa2xx_flash_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ memset(info, 0, sizeof(struct pxa2xx_flash_info));
+ info->map.name = (char *) flash->name;
+ info->map.bankwidth = flash->width;
+ info->map.phys = res->start;
+ info->map.size = res->end - res->start + 1;
+ info->parts = flash->parts;
+ info->nr_parts = flash->nr_parts;
+
+ info->map.virt = ioremap(info->map.phys, info->map.size);
+ if (!info->map.virt) {
+ printk(KERN_WARNING "Failed to ioremap %s\n",
+ info->map.name);
+ return -ENOMEM;
+ }
+ info->map.cached =
+ ioremap_cached(info->map.phys, info->map.size);
+ if (!info->map.cached)
+ printk(KERN_WARNING "Failed to ioremap cached %s\n",
+ info->map.name);
+ info->map.inval_cache = pxa2xx_map_inval_cache;
+ simple_map_init(&info->map);
+
+ printk(KERN_NOTICE
+ "Probing %s at physical address 0x%08lx"
+ " (%d-bit bankwidth)\n",
+ info->map.name, info->map.phys, info->map.bankwidth * 8);
+
+ info->mtd = do_map_probe(flash->map_name, &info->map);
+
+ if (!info->mtd) {
+ iounmap((void *)info->map.virt);
+ if (info->map.cached)
+ iounmap(info->map.cached);
+ return -EIO;
+ }
+ info->mtd->owner = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
+
+ if (ret > 0) {
+ info->nr_parts = ret;
+ info->parts = parts;
+ }
+#endif
+
+ if (info->nr_parts) {
+ add_mtd_partitions(info->mtd, info->parts,
+ info->nr_parts);
+ } else {
+ printk("Registering %s as whole device\n",
+ info->map.name);
+ add_mtd_device(info->mtd);
+ }
+
+ platform_set_drvdata(pdev, info);
+ return 0;
+}
+
+static int __exit pxa2xx_flash_remove(struct platform_device *pdev)
+{
+ struct pxa2xx_flash_info *info = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+#ifdef CONFIG_MTD_PARTITIONS
+ if (info->nr_parts)
+ del_mtd_partitions(info->mtd);
+ else
+#endif
+ del_mtd_device(info->mtd);
+
+ map_destroy(info->mtd);
+ iounmap(info->map.virt);
+ if (info->map.cached)
+ iounmap(info->map.cached);
+ kfree(info->parts);
+ kfree(info);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pxa2xx_flash_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct pxa2xx_flash_info *info = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (info->mtd && info->mtd->suspend)
+ ret = info->mtd->suspend(info->mtd);
+ return ret;
+}
+
+static int pxa2xx_flash_resume(struct platform_device *pdev)
+{
+ struct pxa2xx_flash_info *info = platform_get_drvdata(pdev);
+
+ if (info->mtd && info->mtd->resume)
+ info->mtd->resume(info->mtd);
+ return 0;
+}
+static void pxa2xx_flash_shutdown(struct platform_device *pdev)
+{
+ struct pxa2xx_flash_info *info = platform_get_drvdata(pdev);
+
+ if (info && info->mtd->suspend(info->mtd) == 0)
+ info->mtd->resume(info->mtd);
+}
+#else
+#define pxa2xx_flash_suspend NULL
+#define pxa2xx_flash_resume NULL
+#define pxa2xx_flash_shutdown NULL
+#endif
+
+static struct platform_driver pxa2xx_flash_driver = {
+ .probe = pxa2xx_flash_probe,
+ .remove = __exit_p(pxa2xx_flash_remove),
+ .suspend = pxa2xx_flash_suspend,
+ .resume = pxa2xx_flash_resume,
+ .shutdown = pxa2xx_flash_shutdown,
+ .driver = {
+ .name = "pxa2xx-flash",
+ },
+};
+
+static int __init init_pxa2xx_flash(void)
+{
+ return platform_driver_register(&pxa2xx_flash_driver);
+}
+
+static void __exit cleanup_pxa2xx_flash(void)
+{
+ platform_driver_unregister(&pxa2xx_flash_driver);
+}
+
+module_init(init_pxa2xx_flash);
+module_exit(cleanup_pxa2xx_flash);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
+MODULE_DESCRIPTION("MTD map driver for Intel XScale PXA2xx");
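pxa2xx_flash_probe() above pulls everything it needs from a flash_platform_data plus one IORESOURCE_MEM resource. A sketch of the matching board-side definition; the chip-select window and the partition layout are invented for the example:

/*
 * Illustrative sketch only, not part of the patch.
 */
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <asm/mach/flash.h>

static struct mtd_partition demo_pxa_parts[] = {
	{ .name = "bootloader", .offset = 0,          .size = 0x00040000,
	  .mask_flags = MTD_WRITEABLE },	/* force read-only */
	{ .name = "rootfs",     .offset = 0x00040000, .size = MTDPART_SIZ_FULL },
};

static struct flash_platform_data demo_pxa_flash_data = {
	.map_name = "cfi_probe",	/* chip probe method passed to do_map_probe() */
	.name     = "demo-pxa-flash",
	.width    = 4,			/* 32-bit bus */
	.parts    = demo_pxa_parts,
	.nr_parts = ARRAY_SIZE(demo_pxa_parts),
};

static struct resource demo_pxa_flash_resource = {
	.start = 0x00000000,		/* assumed static chip select 0 window */
	.end   = 0x03ffffff,
	.flags = IORESOURCE_MEM,
};

static struct platform_device demo_pxa_flash_device = {
	.name          = "pxa2xx-flash",	/* matches the driver name above */
	.id            = 0,
	.resource      = &demo_pxa_flash_resource,
	.num_resources = 1,
	.dev           = { .platform_data = &demo_pxa_flash_data },
};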
diff --git a/linux-2.4.x/drivers/mtd/maps/redwood.c b/linux-2.4.x/drivers/mtd/maps/redwood.c
new file mode 100644
index 0000000..d63e127
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/redwood.c
@@ -0,0 +1,168 @@
+/*
+ * $Id: redwood.c,v 1.12 2006/03/29 08:31:11 dwmw2 Exp $
+ *
+ * drivers/mtd/maps/redwood.c
+ *
+ * FLASH map for the IBM Redwood 4/5/6 boards.
+ *
+ * Author: MontaVista Software, Inc. <source@mvista.com>
+ *
+ * 2001-2003 (c) MontaVista, Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+
+#if !defined (CONFIG_REDWOOD_6)
+
+#define WINDOW_ADDR 0xffc00000
+#define WINDOW_SIZE 0x00400000
+
+#define RW_PART0_OF 0
+#define RW_PART0_SZ 0x10000
+#define RW_PART1_OF RW_PART0_SZ
+#define RW_PART1_SZ 0x200000 - 0x10000
+#define RW_PART2_OF 0x200000
+#define RW_PART2_SZ 0x10000
+#define RW_PART3_OF 0x210000
+#define RW_PART3_SZ 0x200000 - (0x10000 + 0x20000)
+#define RW_PART4_OF 0x3e0000
+#define RW_PART4_SZ 0x20000
+
+static struct mtd_partition redwood_flash_partitions[] = {
+ {
+ .name = "Redwood OpenBIOS Vital Product Data",
+ .offset = RW_PART0_OF,
+ .size = RW_PART0_SZ,
+ .mask_flags = MTD_WRITEABLE /* force read-only */
+ },
+ {
+ .name = "Redwood kernel",
+ .offset = RW_PART1_OF,
+ .size = RW_PART1_SZ
+ },
+ {
+ .name = "Redwood OpenBIOS non-volatile storage",
+ .offset = RW_PART2_OF,
+ .size = RW_PART2_SZ,
+ .mask_flags = MTD_WRITEABLE /* force read-only */
+ },
+ {
+ .name = "Redwood filesystem",
+ .offset = RW_PART3_OF,
+ .size = RW_PART3_SZ
+ },
+ {
+ .name = "Redwood OpenBIOS",
+ .offset = RW_PART4_OF,
+ .size = RW_PART4_SZ,
+ .mask_flags = MTD_WRITEABLE /* force read-only */
+ }
+};
+
+#else /* CONFIG_REDWOOD_6 */
+/* FIXME: the window is bigger - armin */
+#define WINDOW_ADDR 0xff800000
+#define WINDOW_SIZE 0x00800000
+
+#define RW_PART0_OF 0
+#define RW_PART0_SZ 0x400000 /* 4 MiB data */
+#define RW_PART1_OF RW_PART0_OF + RW_PART0_SZ
+#define RW_PART1_SZ 0x10000 /* 64K VPD */
+#define RW_PART2_OF RW_PART1_OF + RW_PART1_SZ
+#define RW_PART2_SZ 0x400000 - (0x10000 + 0x20000)
+#define RW_PART3_OF RW_PART2_OF + RW_PART2_SZ
+#define RW_PART3_SZ 0x20000
+
+static struct mtd_partition redwood_flash_partitions[] = {
+ {
+ .name = "Redwood filesystem",
+ .offset = RW_PART0_OF,
+ .size = RW_PART0_SZ
+ },
+ {
+ .name = "Redwood OpenBIOS Vital Product Data",
+ .offset = RW_PART1_OF,
+ .size = RW_PART1_SZ,
+ .mask_flags = MTD_WRITEABLE /* force read-only */
+ },
+ {
+ .name = "Redwood kernel",
+ .offset = RW_PART2_OF,
+ .size = RW_PART2_SZ
+ },
+ {
+ .name = "Redwood OpenBIOS",
+ .offset = RW_PART3_OF,
+ .size = RW_PART3_SZ,
+ .mask_flags = MTD_WRITEABLE /* force read-only */
+ }
+};
+
+#endif /* CONFIG_REDWOOD_6 */
+
+struct map_info redwood_flash_map = {
+ .name = "IBM Redwood",
+ .size = WINDOW_SIZE,
+ .bankwidth = 2,
+ .phys = WINDOW_ADDR,
+};
+
+
+#define NUM_REDWOOD_FLASH_PARTITIONS ARRAY_SIZE(redwood_flash_partitions)
+
+static struct mtd_info *redwood_mtd;
+
+int __init init_redwood_flash(void)
+{
+ printk(KERN_NOTICE "redwood: flash mapping: %x at %x\n",
+ WINDOW_SIZE, WINDOW_ADDR);
+
+ redwood_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
+
+ if (!redwood_flash_map.virt) {
+ printk("init_redwood_flash: failed to ioremap\n");
+ return -EIO;
+ }
+ simple_map_init(&redwood_flash_map);
+
+ redwood_mtd = do_map_probe("cfi_probe",&redwood_flash_map);
+
+ if (redwood_mtd) {
+ redwood_mtd->owner = THIS_MODULE;
+ return add_mtd_partitions(redwood_mtd,
+ redwood_flash_partitions,
+ NUM_REDWOOD_FLASH_PARTITIONS);
+ }
+
+ return -ENXIO;
+}
+
+static void __exit cleanup_redwood_flash(void)
+{
+ if (redwood_mtd) {
+ del_mtd_partitions(redwood_mtd);
+ /* moved iounmap after map_destroy - armin */
+ map_destroy(redwood_mtd);
+ iounmap((void *)redwood_flash_map.virt);
+ }
+}
+
+module_init(init_redwood_flash);
+module_exit(cleanup_redwood_flash);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("MontaVista Software <source@mvista.com>");
+MODULE_DESCRIPTION("MTD map driver for the IBM Redwood reference boards");
diff --git a/linux-2.4.x/drivers/mtd/maps/rpxlite.c b/linux-2.4.x/drivers/mtd/maps/rpxlite.c
index 41b168f..809a0c8 100644
--- a/linux-2.4.x/drivers/mtd/maps/rpxlite.c
+++ b/linux-2.4.x/drivers/mtd/maps/rpxlite.c
@@ -1,5 +1,5 @@
/*
- * $Id: rpxlite.c,v 1.15 2001/10/02 15:05:14 dwmw2 Exp $
+ * $Id: rpxlite.c,v 1.22 2004/11/04 13:24:15 gleixner Exp $
*
* Handle mapping of the flash on the RPX Lite and CLLF boards
*/
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -17,80 +18,31 @@
static struct mtd_info *mymtd;
-__u8 rpxlite_read8(struct map_info *map, unsigned long ofs)
-{
- return __raw_readb(map->map_priv_1 + ofs);
-}
-
-__u16 rpxlite_read16(struct map_info *map, unsigned long ofs)
-{
- return __raw_readw(map->map_priv_1 + ofs);
-}
-
-__u32 rpxlite_read32(struct map_info *map, unsigned long ofs)
-{
- return __raw_readl(map->map_priv_1 + ofs);
-}
-
-void rpxlite_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
-}
-
-void rpxlite_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- __raw_writeb(d, map->map_priv_1 + adr);
- mb();
-}
-
-void rpxlite_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- __raw_writew(d, map->map_priv_1 + adr);
- mb();
-}
-
-void rpxlite_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- __raw_writel(d, map->map_priv_1 + adr);
- mb();
-}
-
-void rpxlite_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio((void *)(map->map_priv_1 + to), from, len);
-}
-
-struct map_info rpxlite_map = {
- name: "RPX",
- size: WINDOW_SIZE,
- buswidth: 4,
- read8: rpxlite_read8,
- read16: rpxlite_read16,
- read32: rpxlite_read32,
- copy_from: rpxlite_copy_from,
- write8: rpxlite_write8,
- write16: rpxlite_write16,
- write32: rpxlite_write32,
- copy_to: rpxlite_copy_to
+static struct map_info rpxlite_map = {
+ .name = "RPX",
+ .size = WINDOW_SIZE,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR,
};
int __init init_rpxlite(void)
{
printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
- rpxlite_map.map_priv_1 = (unsigned long)ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
+ rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
- if (!rpxlite_map.map_priv_1) {
+ if (!rpxlite_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
+ simple_map_init(&rpxlite_map);
mymtd = do_map_probe("cfi_probe", &rpxlite_map);
if (mymtd) {
- mymtd->module = THIS_MODULE;
+ mymtd->owner = THIS_MODULE;
add_mtd_device(mymtd);
return 0;
}
- iounmap((void *)rpxlite_map.map_priv_1);
+ iounmap((void *)rpxlite_map.virt);
return -ENXIO;
}
@@ -100,9 +52,9 @@ static void __exit cleanup_rpxlite(void)
del_mtd_device(mymtd);
map_destroy(mymtd);
}
- if (rpxlite_map.map_priv_1) {
- iounmap((void *)rpxlite_map.map_priv_1);
- rpxlite_map.map_priv_1 = 0;
+ if (rpxlite_map.virt) {
+ iounmap((void *)rpxlite_map.virt);
+ rpxlite_map.virt = 0;
}
}
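The physmap, plat-ram, pxa2xx and sa1100 conversions in this patch all share the same registration order: try the run-time partition parsers first, then any static table, and only then register the whole chip. A condensed sketch of that fallback, with placeholder names and an invented static layout:

/*
 * Illustrative sketch only, not part of the patch.
 */
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const char *demo_part_probes[] = { "cmdlinepart", "RedBoot", NULL };

static struct mtd_partition demo_static_parts[] = {
	{ .name = "kernel", .offset = 0,          .size = 0x00100000 },
	{ .name = "rootfs", .offset = 0x00100000, .size = MTDPART_SIZ_FULL },
};

static int demo_register(struct mtd_info *demo_mtd)
{
	struct mtd_partition *parts;
	int nr_parts;

	/* 1. try the run-time parsers (kernel command line, RedBoot FIS) */
	nr_parts = parse_mtd_partitions(demo_mtd, demo_part_probes, &parts, 0);

	/* 2. fall back to the board's static table */
	if (nr_parts <= 0) {
		parts = demo_static_parts;
		nr_parts = ARRAY_SIZE(demo_static_parts);
	}

	if (nr_parts > 0)
		return add_mtd_partitions(demo_mtd, parts, nr_parts);

	/* 3. no partition info at all: register the whole chip */
	return add_mtd_device(demo_mtd);
}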
diff --git a/linux-2.4.x/drivers/mtd/maps/sa1100-flash.c b/linux-2.4.x/drivers/mtd/maps/sa1100-flash.c
index a957813..3ee3906 100644
--- a/linux-2.4.x/drivers/mtd/maps/sa1100-flash.c
+++ b/linux-2.4.x/drivers/mtd/maps/sa1100-flash.c
@@ -1,1028 +1,479 @@
/*
* Flash memory access on SA11x0 based devices
- *
+ *
* (C) 2000 Nicolas Pitre <nico@cam.org>
- *
- * $Id: sa1100-flash.c,v 1.26 2002/03/13 16:30:44 rmk Exp $
+ *
+ * $Id: sa1100-flash.c,v 1.52 2005/11/29 20:01:28 gleixner Exp $
*/
-
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <asm/hardware.h>
#include <asm/io.h>
+#include <asm/sizes.h>
+#include <asm/mach/flash.h>
-
-#ifndef CONFIG_ARCH_SA1100
-#error This is for SA1100 architecture only
-#endif
-
-
-#define WINDOW_ADDR 0xe8000000
-
-static __u8 sa1100_read8(struct map_info *map, unsigned long ofs)
-{
- return readb(map->map_priv_1 + ofs);
-}
-
-static __u16 sa1100_read16(struct map_info *map, unsigned long ofs)
-{
- return readw(map->map_priv_1 + ofs);
-}
-
-static __u32 sa1100_read32(struct map_info *map, unsigned long ofs)
-{
- return readl(map->map_priv_1 + ofs);
-}
-
-static void sa1100_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy(to, (void *)(map->map_priv_1 + from), len);
-}
-
-static void sa1100_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- writeb(d, map->map_priv_1 + adr);
-}
-
-static void sa1100_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- writew(d, map->map_priv_1 + adr);
-}
-
-static void sa1100_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- writel(d, map->map_priv_1 + adr);
-}
-
-static void sa1100_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy((void *)(map->map_priv_1 + to), from, len);
-}
-
-static struct map_info sa1100_map = {
- name: "SA1100 flash",
- read8: sa1100_read8,
- read16: sa1100_read16,
- read32: sa1100_read32,
- copy_from: sa1100_copy_from,
- write8: sa1100_write8,
- write16: sa1100_write16,
- write32: sa1100_write32,
- copy_to: sa1100_copy_to,
-
- map_priv_1: WINDOW_ADDR,
- map_priv_2: -1,
-};
-
-
-/*
- * Here are partition information for all known SA1100-based devices.
- * See include/linux/mtd/partitions.h for definition of the mtd_partition
- * structure.
- *
- * The *_max_flash_size is the maximum possible mapped flash size which
- * is not necessarily the actual flash size. It must be no more than
- * the value specified in the "struct map_desc *_io_desc" mapping
- * definition for the corresponding machine.
- *
- * Please keep these in alphabetical order, and formatted as per existing
- * entries. Thanks.
- */
-
-#ifdef CONFIG_SA1100_ADSBITSY
-#define ADSBITSY_FLASH_SIZE 0x02000000
-static struct mtd_partition adsbitsy_partitions[] = {
- {
- name: "bootROM",
- size: 0x80000,
- offset: 0,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "zImage",
- size: 0x100000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "ramdisk.gz",
- size: 0x300000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "User FS",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
- }
-};
-#endif
-
-#ifdef CONFIG_SA1100_ASSABET
-/* Phase 4 Assabet has two 28F160B3 flash parts in bank 0: */
-#define ASSABET4_FLASH_SIZE 0x00400000
-static struct mtd_partition assabet4_partitions[] = {
- {
- name: "bootloader",
- size: 0x00020000,
- offset: 0,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "bootloader params",
- size: 0x00020000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "jffs",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
- }
-};
-
-/* Phase 5 Assabet has two 28F128J3A flash parts in bank 0: */
-#define ASSABET5_FLASH_SIZE 0x02000000
-static struct mtd_partition assabet5_partitions[] = {
- {
- name: "bootloader",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "bootloader params",
- size: 0x00040000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "jffs",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
- }
-};
-
-#define ASSABET_FLASH_SIZE ASSABET5_FLASH_SIZE
-#define assabet_partitions assabet5_partitions
-#endif
-
-#ifdef CONFIG_SA1100_BADGE4
-
+#if 0
/*
- * 1 x Intel 28F320C3BA100 Advanced+ Boot Block Flash (32 Mi bit)
- * Eight 4 KiW Parameter Bottom Blocks (64 KiB)
- * Sixty-three 32 KiW Main Blocks (4032 Ki b)
+ * This is here for documentation purposes only - until these people
+ * submit their machine types. It will be gone January 2005.
*/
-#define BADGE4_FLASH_SIZE 0x00400000
-static struct mtd_partition badge4_partitions[] = {
- {
- name: "BLOB boot loader",
- offset: 0,
- size: 0x0000A000
- }, {
- name: "params",
- offset: MTDPART_OFS_APPEND,
- size: 0x00006000
- }, {
- name: "kernel",
- offset: MTDPART_OFS_APPEND,
- size: 0x00100000
- }, {
- name: "root",
- offset: MTDPART_OFS_APPEND,
- size: MTDPART_SIZ_FULL
- }
-};
-
-#endif
-
-
-#ifdef CONFIG_SA1100_CERF
-#ifdef CONFIG_SA1100_CERF_FLASH_32MB
-#define CERF_FLASH_SIZE 0x02000000
-static struct mtd_partition cerf_partitions[] = {
- {
- name: "firmware",
- size: 0x00040000,
- offset: 0,
- }, {
- name: "params",
- size: 0x00040000,
- offset: 0x00040000,
- }, {
- name: "kernel",
- size: 0x00100000,
- offset: 0x00080000,
- }, {
- name: "rootdisk",
- size: 0x01E80000,
- offset: 0x00180000,
- }
-};
-#elif defined CONFIG_SA1100_CERF_FLASH_16MB
-#define CERF_FLASH_SIZE 0x01000000
-static struct mtd_partition cerf_partitions[] = {
- {
- name: "firmware",
- size: 0x00020000,
- offset: 0,
- }, {
- name: "params",
- size: 0x00020000,
- offset: 0x00020000,
- }, {
- name: "kernel",
- size: 0x00100000,
- offset: 0x00040000,
- }, {
- name: "rootdisk",
- size: 0x00EC0000,
- offset: 0x00140000,
- }
-};
-#elif defined CONFIG_SA1100_CERF_FLASH_8MB
-# error "Unwritten type definition"
-#else
-# error "Undefined memory orientation for CERF in sa1100-flash.c"
-#endif
-#endif
-
-#ifdef CONFIG_SA1100_CONSUS
-#define CONSUS_FLASH_SIZE 0x02000000
static struct mtd_partition consus_partitions[] = {
{
- name: "Consus boot firmware",
- offset: 0,
- size: 0x00040000,
- mask_flags: MTD_WRITABLE, /* force read-only */
- }, {
- name: "Consus kernel",
- offset: 0x00040000,
- size: 0x00100000,
- mask_flags: 0,
- }, {
- name: "Consus disk",
- offset: 0x00140000,
+ .name = "Consus boot firmware",
+ .offset = 0,
+ .size = 0x00040000,
+ .mask_flags = MTD_WRITABLE, /* force read-only */
+ }, {
+ .name = "Consus kernel",
+ .offset = 0x00040000,
+ .size = 0x00100000,
+ .mask_flags = 0,
+ }, {
+ .name = "Consus disk",
+ .offset = 0x00140000,
/* The rest (up to 16M) for jffs. We could put 0 and
make it find the size automatically, but right now
i have 32 megs. jffs will use all 32 megs if given
the chance, and this leads to horrible problems
when you try to re-flash the image because blob
won't erase the whole partition. */
- size: 0x01000000 - 0x00140000,
- mask_flags: 0,
+ .size = 0x01000000 - 0x00140000,
+ .mask_flags = 0,
}, {
/* this disk is a secondary disk, which can be used as
needed, for simplicity, make it the size of the other
consus partition, although realistically it could be
the remainder of the disk (depending on the file
system used) */
- name: "Consus disk2",
- offset: 0x01000000,
- size: 0x01000000 - 0x00140000,
- mask_flags: 0,
- }
-};
-#endif
-
-#ifdef CONFIG_SA1100_FLEXANET
-/* Flexanet has two 28F128J3A flash parts in bank 0: */
-#define FLEXANET_FLASH_SIZE 0x02000000
-static struct mtd_partition flexanet_partitions[] = {
- {
- name: "bootloader",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "bootloader params",
- size: 0x00040000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "kernel",
- size: 0x000C0000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "altkernel",
- size: 0x000C0000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "root",
- size: 0x00400000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "free1",
- size: 0x00300000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "free2",
- size: 0x00300000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE,
- }, {
- name: "free3",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE,
+ .name = "Consus disk2",
+ .offset = 0x01000000,
+ .size = 0x01000000 - 0x00140000,
+ .mask_flags = 0,
}
};
-#endif
-#ifdef CONFIG_SA1100_FREEBIRD
-#define FREEBIRD_FLASH_SIZE 0x02000000
-static struct mtd_partition freebird_partitions[] = {
-#if CONFIG_SA1100_FREEBIRD_NEW
- {
- name: "firmware",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "kernel",
- size: 0x00080000,
- offset: 0x00040000,
- }, {
- name: "params",
- size: 0x00040000,
- offset: 0x000C0000,
- }, {
- name: "initrd",
- size: 0x00100000,
- offset: 0x00100000,
- }, {
- name: "root cramfs",
- size: 0x00300000,
- offset: 0x00200000,
- }, {
- name: "usr cramfs",
- size: 0x00C00000,
- offset: 0x00500000,
- }, {
- name: "local",
- size: MTDPART_SIZ_FULL,
- offset: 0x01100000,
- }
-#else
- {
- size: 0x00040000,
- offset: 0,
- }, {
- size: 0x000c0000,
- offset: MTDPART_OFS_APPEND,
- }, {
- size: 0x00400000,
- offset: MTDPART_OFS_APPEND,
- }, {
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
- }
-#endif
-};
-#endif
-
-#ifdef CONFIG_SA1100_FRODO
/* Frodo has 2 x 16M 28F128J3A flash chips in bank 0: */
-#define FRODO_FLASH_SIZE 0x02000000
static struct mtd_partition frodo_partitions[] =
{
{
- name: "bootloader",
- size: 0x00040000,
- offset: 0x00000000,
- mask_flags: MTD_WRITEABLE
- }, {
- name: "bootloader params",
- size: 0x00040000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- }, {
- name: "kernel",
- size: 0x00100000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- }, {
- name: "ramdisk",
- size: 0x00400000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE
- }, {
- name: "file system",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND
- }
-};
-#endif
-
-#ifdef CONFIG_SA1100_GRAPHICSCLIENT
-#define GRAPHICSCLIENT_FLASH_SIZE 0x02000000
-static struct mtd_partition graphicsclient_partitions[] = {
- {
- name: "zImage",
- size: 0x100000,
- offset: 0,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "ramdisk.gz",
- size: 0x300000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "User FS",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
- }
-};
-#endif
-
-#ifdef CONFIG_SA1100_GRAPHICSMASTER
-#define GRAPHICSMASTER_FLASH_SIZE 0x01000000
-static struct mtd_partition graphicsmaster_partitions[] = {
- {
- name: "zImage",
- size: 0x100000,
- offset: 0,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- },
- {
- name: "ramdisk.gz",
- size: 0x300000,
- offset: MTDPART_OFS_APPEND,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- },
- {
- name: "User FS",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
+ .name = "bootloader",
+ .size = 0x00040000,
+ .offset = 0x00000000,
+ .mask_flags = MTD_WRITEABLE
+ }, {
+ .name = "bootloader params",
+ .size = 0x00040000,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = MTD_WRITEABLE
+ }, {
+ .name = "kernel",
+ .size = 0x00100000,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = MTD_WRITEABLE
+ }, {
+ .name = "ramdisk",
+ .size = 0x00400000,
+ .offset = MTDPART_OFS_APPEND,
+ .mask_flags = MTD_WRITEABLE
+ }, {
+ .name = "file system",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND
}
};
-#endif
-#ifdef CONFIG_SA1100_H3600
-#define H3600_FLASH_SIZE 0x02000000
-static struct mtd_partition h3600_partitions[] = {
+static struct mtd_partition jornada56x_partitions[] = {
{
- name: "H3600 boot firmware",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE, /* force read-only */
+ .name = "bootldr",
+ .size = 0x00040000,
+ .offset = 0,
+ .mask_flags = MTD_WRITEABLE,
}, {
- name: "H3600 kernel",
- size: 0x00080000,
- offset: 0x00040000,
- }, {
- name: "H3600 params",
- size: 0x00040000,
- offset: 0x000C0000,
- }, {
-#ifdef CONFIG_JFFS2_FS
- name: "H3600 root jffs2",
- size: MTDPART_SIZ_FULL,
- offset: 0x00100000,
-#else
- name: "H3600 initrd",
- size: 0x00100000,
- offset: 0x00100000,
- }, {
- name: "H3600 root cramfs",
- size: 0x00300000,
- offset: 0x00200000,
- }, {
- name: "H3600 usr cramfs",
- size: 0x00800000,
- offset: 0x00500000,
- }, {
- name: "H3600 usr local",
- size: MTDPART_SIZ_FULL,
- offset: 0x00d00000,
-#endif
+ .name = "rootfs",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
}
};
-static void h3600_set_vpp(struct map_info *map, int vpp)
+static void jornada56x_set_vpp(int vpp)
{
- assign_h3600_egpio(IPAQ_EGPIO_VPP_ON, vpp);
+ if (vpp)
+ GPSR = GPIO_GPIO26;
+ else
+ GPCR = GPIO_GPIO26;
+ GPDR |= GPIO_GPIO26;
}
+
+/*
+ * Machine Phys Size set_vpp
+ * Consus : SA1100_CS0_PHYS SZ_32M
+ * Frodo : SA1100_CS0_PHYS SZ_32M
+ * Jornada56x: SA1100_CS0_PHYS SZ_32M jornada56x_set_vpp
+ */
#endif
-#ifdef CONFIG_SA1100_HUW_WEBPANEL
-#define HUW_WEBPANEL_FLASH_SIZE 0x01000000
-static struct mtd_partition huw_webpanel_partitions[] = {
- {
- name: "Loader",
- size: 0x00040000,
- offset: 0,
- }, {
- name: "Sector 1",
- size: 0x00040000,
- offset: MTDPART_OFS_APPEND,
- }, {
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
- }
+struct sa_subdev_info {
+ char name[16];
+ struct map_info map;
+ struct mtd_info *mtd;
+ struct flash_platform_data *plat;
};
-#endif
-#ifdef CONFIG_SA1100_JORNADA720
-#define JORNADA720_FLASH_SIZE 0x02000000
-static struct mtd_partition jornada720_partitions[] = {
- {
- name: "JORNADA720 boot firmware",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "JORNADA720 kernel",
- size: 0x000c0000,
- offset: 0x00040000,
- }, {
- name: "JORNADA720 params",
- size: 0x00040000,
- offset: 0x00100000,
- }, {
- name: "JORNADA720 initrd",
- size: 0x00100000,
- offset: 0x00140000,
- }, {
- name: "JORNADA720 root cramfs",
- size: 0x00300000,
- offset: 0x00240000,
- }, {
- name: "JORNADA720 usr cramfs",
- size: 0x00800000,
- offset: 0x00540000,
- }, {
- name: "JORNADA720 usr local",
- size: 0 /* will expand to the end of the flash */
- offset: 0x00d00000,
- }
+struct sa_info {
+ struct mtd_partition *parts;
+ struct mtd_info *mtd;
+ int num_subdev;
+ unsigned int nr_parts;
+ struct sa_subdev_info subdev[0];
};
-static void jornada720_set_vpp(int vpp)
+static void sa1100_set_vpp(struct map_info *map, int on)
{
- if (vpp)
- PPSR |= 0x80;
- else
- PPSR &= ~0x80;
- PPDR |= 0x80;
+ struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map);
+ subdev->plat->set_vpp(on);
}
-#endif
+static void sa1100_destroy_subdev(struct sa_subdev_info *subdev)
+{
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ if (subdev->map.virt)
+ iounmap(subdev->map.virt);
+ release_mem_region(subdev->map.phys, subdev->map.size);
+}
-#ifdef CONFIG_SA1100_PANGOLIN
-#define PANGOLIN_FLASH_SIZE 0x04000000
-static struct mtd_partition pangolin_partitions[] = {
- {
- name: "boot firmware",
- size: 0x00080000,
- offset: 0x00000000,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "kernel",
- size: 0x00100000,
- offset: 0x00080000,
- }, {
- name: "initrd",
- size: 0x00280000,
- offset: 0x00180000,
- }, {
- name: "initrd-test",
- size: 0x03C00000,
- offset: 0x00400000,
- }
-};
-#endif
+static int sa1100_probe_subdev(struct sa_subdev_info *subdev, struct resource *res)
+{
+ unsigned long phys;
+ unsigned int size;
+ int ret;
-#ifdef CONFIG_SA1100_PT_SYSTEM3
-/* erase size is 0x40000 == 256k partitions have to have this boundary */
-#define SYSTEM3_FLASH_SIZE 0x01000000
-static struct mtd_partition system3_partitions[] = {
- {
- name: "BLOB",
- size: 0x00040000,
- offset: 0x00000000,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "config",
- size: 0x00040000,
- offset: MTDPART_OFS_APPEND,
- }, {
- name: "kernel",
- size: 0x00100000,
- offset: MTDPART_OFS_APPEND,
- }, {
- name: "root",
- size: MTDPART_SIZ_FULL,
- offset: MTDPART_OFS_APPEND,
+ phys = res->start;
+ size = res->end - phys + 1;
+
+ /*
+ * Retrieve the bankwidth from the MSC registers.
+ * We currently only implement CS0 and CS1 here.
+ */
+ switch (phys) {
+ default:
+ printk(KERN_WARNING "SA1100 flash: unknown base address "
+ "0x%08lx, assuming CS0\n", phys);
+
+ case SA1100_CS0_PHYS:
+ subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4;
+ break;
+
+ case SA1100_CS1_PHYS:
+ subdev->map.bankwidth = ((MSC0 >> 16) & MSC_RBW) ? 2 : 4;
+ break;
}
-};
-#endif
-#ifdef CONFIG_SA1100_SHANNON
-#define SHANNON_FLASH_SIZE 0x00400000
-static struct mtd_partition shannon_partitions[] = {
- {
- name: "BLOB boot loader",
- offset: 0,
- size: 0x20000
- },
- {
- name: "kernel",
- offset: MTDPART_OFS_APPEND,
- size: 0xe0000
- },
- {
- name: "initrd",
- offset: MTDPART_OFS_APPEND,
- size: MTDPART_SIZ_FULL
+ if (!request_mem_region(phys, size, subdev->name)) {
+ ret = -EBUSY;
+ goto out;
}
-};
-#endif
+ if (subdev->plat->set_vpp)
+ subdev->map.set_vpp = sa1100_set_vpp;
-#ifdef CONFIG_SA1100_SHERMAN
-#define SHERMAN_FLASH_SIZE 0x02000000
-static struct mtd_partition sherman_partitions[] = {
- {
- size: 0x50000,
- offset: 0,
- }, {
- size: 0x70000,
- offset: MTDPART_OFS_APPEND,
- }, {
- size: 0x600000,
- offset: MTDPART_OFS_APPEND,
- }, {
- size: 0xA0000,
- offset: MTDPART_OFS_APPEND,
+ subdev->map.phys = phys;
+ subdev->map.size = size;
+ subdev->map.virt = ioremap(phys, size);
+ if (!subdev->map.virt) {
+ ret = -ENOMEM;
+ goto err;
}
-};
-#endif
-#ifdef CONFIG_SA1100_SIMPAD
-#define SIMPAD_FLASH_SIZE 0x02000000
-static struct mtd_partition simpad_partitions[] = {
- {
- name: "SIMpad boot firmware",
- size: 0x00080000,
- offset: 0,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "SIMpad kernel",
- size: 0x00100000,
- offset: 0x00080000,
- }, {
-#ifdef CONFIG_JFFS2_FS
- name: "SIMpad root jffs2",
- size: MTDPART_SIZ_FULL,
- offset: 0x00180000,
-#else
- name: "SIMpad initrd",
- size: 0x00300000,
- offset: 0x00180000,
- }, {
- name: "SIMpad root cramfs",
- size: 0x00300000,
- offset: 0x00480000,
- }, {
- name: "SIMpad usr cramfs",
- size: 0x005c0000,
- offset: 0x00780000,
- }, {
- name: "SIMpad usr local",
- size: MTDPART_SIZ_FULL,
- offset: 0x00d40000,
-#endif
+ simple_map_init(&subdev->map);
+
+ /*
+ * Now let's probe for the actual flash. Do it here since
+ * specific machine settings might have been set above.
+ */
+ subdev->mtd = do_map_probe(subdev->plat->map_name, &subdev->map);
+ if (subdev->mtd == NULL) {
+ ret = -ENXIO;
+ goto err;
}
-};
-#endif /* CONFIG_SA1100_SIMPAD */
+ subdev->mtd->owner = THIS_MODULE;
-#ifdef CONFIG_SA1100_STORK
-#define STORK_FLASH_SIZE 0x02000000
-static struct mtd_partition stork_partitions[] = {
- {
- name: "STORK boot firmware",
- size: 0x00040000,
- offset: 0,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "STORK params",
- size: 0x00040000,
- offset: 0x00040000,
- }, {
- name: "STORK kernel",
- size: 0x00100000,
- offset: 0x00080000,
- }, {
-#ifdef CONFIG_JFFS2_FS
- name: "STORK root jffs2",
- offset: 0x00180000,
- size: MTDPART_SIZ_FULL,
-#else
- name: "STORK initrd",
- size: 0x00100000,
- offset: 0x00180000,
- }, {
- name: "STORK root cramfs",
- size: 0x00300000,
- offset: 0x00280000,
- }, {
- name: "STORK usr cramfs",
- size: 0x00800000,
- offset: 0x00580000,
- }, {
- name: "STORK usr local",
- offset: 0x00d80000,
- size: MTDPART_SIZ_FULL,
+ printk(KERN_INFO "SA1100 flash: CFI device at 0x%08lx, %dMiB, "
+ "%d-bit\n", phys, subdev->mtd->size >> 20,
+ subdev->map.bankwidth * 8);
+
+ return 0;
+
+ err:
+ sa1100_destroy_subdev(subdev);
+ out:
+ return ret;
+}
+
+static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *plat)
+{
+ int i;
+
+ if (info->mtd) {
+ if (info->nr_parts == 0)
+ del_mtd_device(info->mtd);
+#ifdef CONFIG_MTD_PARTITIONS
+ else
+ del_mtd_partitions(info->mtd);
#endif
- }
-};
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
#endif
-
-#ifdef CONFIG_SA1100_YOPY
-#define YOPY_FLASH_SIZE 0x08000000
-static struct mtd_partition yopy_partitions[] = {
- {
- name: "boot firmware",
- size: 0x00040000,
- offset: 0x00000000,
- mask_flags: MTD_WRITEABLE, /* force read-only */
- }, {
- name: "kernel",
- size: 0x00080000,
- offset: 0x00080000,
- }, {
- name: "initrd",
- size: 0x00300000,
- offset: 0x00100000,
- }, {
- name: "root",
- size: 0x01000000,
- offset: 0x00400000,
}
-};
-#endif
-extern int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts);
-extern int parse_bootldr_partitions(struct mtd_info *master, struct mtd_partition **pparts);
+ kfree(info->parts);
-static struct mtd_partition *parsed_parts;
-static struct mtd_info *mymtd;
+ for (i = info->num_subdev - 1; i >= 0; i--)
+ sa1100_destroy_subdev(&info->subdev[i]);
+ kfree(info);
-int __init sa1100_mtd_init(void)
-{
- struct mtd_partition *parts;
- int nb_parts = 0, ret;
- int parsed_nr_parts = 0;
- const char *part_type;
- unsigned long base = -1UL;
+ if (plat->exit)
+ plat->exit();
+}
- /* Default flash buswidth */
- sa1100_map.buswidth = (MSC0 & MSC_RBW) ? 2 : 4;
+static struct sa_info *__init
+sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat)
+{
+ struct sa_info *info;
+ int nr, size, i, ret = 0;
/*
- * Static partition definition selection
+ * Count number of devices.
*/
- part_type = "static";
-
-#ifdef CONFIG_SA1100_ADSBITSY
- if (machine_is_adsbitsy()) {
- parts = adsbitsy_partitions;
- nb_parts = ARRAY_SIZE(adsbitsy_partitions);
- sa1100_map.size = ADSBITSY_FLASH_SIZE;
- sa1100_map.buswidth = (MSC1 & MSC_RBW) ? 2 : 4;
- }
-#endif
-#ifdef CONFIG_SA1100_ASSABET
- if (machine_is_assabet()) {
- parts = assabet_partitions;
- nb_parts = ARRAY_SIZE(assabet_partitions);
- sa1100_map.size = ASSABET_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_BADGE4
- if (machine_is_badge4()) {
- parts = badge4_partitions;
- nb_parts = ARRAY_SIZE(badge4_partitions);
- sa1100_map.size = BADGE4_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_CERF
- if (machine_is_cerf()) {
- parts = cerf_partitions;
- nb_parts = ARRAY_SIZE(cerf_partitions);
- sa1100_map.size = CERF_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_CONSUS
- if (machine_is_consus()) {
- parts = consus_partitions;
- nb_parts = ARRAY_SIZE(consus_partitions);
- sa1100_map.size = CONSUS_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_FLEXANET
- if (machine_is_flexanet()) {
- parts = flexanet_partitions;
- nb_parts = ARRAY_SIZE(flexanet_partitions);
- sa1100_map.size = FLEXANET_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_FREEBIRD
- if (machine_is_freebird()) {
- parts = freebird_partitions;
- nb_parts = ARRAY_SIZE(freebird_partitions);
- sa1100_map.size = FREEBIRD_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_FRODO
- if (machine_is_frodo()) {
- parts = frodo_partitions;
- nb_parts = ARRAY_SIZE(frodo_partitions);
- sa1100_map.size = FRODO_FLASH_SIZE;
- base = 0x00000000;
- }
-#ifdef CONFIG_SA1100_GRAPHICSCLIENT
- if (machine_is_graphicsclient()) {
- parts = graphicsclient_partitions;
- nb_parts = ARRAY_SIZE(graphicsclient_partitions);
- sa1100_map.size = GRAPHICSCLIENT_FLASH_SIZE;
- sa1100_map.buswidth = (MSC1 & MSC_RBW) ? 2:4;
- }
-#endif
-#ifdef CONFIG_SA1100_GRAPHICSMASTER
- if (machine_is_graphicsmaster()) {
- parts = graphicsmaster_partitions;
- nb_parts = ARRAY_SIZE(graphicsmaster_partitions);
- sa1100_map.size = GRAPHICSMASTER_FLASH_SIZE;
- sa1100_map.buswidth = (MSC1 & MSC_RBW) ? 2:4;
- }
-#endif
-#ifdef CONFIG_SA1100_H3600
- if (machine_is_h3600()) {
- parts = h3600_partitions;
- nb_parts = ARRAY_SIZE(h3600_partitions);
- sa1100_map.size = H3600_FLASH_SIZE;
- sa1100_map.set_vpp = h3600_set_vpp;
- }
-#endif
-#ifdef CONFIG_SA1100_HUW_WEBPANEL
- if (machine_is_huw_webpanel()) {
- parts = huw_webpanel_partitions;
- nb_parts = ARRAY_SIZE(huw_webpanel_partitions);
- sa1100_map.size = HUW_WEBPANEL_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_JORNADA720
- if (machine_is_jornada720()) {
- parts = jornada720_partitions;
- nb_parts = ARRAY_SIZE(jornada720_partitions);
- sa1100_map.size = JORNADA720_FLASH_SIZE;
- sa1100_map.set_vpp = jornada720_set_vpp;
- }
-#endif
-#ifdef CONFIG_SA1100_PANGOLIN
- if (machine_is_pangolin()) {
- parts = pangolin_partitions;
- nb_parts = ARRAY_SIZE(pangolin_partitions);
- sa1100_map.size = PANGOLIN_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_PT_SYSTEM3
- if (machine_is_pt_system3()) {
- parts = system3_partitions;
- nb_parts = ARRAY_SIZE(system3_partitions);
- sa1100_map.size = SYSTEM3_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_SHANNON
- if (machine_is_shannon()) {
- parts = shannon_partitions;
- nb_parts = ARRAY_SIZE(shannon_partitions);
- sa1100_map.size = SHANNON_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_SHERMAN
- if (machine_is_sherman()) {
- parts = sherman_partitions;
- nb_parts = ARRAY_SIZE(sherman_partitions);
- sa1100_map.size = SHERMAN_FLASH_SIZE;
- }
-#endif
-#ifdef CONFIG_SA1100_SIMPAD
- if (machine_is_simpad()) {
- parts = simpad_partitions;
- nb_parts = ARRAY_SIZE(simpad_partitions);
- sa1100_map.size = SIMPAD_FLASH_SIZE;
+ for (nr = 0; ; nr++)
+ if (!platform_get_resource(pdev, IORESOURCE_MEM, nr))
+ break;
+
+ if (nr == 0) {
+ ret = -ENODEV;
+ goto out;
}
-#endif
-#ifdef CONFIG_SA1100_STORK
- if (machine_is_stork()) {
- parts = stork_partitions;
- nb_parts = ARRAY_SIZE(stork_partitions);
- sa1100_map.size = STORK_FLASH_SIZE;
+
+ size = sizeof(struct sa_info) + sizeof(struct sa_subdev_info) * nr;
+
+ /*
+ * Allocate the map_info structs in one go.
+ */
+ info = kmalloc(size, GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto out;
}
-#endif
-#ifdef CONFIG_SA1100_YOPY
- if (machine_is_yopy()) {
- parts = yopy_partitions;
- nb_parts = ARRAY_SIZE(yopy_partitions);
- sa1100_map.size = YOPY_FLASH_SIZE;
+
+ memset(info, 0, size);
+
+ if (plat->init) {
+ ret = plat->init();
+ if (ret)
+ goto err;
}
-#endif
/*
- * For simple flash devices, use ioremap to map the flash.
+ * Claim and then map the memory regions.
*/
- if (base != (unsigned long)-1) {
- if (!request_mem_region(base, sa1100_map.size, "flash"))
- return -EBUSY;
- sa1100_map.map_priv_2 = base;
- sa1100_map.map_priv_1 = (unsigned long)
- ioremap(base, sa1100_map.size);
- ret = -ENOMEM;
- if (!sa1100_map.map_priv_1)
- goto out_err;
+ for (i = 0; i < nr; i++) {
+ struct sa_subdev_info *subdev = &info->subdev[i];
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
+
+ subdev->map.name = subdev->name;
+ sprintf(subdev->name, "%s-%d", plat->name, i);
+ subdev->plat = plat;
+
+ ret = sa1100_probe_subdev(subdev, res);
+ if (ret)
+ break;
}
+ info->num_subdev = i;
+
/*
- * Now let's probe for the actual flash. Do it here since
- * specific machine settings might have been set above.
+ * ENXIO is special. It means we didn't find a chip when we probed.
*/
- printk(KERN_NOTICE "SA1100 flash: probing %d-bit flash bus\n", sa1100_map.buswidth*8);
- mymtd = do_map_probe("cfi_probe", &sa1100_map);
- ret = -ENXIO;
- if (!mymtd)
- goto out_err;
- mymtd->module = THIS_MODULE;
+ if (ret != 0 && !(ret == -ENXIO && info->num_subdev > 0))
+ goto err;
/*
- * Dynamic partition selection stuff (might override the static ones)
+ * If we found one device, don't bother with concat support. If
+ * we found multiple devices, use concat if we have it available,
+ * otherwise fail. Either way, it'll be called "sa1100".
*/
-#ifdef CONFIG_MTD_REDBOOT_PARTS
- if (parsed_nr_parts == 0) {
- int ret = parse_redboot_partitions(mymtd, &parsed_parts);
-
- if (ret > 0) {
- part_type = "RedBoot";
- parsed_nr_parts = ret;
- }
- }
+ if (info->num_subdev == 1) {
+ strcpy(info->subdev[0].name, plat->name);
+ info->mtd = info->subdev[0].mtd;
+ ret = 0;
+ } else if (info->num_subdev > 1) {
+#ifdef CONFIG_MTD_CONCAT
+ struct mtd_info *cdev[nr];
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ for (i = 0; i < info->num_subdev; i++)
+ cdev[i] = info->subdev[i].mtd;
+
+ info->mtd = mtd_concat_create(cdev, info->num_subdev,
+ plat->name);
+ if (info->mtd == NULL)
+ ret = -ENXIO;
+#else
+ printk(KERN_ERR "SA1100 flash: multiple devices "
+ "found but MTD concat support disabled.\n");
+ ret = -ENXIO;
#endif
-#ifdef CONFIG_MTD_BOOTLDR_PARTS
- if (parsed_nr_parts == 0) {
- int ret = parse_bootldr_partitions(mymtd, &parsed_parts);
- if (ret > 0) {
- part_type = "Compaq bootldr";
- parsed_nr_parts = ret;
- }
}
-#endif
- if (parsed_nr_parts > 0) {
- parts = parsed_parts;
- nb_parts = parsed_nr_parts;
+ if (ret == 0)
+ return info;
+
+ err:
+ sa1100_destroy(info, plat);
+ out:
+ return ERR_PTR(ret);
+}
+
+static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+
+static int __init sa1100_mtd_probe(struct platform_device *pdev)
+{
+ struct flash_platform_data *plat = pdev->dev.platform_data;
+ struct mtd_partition *parts;
+ const char *part_type = NULL;
+ struct sa_info *info;
+ int err, nr_parts = 0;
+
+ if (!plat)
+ return -ENODEV;
+
+ info = sa1100_setup_mtd(pdev, plat);
+ if (IS_ERR(info)) {
+ err = PTR_ERR(info);
+ goto out;
+ }
+
+ /*
+ * Partition selection stuff.
+ */
+#ifdef CONFIG_MTD_PARTITIONS
+ nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
+ if (nr_parts > 0) {
+ info->parts = parts;
+ part_type = "dynamic";
+ } else
+#endif
+ {
+ parts = plat->parts;
+ nr_parts = plat->nr_parts;
+ part_type = "static";
}
- if (nb_parts == 0) {
- printk(KERN_NOTICE "SA1100 flash: no partition info available, registering whole flash at once\n");
- add_mtd_device(mymtd);
+ if (nr_parts == 0) {
+ printk(KERN_NOTICE "SA1100 flash: no partition info "
+ "available, registering whole flash\n");
+ add_mtd_device(info->mtd);
} else {
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(mymtd, parts, nb_parts);
+ printk(KERN_NOTICE "SA1100 flash: using %s partition "
+ "definition\n", part_type);
+ add_mtd_partitions(info->mtd, parts, nr_parts);
}
+
+ info->nr_parts = nr_parts;
+
+ platform_set_drvdata(pdev, info);
+ err = 0;
+
+ out:
+ return err;
+}
+
+static int __exit sa1100_mtd_remove(struct platform_device *pdev)
+{
+ struct sa_info *info = platform_get_drvdata(pdev);
+ struct flash_platform_data *plat = pdev->dev.platform_data;
+
+ platform_set_drvdata(pdev, NULL);
+ sa1100_destroy(info, plat);
+
return 0;
+}
+
+#ifdef CONFIG_PM
+static int sa1100_mtd_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct sa_info *info = platform_get_drvdata(dev);
+ int ret = 0;
+
+ if (info)
+ ret = info->mtd->suspend(info->mtd);
- out_err:
- if (sa1100_map.map_priv_2 != -1) {
- iounmap((void *)sa1100_map.map_priv_1);
- release_mem_region(sa1100_map.map_priv_2, sa1100_map.size);
- }
return ret;
}
-static void __exit sa1100_mtd_cleanup(void)
+static int sa1100_mtd_resume(struct platform_device *dev)
{
- if (mymtd) {
- del_mtd_partitions(mymtd);
- map_destroy(mymtd);
- if (parsed_parts)
- kfree(parsed_parts);
- }
- if (sa1100_map.map_priv_2 != -1) {
- iounmap((void *)sa1100_map.map_priv_1);
- release_mem_region(sa1100_map.map_priv_2, sa1100_map.size);
- }
+ struct sa_info *info = platform_get_drvdata(dev);
+ if (info)
+ info->mtd->resume(info->mtd);
+ return 0;
+}
+
+static void sa1100_mtd_shutdown(struct platform_device *dev)
+{
+ struct sa_info *info = platform_get_drvdata(dev);
+ if (info && info->mtd->suspend(info->mtd) == 0)
+ info->mtd->resume(info->mtd);
+}
+#else
+#define sa1100_mtd_suspend NULL
+#define sa1100_mtd_resume NULL
+#define sa1100_mtd_shutdown NULL
+#endif
+
+static struct platform_driver sa1100_mtd_driver = {
+ .probe = sa1100_mtd_probe,
+ .remove = __exit_p(sa1100_mtd_remove),
+ .suspend = sa1100_mtd_suspend,
+ .resume = sa1100_mtd_resume,
+ .shutdown = sa1100_mtd_shutdown,
+ .driver = {
+ .name = "flash",
+ },
+};
+
+static int __init sa1100_mtd_init(void)
+{
+ return platform_driver_register(&sa1100_mtd_driver);
+}
+
+static void __exit sa1100_mtd_exit(void)
+{
+ platform_driver_unregister(&sa1100_mtd_driver);
}
module_init(sa1100_mtd_init);
-module_exit(sa1100_mtd_cleanup);
+module_exit(sa1100_mtd_exit);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("SA1100 CFI map driver");
diff --git a/linux-2.4.x/drivers/mtd/maps/sbc8240.c b/linux-2.4.x/drivers/mtd/maps/sbc8240.c
new file mode 100644
index 0000000..2a0b57d
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/sbc8240.c
@@ -0,0 +1,245 @@
+/*
+ * Handle mapping of the flash memory access routines on the SBC8240 board.
+ *
+ * Carolyn Smith, Tektronix, Inc.
+ *
+ * This code is GPLed
+ *
+ * $Id: sbc8240.c,v 1.6 2006/03/29 08:31:11 dwmw2 Exp $
+ *
+ */
+
+/*
+ * The SBC8240 has 2 flash banks.
+ * Bank 0 is a 512 KiB AMD AM29F040B; 8 x 64 KiB sectors.
+ * It contains the U-Boot code (7 sectors) and the environment (1 sector).
+ * Bank 1 is 4 x 1 MiB AMD AM29LV800BT parts; each has 15 x 64 KiB sectors,
+ * 1 x 32 KiB sector, 2 x 8 KiB sectors and 1 x 16 KiB sector.
+ * Both parts are JEDEC compatible.
+ */
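+
+/*
+ * A worked example of the bank 1 geometry (inferred from the figures
+ * above, not taken from the board schematic): four parts, presumably in
+ * x16 mode, fill the 8-byte-wide bus, so each 64 KiB on-chip sector
+ * appears as one 4 x 64 KiB = 256 KiB erase block, and the 15 main
+ * sectors together give 15 x 256 KiB = 0x3C0000 bytes, the size used
+ * for the "jffs" partition further down.
+ */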
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
+#define DEBUG
+
+#ifdef DEBUG
+# define debugk(fmt,args...) printk(fmt ,##args)
+#else
+# define debugk(fmt,args...)
+#endif
+
+
+#define WINDOW_ADDR0 0xFFF00000 /* 512 KiB */
+#define WINDOW_SIZE0 0x00080000
+#define BUSWIDTH0 1
+
+#define WINDOW_ADDR1 0xFF000000 /* 4 MiB */
+#define WINDOW_SIZE1 0x00400000
+#define BUSWIDTH1 8
+
+#define MSG_PREFIX "sbc8240:" /* prefix for our printk()'s */
+#define MTDID "sbc8240-%d" /* for mtdparts= partitioning */
+
+
+static struct map_info sbc8240_map[2] = {
+ {
+ .name = "sbc8240 Flash Bank #0",
+ .size = WINDOW_SIZE0,
+ .bankwidth = BUSWIDTH0,
+ },
+ {
+ .name = "sbc8240 Flash Bank #1",
+ .size = WINDOW_SIZE1,
+ .bankwidth = BUSWIDTH1,
+ }
+};
+
+#define NUM_FLASH_BANKS ARRAY_SIZE(sbc8240_map)
+
+/*
+ * The following defines the partition layout of SBC8240 boards.
+ *
+ * See include/linux/mtd/partitions.h for definition of the
+ * mtd_partition structure.
+ *
+ * The *_max_flash_size is the maximum possible mapped flash size
+ * which is not necessarily the actual flash size. It must correspond
+ * to the value specified in the mapping definition given by the
+ * "struct map_desc *_io_desc" for the corresponding machine.
+ */
+
+#ifdef CONFIG_MTD_PARTITIONS
+
+static struct mtd_partition sbc8240_uboot_partitions [] = {
+ /* Bank 0 */
+ {
+ .name = "U-boot", /* U-Boot Firmware */
+ .offset = 0,
+ .size = 0x00070000, /* 7 x 64 KiB sectors */
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ .name = "environment", /* U-Boot environment */
+ .offset = 0x00070000,
+ .size = 0x00010000, /* 1 x 64 KiB sector */
+ },
+};
+
+static struct mtd_partition sbc8240_fs_partitions [] = {
+ {
+ .name = "jffs", /* JFFS filesystem */
+ .offset = 0,
+ .size = 0x003C0000, /* 4 * 15 * 64KiB */
+ },
+ {
+ .name = "tmp32",
+ .offset = 0x003C0000,
+ .size = 0x00020000, /* 4 * 32KiB */
+ },
+ {
+ .name = "tmp8a",
+ .offset = 0x003E0000,
+ .size = 0x00008000, /* 4 * 8KiB */
+ },
+ {
+ .name = "tmp8b",
+ .offset = 0x003E8000,
+ .size = 0x00008000, /* 4 * 8KiB */
+ },
+ {
+ .name = "tmp16",
+ .offset = 0x003F0000,
+ .size = 0x00010000, /* 4 * 16KiB */
+ }
+};
+
+/* trivial struct to describe partition information */
+struct mtd_part_def
+{
+ int nums;
+ unsigned char *type;
+ struct mtd_partition* mtd_part;
+};
+
+static struct mtd_info *sbc8240_mtd[NUM_FLASH_BANKS];
+static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS];
+
+
+#endif /* CONFIG_MTD_PARTITIONS */
+
+
+int __init init_sbc8240_mtd (void)
+{
+ static struct _cjs {
+ u_long addr;
+ u_long size;
+ } pt[NUM_FLASH_BANKS] = {
+ {
+ .addr = WINDOW_ADDR0,
+ .size = WINDOW_SIZE0
+ },
+ {
+ .addr = WINDOW_ADDR1,
+ .size = WINDOW_SIZE1
+ },
+ };
+
+ int devicesfound = 0;
+ int i;
+
+ for (i = 0; i < NUM_FLASH_BANKS; i++) {
+ printk (KERN_NOTICE MSG_PREFIX
+ "Probing 0x%08lx at 0x%08lx\n", pt[i].size, pt[i].addr);
+
+ sbc8240_map[i].map_priv_1 =
+ (unsigned long) ioremap (pt[i].addr, pt[i].size);
+ if (!sbc8240_map[i].map_priv_1) {
+ printk (MSG_PREFIX "failed to ioremap\n");
+ return -EIO;
+ }
+ simple_map_init(&sbc8240_map[i]);
+
+ sbc8240_mtd[i] = do_map_probe("jedec_probe", &sbc8240_map[i]);
+
+ if (sbc8240_mtd[i]) {
+ sbc8240_mtd[i]->module = THIS_MODULE;
+ devicesfound++;
+ }
+ }
+
+ if (!devicesfound) {
+ printk(KERN_NOTICE MSG_PREFIX
+ "No suppported flash chips found!\n");
+ return -ENXIO;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions;
+ sbc8240_part_banks[0].type = "static image";
+ sbc8240_part_banks[0].nums = ARRAY_SIZE(sbc8240_uboot_partitions);
+ sbc8240_part_banks[1].mtd_part = sbc8240_fs_partitions;
+ sbc8240_part_banks[1].type = "static file system";
+ sbc8240_part_banks[1].nums = ARRAY_SIZE(sbc8240_fs_partitions);
+
+ for (i = 0; i < NUM_FLASH_BANKS; i++) {
+
+ if (!sbc8240_mtd[i]) continue;
+ if (sbc8240_part_banks[i].nums == 0) {
+ printk (KERN_NOTICE MSG_PREFIX
+ "No partition info available, registering whole device\n");
+ add_mtd_device(sbc8240_mtd[i]);
+ } else {
+ printk (KERN_NOTICE MSG_PREFIX
+ "Using %s partition definition\n", sbc8240_part_banks[i].mtd_part->name);
+ add_mtd_partitions (sbc8240_mtd[i],
+ sbc8240_part_banks[i].mtd_part,
+ sbc8240_part_banks[i].nums);
+ }
+ }
+#else
+ printk(KERN_NOTICE MSG_PREFIX
+ "Registering %d flash banks at once\n", devicesfound);
+
+ for (i = 0; i < devicesfound; i++) {
+ add_mtd_device(sbc8240_mtd[i]);
+ }
+#endif /* CONFIG_MTD_PARTITIONS */
+
+ return devicesfound == 0 ? -ENXIO : 0;
+}
+
+static void __exit cleanup_sbc8240_mtd (void)
+{
+ int i;
+
+ for (i = 0; i < NUM_FLASH_BANKS; i++) {
+ if (sbc8240_mtd[i]) {
+ del_mtd_device (sbc8240_mtd[i]);
+ map_destroy (sbc8240_mtd[i]);
+ }
+ if (sbc8240_map[i].map_priv_1) {
+ iounmap ((void *) sbc8240_map[i].map_priv_1);
+ sbc8240_map[i].map_priv_1 = 0;
+ }
+ }
+}
+
+module_init (init_sbc8240_mtd);
+module_exit (cleanup_sbc8240_mtd);
+
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Carolyn Smith <carolyn.smith@tektronix.com>");
+MODULE_DESCRIPTION ("MTD map driver for SBC8240 boards");
+
diff --git a/linux-2.4.x/drivers/mtd/maps/sbc_gxx.c b/linux-2.4.x/drivers/mtd/maps/sbc_gxx.c
index aafa441..7cc4041 100644
--- a/linux-2.4.x/drivers/mtd/maps/sbc_gxx.c
+++ b/linux-2.4.x/drivers/mtd/maps/sbc_gxx.c
@@ -1,35 +1,35 @@
/* sbc_gxx.c -- MTD map driver for Arcom Control Systems SBC-MediaGX,
SBC-GXm and SBC-GX1 series boards.
-
+
Copyright (C) 2001 Arcom Control System Ltd
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- $Id: sbc_gxx.c,v 1.20 2002/02/13 15:30:20 dwmw2 Exp $
+ $Id: sbc_gxx.c,v 1.35 2005/11/07 11:14:28 gleixner Exp $
-The SBC-MediaGX / SBC-GXx has up to 16 MiB of
-Intel StrataFlash (28F320/28F640) in x8 mode.
+The SBC-MediaGX / SBC-GXx has up to 16 MiB of
+Intel StrataFlash (28F320/28F640) in x8 mode.
This driver uses the CFI probe and Intel Extended Command Set drivers.
The flash is accessed as follows:
- 16 kbyte memory window at 0xdc000-0xdffff
-
+ 16 KiB memory window at 0xdc000-0xdffff
+
Two IO address locations for paging
-
+
0x258
bit 0-7: address bit 14-21
0x259
@@ -37,7 +37,7 @@ The flash is accessed as follows:
bit 7: 0 - reset/powered down
1 - device enabled
-The single flash device is divided into 3 partition which appear as
+The single flash device is divided into 3 partitions which appear as
separate MTD devices.
25/04/2001 AJL (Arcom) Modified signon strings and partition sizes
@@ -84,21 +84,21 @@ separate MTD devices.
// Globals
static volatile int page_in_window = -1; // Current page in window.
-static unsigned long iomapadr;
-static spinlock_t sbc_gxx_spin = SPIN_LOCK_UNLOCKED;
+static void __iomem *iomapadr;
+static DEFINE_SPINLOCK(sbc_gxx_spin);
-/* partition_info gives details on the logical partitions that the split the
+/* partition_info gives details on the logical partitions that split the
 * single flash device into. If the size is zero we use up to the end of the
* device. */
static struct mtd_partition partition_info[]={
- { name: "SBC-GXx flash boot partition",
- offset: 0,
- size: BOOT_PARTITION_SIZE_KiB*1024 },
- { name: "SBC-GXx flash data partition",
- offset: BOOT_PARTITION_SIZE_KiB*1024,
- size: (DATA_PARTITION_SIZE_KiB)*1024 },
- { name: "SBC-GXx flash application partition",
- offset: (BOOT_PARTITION_SIZE_KiB+DATA_PARTITION_SIZE_KiB)*1024 }
+ { .name = "SBC-GXx flash boot partition",
+ .offset = 0,
+ .size = BOOT_PARTITION_SIZE_KiB*1024 },
+ { .name = "SBC-GXx flash data partition",
+ .offset = BOOT_PARTITION_SIZE_KiB*1024,
+ .size = (DATA_PARTITION_SIZE_KiB)*1024 },
+ { .name = "SBC-GXx flash application partition",
+ .offset = (BOOT_PARTITION_SIZE_KiB+DATA_PARTITION_SIZE_KiB)*1024 }
};
#define NUM_PARTITIONS 3
@@ -114,32 +114,12 @@ static inline void sbc_gxx_page(struct map_info *map, unsigned long ofs)
}
-static __u8 sbc_gxx_read8(struct map_info *map, unsigned long ofs)
-{
- __u8 ret;
- spin_lock(&sbc_gxx_spin);
- sbc_gxx_page(map, ofs);
- ret = readb(iomapadr + (ofs & WINDOW_MASK));
- spin_unlock(&sbc_gxx_spin);
- return ret;
-}
-
-static __u16 sbc_gxx_read16(struct map_info *map, unsigned long ofs)
+static map_word sbc_gxx_read8(struct map_info *map, unsigned long ofs)
{
- __u16 ret;
+ map_word ret;
spin_lock(&sbc_gxx_spin);
sbc_gxx_page(map, ofs);
- ret = readw(iomapadr + (ofs & WINDOW_MASK));
- spin_unlock(&sbc_gxx_spin);
- return ret;
-}
-
-static __u32 sbc_gxx_read32(struct map_info *map, unsigned long ofs)
-{
- __u32 ret;
- spin_lock(&sbc_gxx_spin);
- sbc_gxx_page(map, ofs);
- ret = readl(iomapadr + (ofs & WINDOW_MASK));
+ ret.x[0] = readb(iomapadr + (ofs & WINDOW_MASK));
spin_unlock(&sbc_gxx_spin);
return ret;
}
@@ -150,48 +130,32 @@ static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from
unsigned long thislen = len;
if (len > (WINDOW_LENGTH - (from & WINDOW_MASK)))
thislen = WINDOW_LENGTH-(from & WINDOW_MASK);
-
+
spin_lock(&sbc_gxx_spin);
sbc_gxx_page(map, from);
memcpy_fromio(to, iomapadr + (from & WINDOW_MASK), thislen);
spin_unlock(&sbc_gxx_spin);
- (__u8*)to += thislen;
+ to += thislen;
from += thislen;
len -= thislen;
}
}
-static void sbc_gxx_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- spin_lock(&sbc_gxx_spin);
- sbc_gxx_page(map, adr);
- writeb(d, iomapadr + (adr & WINDOW_MASK));
- spin_unlock(&sbc_gxx_spin);
-}
-
-static void sbc_gxx_write16(struct map_info *map, __u16 d, unsigned long adr)
+static void sbc_gxx_write8(struct map_info *map, map_word d, unsigned long adr)
{
spin_lock(&sbc_gxx_spin);
sbc_gxx_page(map, adr);
- writew(d, iomapadr + (adr & WINDOW_MASK));
- spin_unlock(&sbc_gxx_spin);
-}
-
-static void sbc_gxx_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- spin_lock(&sbc_gxx_spin);
- sbc_gxx_page(map, adr);
- writel(d, iomapadr + (adr & WINDOW_MASK));
+ writeb(d.x[0], iomapadr + (adr & WINDOW_MASK));
spin_unlock(&sbc_gxx_spin);
}
static void sbc_gxx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
+{
while(len) {
unsigned long thislen = len;
if (len > (WINDOW_LENGTH - (to & WINDOW_MASK)))
thislen = WINDOW_LENGTH-(to & WINDOW_MASK);
-
+
spin_lock(&sbc_gxx_spin);
sbc_gxx_page(map, to);
memcpy_toio(iomapadr + (to & WINDOW_MASK), from, thislen);
@@ -203,19 +167,16 @@ static void sbc_gxx_copy_to(struct map_info *map, unsigned long to, const void *
}
static struct map_info sbc_gxx_map = {
- name: "SBC-GXx flash",
- size: MAX_SIZE_KiB*1024, /* this must be set to a maximum possible amount
+ .name = "SBC-GXx flash",
+ .phys = NO_XIP,
+ .size = MAX_SIZE_KiB*1024, /* this must be set to a maximum possible amount
of flash so the cfi probe routines find all
the chips */
- buswidth: 1,
- read8: sbc_gxx_read8,
- read16: sbc_gxx_read16,
- read32: sbc_gxx_read32,
- copy_from: sbc_gxx_copy_from,
- write8: sbc_gxx_write8,
- write16: sbc_gxx_write16,
- write32: sbc_gxx_write32,
- copy_to: sbc_gxx_copy_to
+ .bankwidth = 1,
+ .read = sbc_gxx_read8,
+ .copy_from = sbc_gxx_copy_from,
+ .write = sbc_gxx_write8,
+ .copy_to = sbc_gxx_copy_to
};
/* MTD device for all of the flash. */
@@ -228,27 +189,28 @@ static void cleanup_sbc_gxx(void)
map_destroy( all_mtd );
}
- iounmap((void *)iomapadr);
+ iounmap(iomapadr);
release_region(PAGE_IO,PAGE_IO_SIZE);
}
-int __init init_sbc_gxx(void)
+static int __init init_sbc_gxx(void)
{
- if (check_region(PAGE_IO,PAGE_IO_SIZE) != 0) {
- printk( KERN_ERR"%s: IO ports 0x%x-0x%x in use\n",
- sbc_gxx_map.name,
- PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1 );
- return -EAGAIN;
- }
- iomapadr = (unsigned long)ioremap(WINDOW_START, WINDOW_LENGTH);
+ iomapadr = ioremap(WINDOW_START, WINDOW_LENGTH);
if (!iomapadr) {
printk( KERN_ERR"%s: failed to ioremap memory region\n",
sbc_gxx_map.name );
return -EIO;
}
-
- request_region( PAGE_IO, PAGE_IO_SIZE, "SBC-GXx flash" );
-
+
+ if (!request_region( PAGE_IO, PAGE_IO_SIZE, "SBC-GXx flash")) {
+ printk( KERN_ERR"%s: IO ports 0x%x-0x%x in use\n",
+ sbc_gxx_map.name,
+ PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1 );
+ iounmap(iomapadr);
+ return -EAGAIN;
+ }
+
+
printk( KERN_INFO"%s: IO:0x%x-0x%x MEM:0x%x-0x%x\n",
sbc_gxx_map.name,
PAGE_IO, PAGE_IO+PAGE_IO_SIZE-1,
@@ -260,8 +222,8 @@ int __init init_sbc_gxx(void)
cleanup_sbc_gxx();
return -ENXIO;
}
-
- all_mtd->module=THIS_MODULE;
+
+ all_mtd->owner = THIS_MODULE;
/* Create MTD devices for each partition. */
add_mtd_partitions(all_mtd, partition_info, NUM_PARTITIONS );
diff --git a/linux-2.4.x/drivers/mtd/maps/sc520cdp.c b/linux-2.4.x/drivers/mtd/maps/sc520cdp.c
index ac7bd98..a93a998 100644
--- a/linux-2.4.x/drivers/mtd/maps/sc520cdp.c
+++ b/linux-2.4.x/drivers/mtd/maps/sc520cdp.c
@@ -16,7 +16,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: sc520cdp.c,v 1.11 2002/03/08 16:34:35 rkaiser Exp $
+ * $Id: sc520cdp.c,v 1.24 2006/03/29 08:31:11 dwmw2 Exp $
*
*
* The SC520CDP is an evaluation board for the Elan SC520 processor available
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
@@ -84,92 +85,29 @@
#define WINDOW_SIZE_1 0x00800000
#define WINDOW_SIZE_2 0x00080000
-static __u8 sc520cdp_read8(struct map_info *map, unsigned long ofs)
-{
- return readb(map->map_priv_1 + ofs);
-}
-
-static __u16 sc520cdp_read16(struct map_info *map, unsigned long ofs)
-{
- return readw(map->map_priv_1 + ofs);
-}
-
-static __u32 sc520cdp_read32(struct map_info *map, unsigned long ofs)
-{
- return readl(map->map_priv_1 + ofs);
-}
-
-static void sc520cdp_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
-}
-
-static void sc520cdp_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- writeb(d, map->map_priv_1 + adr);
-}
-
-static void sc520cdp_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- writew(d, map->map_priv_1 + adr);
-}
-
-static void sc520cdp_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- writel(d, map->map_priv_1 + adr);
-}
-
-static void sc520cdp_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio((void *)(map->map_priv_1 + to), from, len);
-}
static struct map_info sc520cdp_map[] = {
{
- name: "SC520CDP Flash Bank #0",
- size: WINDOW_SIZE_0,
- buswidth: 4,
- read8: sc520cdp_read8,
- read16: sc520cdp_read16,
- read32: sc520cdp_read32,
- copy_from: sc520cdp_copy_from,
- write8: sc520cdp_write8,
- write16: sc520cdp_write16,
- write32: sc520cdp_write32,
- copy_to: sc520cdp_copy_to,
- map_priv_2: WINDOW_ADDR_0
+ .name = "SC520CDP Flash Bank #0",
+ .size = WINDOW_SIZE_0,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR_0
},
{
- name: "SC520CDP Flash Bank #1",
- size: WINDOW_SIZE_1,
- buswidth: 4,
- read8: sc520cdp_read8,
- read16: sc520cdp_read16,
- read32: sc520cdp_read32,
- copy_from: sc520cdp_copy_from,
- write8: sc520cdp_write8,
- write16: sc520cdp_write16,
- write32: sc520cdp_write32,
- copy_to: sc520cdp_copy_to,
- map_priv_2: WINDOW_ADDR_1
+ .name = "SC520CDP Flash Bank #1",
+ .size = WINDOW_SIZE_1,
+ .bankwidth = 4,
+ .phys = WINDOW_ADDR_1
},
{
- name: "SC520CDP DIL Flash",
- size: WINDOW_SIZE_2,
- buswidth: 1,
- read8: sc520cdp_read8,
- read16: sc520cdp_read16,
- read32: sc520cdp_read32,
- copy_from: sc520cdp_copy_from,
- write8: sc520cdp_write8,
- write16: sc520cdp_write16,
- write32: sc520cdp_write32,
- copy_to: sc520cdp_copy_to,
- map_priv_2: WINDOW_ADDR_2
+ .name = "SC520CDP DIL Flash",
+ .size = WINDOW_SIZE_2,
+ .bankwidth = 1,
+ .phys = WINDOW_ADDR_2
},
};
-#define NUM_FLASH_BANKS (sizeof(sc520cdp_map)/sizeof(struct map_info))
+#define NUM_FLASH_BANKS ARRAY_SIZE(sc520cdp_map)
static struct mtd_info *mymtd[NUM_FLASH_BANKS];
static struct mtd_info *merged_mtd;
@@ -226,7 +164,7 @@ struct sc520_par_table
unsigned long default_address;
};
-static struct sc520_par_table par_table[NUM_FLASH_BANKS] =
+static const struct sc520_par_table par_table[NUM_FLASH_BANKS] =
{
{ /* Flash Bank #0: selected by ROMCS0 */
SC520_PAR_ROMCS0,
@@ -248,16 +186,16 @@ static struct sc520_par_table par_table[NUM_FLASH_BANKS] =
static void sc520cdp_setup_par(void)
{
- volatile unsigned long *mmcr;
+ volatile unsigned long __iomem *mmcr;
unsigned long mmcr_val;
int i, j;
/* map in SC520's MMCR area */
- mmcr = (unsigned long *)ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
+ mmcr = ioremap_nocache(SC520_MMCR_BASE, SC520_MMCR_EXTENT);
if(!mmcr) { /* ioremap_nocache failed: skip the PAR reprogramming */
- /* force map_priv_2 fields to BIOS defaults: */
+ /* force physical address fields to BIOS defaults: */
for(i = 0; i < NUM_FLASH_BANKS; i++)
- sc520cdp_map[i].map_priv_2 = par_table[i].default_address;
+ sc520cdp_map[i].phys = par_table[i].default_address;
return;
}
@@ -282,10 +220,10 @@ static void sc520cdp_setup_par(void)
sc520cdp_map[i].name);
printk(KERN_NOTICE "Trying default address 0x%lx\n",
par_table[i].default_address);
- sc520cdp_map[i].map_priv_2 = par_table[i].default_address;
+ sc520cdp_map[i].phys = par_table[i].default_address;
}
}
- iounmap((void *)mmcr);
+ iounmap(mmcr);
}
#endif
@@ -293,20 +231,25 @@ static void sc520cdp_setup_par(void)
static int __init init_sc520cdp(void)
{
int i, devices_found = 0;
-
+
#ifdef REPROGRAM_PAR
/* reprogram PAR registers so flash appears at the desired addresses */
sc520cdp_setup_par();
#endif
for (i = 0; i < NUM_FLASH_BANKS; i++) {
- printk(KERN_NOTICE "SC520 CDP flash device: %lx at %lx\n", sc520cdp_map[i].size, sc520cdp_map[i].map_priv_2);
- sc520cdp_map[i].map_priv_1 = (unsigned long)ioremap_nocache(sc520cdp_map[i].map_priv_2, sc520cdp_map[i].size);
+ printk(KERN_NOTICE "SC520 CDP flash device: 0x%lx at 0x%lx\n",
+ sc520cdp_map[i].size, sc520cdp_map[i].phys);
+
+ sc520cdp_map[i].virt = ioremap_nocache(sc520cdp_map[i].phys, sc520cdp_map[i].size);
- if (!sc520cdp_map[i].map_priv_1) {
+ if (!sc520cdp_map[i].virt) {
printk("Failed to ioremap_nocache\n");
return -EIO;
}
+
+ simple_map_init(&sc520cdp_map[i]);
+
mymtd[i] = do_map_probe("cfi_probe", &sc520cdp_map[i]);
if(!mymtd[i])
mymtd[i] = do_map_probe("jedec_probe", &sc520cdp_map[i]);
@@ -314,11 +257,11 @@ static int __init init_sc520cdp(void)
mymtd[i] = do_map_probe("map_rom", &sc520cdp_map[i]);
if (mymtd[i]) {
- mymtd[i]->module = THIS_MODULE;
+ mymtd[i]->owner = THIS_MODULE;
++devices_found;
}
else {
- iounmap((void *)sc520cdp_map[i].map_priv_1);
+ iounmap(sc520cdp_map[i].virt);
}
}
if(devices_found >= 2) {
@@ -335,7 +278,7 @@ static int __init init_sc520cdp(void)
static void __exit cleanup_sc520cdp(void)
{
int i;
-
+
if (merged_mtd) {
del_mtd_device(merged_mtd);
mtd_concat_destroy(merged_mtd);
@@ -346,9 +289,9 @@ static void __exit cleanup_sc520cdp(void)
for (i = 0; i < NUM_FLASH_BANKS; i++) {
if (mymtd[i])
map_destroy(mymtd[i]);
- if (sc520cdp_map[i].map_priv_1) {
- iounmap((void *)sc520cdp_map[i].map_priv_1);
- sc520cdp_map[i].map_priv_1 = 0;
+ if (sc520cdp_map[i].virt) {
+ iounmap(sc520cdp_map[i].virt);
+ sc520cdp_map[i].virt = NULL;
}
}
}
diff --git a/linux-2.4.x/drivers/mtd/maps/scb2_flash.c b/linux-2.4.x/drivers/mtd/maps/scb2_flash.c
new file mode 100644
index 0000000..97a8dfd
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/scb2_flash.c
@@ -0,0 +1,256 @@
+/*
+ * MTD map driver for BIOS Flash on Intel SCB2 boards
+ * $Id: scb2_flash.c,v 1.12 2005/03/18 14:04:35 gleixner Exp $
+ * Copyright (C) 2002 Sun Microsystems, Inc.
+ * Tim Hockin <thockin@sun.com>
+ *
+ * A few notes on this MTD map:
+ *
+ * This was developed with a small number of SCB2 boards to test on.
+ * Hopefully, Intel has not introduced too many unaccounted-for variables in the
+ * making of this board.
+ *
+ * The BIOS marks its own memory region as 'reserved' in the e820 map. We
+ * try to request it here, but if it fails, we carry on anyway.
+ *
+ * This is how the chip is attached, so said the schematic:
+ * * a 4 MiB (32 Mib) 16 bit chip
+ * * a 1 MiB memory region
+ * * A20 and A21 pulled up
+ * * D8-D15 ignored
+ * What this means is that, while we are addressing bytes linearly, we are
+ * really addressing words, and discarding the other byte. This means that
+ * the chip MUST BE at least 2 MiB. This also means that every block is
+ * actually half as big as the chip reports. It also means that accesses of
+ * logical address 0 hit higher-address sections of the chip, not physical 0.
+ * One can only hope that these 4MiB x16 chips were a lot cheaper than 1MiB x8
+ * chips.
+ *
+ * This driver assumes the chip is not write-protected by an external signal.
+ * As of this writing, that is true, but may change, just to spite me.
+ *
+ * The actual BIOS layout has been mostly reverse engineered. Intel BIOS
+ * updates for this board include 10 related (*.bio - &.bi9) binary files and
+ * another separate (*.bbo) binary file. The 10 files are 64k of data + a
+ * small header. If the headers are stripped off, the 10 64k files can be
+ * concatenated into a 640k image. This is your BIOS image, proper. The
+ * separate .bbo file also has a small header. It is the 'Boot Block'
+ * recovery BIOS. Once the header is stripped, no further prep is needed.
+ * As best I can tell, the BIOS is arranged as such:
+ * offset 0x00000 to 0x4ffff (320k): unknown - SCSI BIOS, etc?
+ * offset 0x50000 to 0xeffff (640k): BIOS proper
+ * offset 0xf0000 to 0xfffff (64k): Boot Block region
+ *
+ * Intel's BIOS update program flashes the BIOS and Boot Block in separate
+ * steps. Probably a wise thing to do.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi.h>
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+
+#define MODNAME "scb2_flash"
+#define SCB2_ADDR 0xfff00000
+#define SCB2_WINDOW 0x00100000
+
+
+static void __iomem *scb2_ioaddr;
+static struct mtd_info *scb2_mtd;
+static struct map_info scb2_map = {
+ .name = "SCB2 BIOS Flash",
+ .size = 0,
+ .bankwidth = 1,
+};
+static int region_fail;
+
+static int __devinit
+scb2_fixup_mtd(struct mtd_info *mtd)
+{
+ int i;
+ int done = 0;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /* barf if this doesn't look right */
+ if (cfi->cfiq->InterfaceDesc != 1) {
+ printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
+ cfi->cfiq->InterfaceDesc);
+ return -1;
+ }
+
+ /* I wasn't here. I didn't see. dwmw2. */
+
+ /* the chip is sometimes bigger than the map - what a waste */
+ mtd->size = map->size;
+
+ /*
+ * We only REALLY get half the chip, due to the way it is
+ * wired up - D8-D15 are tossed away. We read linear bytes,
+ * but in reality we are getting 1/2 of each 16-bit read,
+ * which LOOKS linear to us. Because CFI code accounts for
+ * things like lock/unlock/erase by eraseregions, we need to
+ * fudge them to reflect this. Erases go like this:
+ * * send an erase to an address
+ * * the chip samples the address and erases the block
+ * * add the block erasesize to the address and repeat
+ * -- the problem is that addresses are 16-bit addressable
+ * -- we end up erasing every-other block
+ */
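+ /*
+ * A worked example of the halving (block size assumed for illustration):
+ * a CFI-reported 128 KiB erase block is 64 Ki sixteen-bit words; since
+ * each logical byte address here selects one word and only its low byte
+ * is visible, that block spans just 64 KiB of this map.
+ */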
+ mtd->erasesize /= 2;
+ for (i = 0; i < mtd->numeraseregions; i++) {
+ struct mtd_erase_region_info *region = &mtd->eraseregions[i];
+ region->erasesize /= 2;
+ }
+
+ /*
+ * If the chip is bigger than the map, it is wired with the high
+ * address lines pulled up. This makes us access the top portion of
+ * the chip, so all our erase-region info is wrong. Start cutting from
+ * the bottom.
+ */
+ for (i = 0; !done && i < mtd->numeraseregions; i++) {
+ struct mtd_erase_region_info *region = &mtd->eraseregions[i];
+
+ if (region->numblocks * region->erasesize > mtd->size) {
+ region->numblocks = (mtd->size / region->erasesize);
+ done = 1;
+ } else {
+ region->numblocks = 0;
+ }
+ region->offset = 0;
+ }
+
+ return 0;
+}
+
+/* CSB5's 'Function Control Register' has bits for decoding @ >= 0xffc00000 */
+#define CSB5_FCR 0x41
+#define CSB5_FCR_DECODE_ALL 0x0e
+static int __devinit
+scb2_flash_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+ u8 reg;
+
+ /* enable decoding of the flash region in the south bridge */
+ pci_read_config_byte(dev, CSB5_FCR, &reg);
+ pci_write_config_byte(dev, CSB5_FCR, reg | CSB5_FCR_DECODE_ALL);
+
+ if (!request_mem_region(SCB2_ADDR, SCB2_WINDOW, scb2_map.name)) {
+ /*
+ * The BIOS seems to mark the flash region as 'reserved'
+ * in the e820 map. Warn and go about our business.
+ */
+ printk(KERN_WARNING MODNAME
+ ": warning - can't reserve rom window, continuing\n");
+ region_fail = 1;
+ }
+
+ /* remap the IO window (w/o caching) */
+ scb2_ioaddr = ioremap_nocache(SCB2_ADDR, SCB2_WINDOW);
+ if (!scb2_ioaddr) {
+ printk(KERN_ERR MODNAME ": Failed to ioremap window!\n");
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+ return -ENOMEM;
+ }
+
+ scb2_map.phys = SCB2_ADDR;
+ scb2_map.virt = scb2_ioaddr;
+ scb2_map.size = SCB2_WINDOW;
+
+ simple_map_init(&scb2_map);
+
+ /* try to find a chip */
+ scb2_mtd = do_map_probe("cfi_probe", &scb2_map);
+
+ if (!scb2_mtd) {
+ printk(KERN_ERR MODNAME ": flash probe failed!\n");
+ iounmap(scb2_ioaddr);
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+ return -ENODEV;
+ }
+
+ scb2_mtd->owner = THIS_MODULE;
+ if (scb2_fixup_mtd(scb2_mtd) < 0) {
+ del_mtd_device(scb2_mtd);
+ map_destroy(scb2_mtd);
+ iounmap(scb2_ioaddr);
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+ return -ENODEV;
+ }
+
+ printk(KERN_NOTICE MODNAME ": chip size 0x%x at offset 0x%x\n",
+ scb2_mtd->size, SCB2_WINDOW - scb2_mtd->size);
+
+ add_mtd_device(scb2_mtd);
+
+ return 0;
+}
+
+static void __devexit
+scb2_flash_remove(struct pci_dev *dev)
+{
+ if (!scb2_mtd)
+ return;
+
+ /* disable flash writes */
+ if (scb2_mtd->lock)
+ scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
+
+ del_mtd_device(scb2_mtd);
+ map_destroy(scb2_mtd);
+
+ iounmap(scb2_ioaddr);
+ scb2_ioaddr = NULL;
+
+ if (!region_fail)
+ release_mem_region(SCB2_ADDR, SCB2_WINDOW);
+ pci_set_drvdata(dev, NULL);
+}
+
+static struct pci_device_id scb2_flash_pci_ids[] = {
+ {
+ .vendor = PCI_VENDOR_ID_SERVERWORKS,
+ .device = PCI_DEVICE_ID_SERVERWORKS_CSB5,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID
+ },
+ { 0, }
+};
+
+static struct pci_driver scb2_flash_driver = {
+ .name = "Intel SCB2 BIOS Flash",
+ .id_table = scb2_flash_pci_ids,
+ .probe = scb2_flash_probe,
+ .remove = __devexit_p(scb2_flash_remove),
+};
+
+static int __init
+scb2_flash_init(void)
+{
+ return pci_register_driver(&scb2_flash_driver);
+}
+
+static void __exit
+scb2_flash_exit(void)
+{
+ pci_unregister_driver(&scb2_flash_driver);
+}
+
+module_init(scb2_flash_init);
+module_exit(scb2_flash_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
+MODULE_DESCRIPTION("MTD map driver for Intel SCB2 BIOS Flash");
+MODULE_DEVICE_TABLE(pci, scb2_flash_pci_ids);
diff --git a/linux-2.4.x/drivers/mtd/maps/scx200_docflash.c b/linux-2.4.x/drivers/mtd/maps/scx200_docflash.c
new file mode 100644
index 0000000..d5cf7cb
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/scx200_docflash.c
@@ -0,0 +1,233 @@
+/* linux/drivers/mtd/maps/scx200_docflash.c
+
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
+
+ $Id: scx200_docflash.c,v 1.13 2006/03/29 08:31:12 dwmw2 Exp $
+
+ National Semiconductor SCx200 flash mapped with DOCCS
+*/
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/pci.h>
+#include <linux/scx200.h>
+
+#define NAME "scx200_docflash"
+
+MODULE_AUTHOR("Christer Weinigel <wingel@hack.org>");
+MODULE_DESCRIPTION("NatSemi SCx200 DOCCS Flash Driver");
+MODULE_LICENSE("GPL");
+
+static int probe = 0; /* Don't autoprobe */
+static unsigned size = 0x1000000; /* 16 MiB, the whole ISA address space */
+static unsigned width = 8; /* Default to 8 bits wide */
+static char *flashtype = "cfi_probe";
+
+module_param(probe, int, 0);
+MODULE_PARM_DESC(probe, "Probe for a BIOS mapping");
+module_param(size, int, 0);
+MODULE_PARM_DESC(size, "Size of the flash mapping");
+module_param(width, int, 0);
+MODULE_PARM_DESC(width, "Data width of the flash mapping (8/16)");
+module_param(flashtype, charp, 0);
+MODULE_PARM_DESC(flashtype, "Type of MTD probe to do");
+
+static struct resource docmem = {
+ .flags = IORESOURCE_MEM,
+ .name = "NatSemi SCx200 DOCCS Flash",
+};
+
+static struct mtd_info *mymtd;
+
+#ifdef CONFIG_MTD_PARTITIONS
+static struct mtd_partition partition_info[] = {
+ {
+ .name = "DOCCS Boot kernel",
+ .offset = 0,
+ .size = 0xc0000
+ },
+ {
+ .name = "DOCCS Low BIOS",
+ .offset = 0xc0000,
+ .size = 0x40000
+ },
+ {
+ .name = "DOCCS File system",
+ .offset = 0x100000,
+ .size = ~0 /* calculate from flash size */
+ },
+ {
+ .name = "DOCCS High BIOS",
+ .offset = ~0, /* calculate from flash size */
+ .size = 0x80000
+ },
+};
+#define NUM_PARTITIONS ARRAY_SIZE(partition_info)
+#endif
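+
+/*
+ * Example of how the two "calculate from flash size" fields are filled in
+ * (a 16 MiB part is assumed for illustration): init_scx200_docflash()
+ * below sets the High BIOS offset to 0x1000000 - 0x80000 = 0xf80000 and
+ * the File system size to 0xf80000 - 0x100000 = 0xe80000.
+ */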
+
+
+static struct map_info scx200_docflash_map = {
+ .name = "NatSemi SCx200 DOCCS Flash",
+};
+
+static int __init init_scx200_docflash(void)
+{
+ unsigned u;
+ unsigned base;
+ unsigned ctrl;
+ unsigned pmr;
+ struct pci_dev *bridge;
+
+ printk(KERN_DEBUG NAME ": NatSemi SCx200 DOCCS Flash Driver\n");
+
+ if ((bridge = pci_find_device(PCI_VENDOR_ID_NS,
+ PCI_DEVICE_ID_NS_SCx200_BRIDGE,
+ NULL)) == NULL)
+ return -ENODEV;
+
+ /* check that we have found the configuration block */
+ if (!scx200_cb_present())
+ return -ENODEV;
+
+ if (probe) {
+ /* Try to use the present flash mapping if any */
+ pci_read_config_dword(bridge, SCx200_DOCCS_BASE, &base);
+ pci_read_config_dword(bridge, SCx200_DOCCS_CTRL, &ctrl);
+ pmr = inl(scx200_cb_base + SCx200_PMR);
+
+ if (base == 0
+ || (ctrl & 0x07000000) != 0x07000000
+ || (ctrl & 0x0007ffff) == 0)
+ return -ENODEV;
+
+ size = ((ctrl&0x1fff)<<13) + (1<<13);
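+ /*
+ * For example (field value assumed): a size field of 0x07f decodes
+ * to (0x7f << 13) + 0x2000 = 0x100000, i.e. a 1 MiB window.
+ */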
+
+ for (u = size; u > 1; u >>= 1)
+ ;
+ if (u != 1)
+ return -ENODEV;
+
+ if (pmr & (1<<6))
+ width = 16;
+ else
+ width = 8;
+
+ docmem.start = base;
+ docmem.end = base + size;
+
+ if (request_resource(&iomem_resource, &docmem)) {
+ printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
+ return -ENOMEM;
+ }
+ } else {
+ for (u = size; u > 1; u >>= 1)
+ ;
+ if (u != 1) {
+ printk(KERN_ERR NAME ": invalid size for flash mapping\n");
+ return -EINVAL;
+ }
+
+ if (width != 8 && width != 16) {
+ printk(KERN_ERR NAME ": invalid bus width for flash mapping\n");
+ return -EINVAL;
+ }
+
+ if (allocate_resource(&iomem_resource, &docmem,
+ size,
+ 0xc0000000, 0xffffffff,
+ size, NULL, NULL)) {
+ printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n");
+ return -ENOMEM;
+ }
+
+ ctrl = 0x07000000 | ((size-1) >> 13);
+
+ printk(KERN_INFO "DOCCS BASE=0x%08lx, CTRL=0x%08lx\n", (long)docmem.start, (long)ctrl);
+
+ pci_write_config_dword(bridge, SCx200_DOCCS_BASE, docmem.start);
+ pci_write_config_dword(bridge, SCx200_DOCCS_CTRL, ctrl);
+ pmr = inl(scx200_cb_base + SCx200_PMR);
+
+ if (width == 8) {
+ pmr &= ~(1<<6);
+ } else {
+ pmr |= (1<<6);
+ }
+ outl(pmr, scx200_cb_base + SCx200_PMR);
+ }
+
+ printk(KERN_INFO NAME ": DOCCS mapped at 0x%lx-0x%lx, width %d\n",
+ docmem.start, docmem.end, width);
+
+ scx200_docflash_map.size = size;
+ if (width == 8)
+ scx200_docflash_map.bankwidth = 1;
+ else
+ scx200_docflash_map.bankwidth = 2;
+
+ simple_map_init(&scx200_docflash_map);
+
+ scx200_docflash_map.phys = docmem.start;
+ scx200_docflash_map.virt = ioremap(docmem.start, scx200_docflash_map.size);
+ if (!scx200_docflash_map.virt) {
+ printk(KERN_ERR NAME ": failed to ioremap the flash\n");
+ release_resource(&docmem);
+ return -EIO;
+ }
+
+ mymtd = do_map_probe(flashtype, &scx200_docflash_map);
+ if (!mymtd) {
+ printk(KERN_ERR NAME ": unable to detect flash\n");
+ iounmap(scx200_docflash_map.virt);
+ release_resource(&docmem);
+ return -ENXIO;
+ }
+
+ if (size < mymtd->size)
+ printk(KERN_WARNING NAME ": warning, flash mapping is smaller than flash size\n");
+
+ mymtd->owner = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ partition_info[3].offset = mymtd->size-partition_info[3].size;
+ partition_info[2].size = partition_info[3].offset-partition_info[2].offset;
+ add_mtd_partitions(mymtd, partition_info, NUM_PARTITIONS);
+#else
+ add_mtd_device(mymtd);
+#endif
+ return 0;
+}
+
+static void __exit cleanup_scx200_docflash(void)
+{
+ if (mymtd) {
+#ifdef CONFIG_MTD_PARTITIONS
+ del_mtd_partitions(mymtd);
+#else
+ del_mtd_device(mymtd);
+#endif
+ map_destroy(mymtd);
+ }
+ if (scx200_docflash_map.virt) {
+ iounmap(scx200_docflash_map.virt);
+ release_resource(&docmem);
+ }
+}
+
+module_init(init_scx200_docflash);
+module_exit(cleanup_scx200_docflash);
+
+/*
+ Local variables:
+ compile-command: "make -k -C ../../.. SUBDIRS=drivers/mtd/maps modules"
+ c-basic-offset: 8
+ End:
+*/
diff --git a/linux-2.4.x/drivers/mtd/maps/sharpsl-flash.c b/linux-2.4.x/drivers/mtd/maps/sharpsl-flash.c
new file mode 100644
index 0000000..238f7fa
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/sharpsl-flash.c
@@ -0,0 +1,118 @@
+/*
+ * sharpsl-flash.c
+ *
+ * Copyright (C) 2001 Lineo Japan, Inc.
+ * Copyright (C) 2002 SHARP
+ *
+ * $Id: sharpsl-flash.c,v 1.8 2006/03/29 08:31:12 dwmw2 Exp $
+ *
+ * based on rpxlite.c,v 1.15 2001/10/02 15:05:14 dwmw2 Exp
+ * Handle mapping of the flash on the RPX Lite and CLLF boards
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/mach-types.h>
+
+#define WINDOW_ADDR 0x00000000
+#define WINDOW_SIZE 0x00800000
+#define BANK_WIDTH 2
+
+static struct mtd_info *mymtd;
+
+struct map_info sharpsl_map = {
+ .name = "sharpsl-flash",
+ .size = WINDOW_SIZE,
+ .bankwidth = BANK_WIDTH,
+ .phys = WINDOW_ADDR
+};
+
+static struct mtd_partition sharpsl_partitions[1] = {
+ {
+ name: "Boot PROM Filesystem",
+ }
+};
+
+int __init init_sharpsl(void)
+{
+ struct mtd_partition *parts;
+ int nb_parts = 0;
+ char *part_type = "static";
+
+ printk(KERN_NOTICE "Sharp SL series flash device: %x at %x\n",
+ WINDOW_SIZE, WINDOW_ADDR);
+ sharpsl_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
+ if (!sharpsl_map.virt) {
+ printk("Failed to ioremap\n");
+ return -EIO;
+ }
+
+ simple_map_init(&sharpsl_map);
+
+ mymtd = do_map_probe("map_rom", &sharpsl_map);
+ if (!mymtd) {
+ iounmap(sharpsl_map.virt);
+ return -ENXIO;
+ }
+
+ mymtd->owner = THIS_MODULE;
+
+ if (machine_is_corgi() || machine_is_shepherd() || machine_is_husky()
+ || machine_is_poodle()) {
+ sharpsl_partitions[0].size=0x006d0000;
+ sharpsl_partitions[0].offset=0x00120000;
+ } else if (machine_is_tosa()) {
+ sharpsl_partitions[0].size=0x006a0000;
+ sharpsl_partitions[0].offset=0x00160000;
+ } else if (machine_is_spitz() || machine_is_akita() || machine_is_borzoi()) {
+ sharpsl_partitions[0].size=0x006b0000;
+ sharpsl_partitions[0].offset=0x00140000;
+ } else {
+ map_destroy(mymtd);
+ iounmap(sharpsl_map.virt);
+ return -ENODEV;
+ }
+
+ parts = sharpsl_partitions;
+ nb_parts = ARRAY_SIZE(sharpsl_partitions);
+
+ printk(KERN_NOTICE "Using %s partision definition\n", part_type);
+ add_mtd_partitions(mymtd, parts, nb_parts);
+
+ return 0;
+}
+
+static void __exit cleanup_sharpsl(void)
+{
+ if (mymtd) {
+ del_mtd_partitions(mymtd);
+ map_destroy(mymtd);
+ }
+ if (sharpsl_map.virt) {
+ iounmap(sharpsl_map.virt);
+ sharpsl_map.virt = NULL;
+ }
+}
+
+module_init(init_sharpsl);
+module_exit(cleanup_sharpsl);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("SHARP (Original: Arnold Christensen <AKC@pel.dk>)");
+MODULE_DESCRIPTION("MTD map driver for SHARP SL series");
diff --git a/linux-2.4.x/drivers/mtd/maps/solutionengine.c b/linux-2.4.x/drivers/mtd/maps/solutionengine.c
index 185a573..c53c2c3 100644
--- a/linux-2.4.x/drivers/mtd/maps/solutionengine.c
+++ b/linux-2.4.x/drivers/mtd/maps/solutionengine.c
@@ -1,5 +1,5 @@
/*
- * $Id: solutionengine.c,v 1.4 2001/11/07 01:20:59 jsiegel Exp $
+ * $Id: solutionengine.c,v 1.15 2005/11/07 11:14:28 gleixner Exp $
*
* Flash and EPROM on Hitachi Solution Engine and similar boards.
*
@@ -11,31 +11,13 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/config.h>
-
-
-extern int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts);
-
-__u32 soleng_read32(struct map_info *map, unsigned long ofs)
-{
- return __raw_readl(map->map_priv_1 + ofs);
-}
-
-void soleng_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- __raw_writel(d, map->map_priv_1 + adr);
- mb();
-}
-
-void soleng_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
-
+#include <linux/errno.h>
static struct mtd_info *flash_mtd;
static struct mtd_info *eprom_mtd;
@@ -43,35 +25,33 @@ static struct mtd_info *eprom_mtd;
static struct mtd_partition *parsed_parts;
struct map_info soleng_eprom_map = {
- name: "Solution Engine EPROM",
- size: 0x400000,
- buswidth: 4,
- copy_from: soleng_copy_from,
+ .name = "Solution Engine EPROM",
+ .size = 0x400000,
+ .bankwidth = 4,
};
struct map_info soleng_flash_map = {
- name: "Solution Engine FLASH",
- size: 0x400000,
- buswidth: 4,
- read32: soleng_read32,
- copy_from: soleng_copy_from,
- write32: soleng_write32,
+ .name = "Solution Engine FLASH",
+ .size = 0x400000,
+ .bankwidth = 4,
};
+static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+
#ifdef CONFIG_MTD_SUPERH_RESERVE
static struct mtd_partition superh_se_partitions[] = {
/* Reserved for boot code, read-only */
{
- name: "flash_boot",
- offset: 0x00000000,
- size: CONFIG_MTD_SUPERH_RESERVE,
- mask_flags: MTD_WRITEABLE,
+ .name = "flash_boot",
+ .offset = 0x00000000,
+ .size = CONFIG_MTD_SUPERH_RESERVE,
+ .mask_flags = MTD_WRITEABLE,
},
/* All else is writable (e.g. JFFS) */
{
- name: "Flash FS",
- offset: MTDPART_OFS_NXTBLK,
- size: MTDPART_SIZ_FULL,
+ .name = "Flash FS",
+ .offset = MTDPART_OFS_NXTBLK,
+ .size = MTDPART_SIZ_FULL,
}
};
#endif /* CONFIG_MTD_SUPERH_RESERVE */
@@ -81,16 +61,22 @@ static int __init init_soleng_maps(void)
int nr_parts = 0;
/* First probe at offset 0 */
- soleng_flash_map.map_priv_1 = P2SEGADDR(0);
- soleng_eprom_map.map_priv_1 = P1SEGADDR(0x01000000);
+ soleng_flash_map.phys = 0;
+ soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
+ soleng_eprom_map.phys = 0x01000000;
+ soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0x01000000);
+ simple_map_init(&soleng_eprom_map);
+ simple_map_init(&soleng_flash_map);
printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n");
flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
if (!flash_mtd) {
/* Not there. Try swapping */
printk(KERN_NOTICE "Probing for flash chips at 0x01000000:\n");
- soleng_flash_map.map_priv_1 = P2SEGADDR(0x01000000);
- soleng_eprom_map.map_priv_1 = P1SEGADDR(0);
+ soleng_flash_map.phys = 0x01000000;
+ soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0x01000000);
+ soleng_eprom_map.phys = 0;
+ soleng_eprom_map.virt = (void __iomem *)P1SEGADDR(0);
flash_mtd = do_map_probe("cfi_probe", &soleng_flash_map);
if (!flash_mtd) {
/* Eep. */
@@ -99,25 +85,20 @@ static int __init init_soleng_maps(void)
}
}
printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n",
- soleng_flash_map.map_priv_1 & 0x1fffffff,
- soleng_eprom_map.map_priv_1 & 0x1fffffff);
- flash_mtd->module = THIS_MODULE;
+ soleng_flash_map.phys & 0x1fffffff,
+ soleng_eprom_map.phys & 0x1fffffff);
+ flash_mtd->owner = THIS_MODULE;
eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
if (eprom_mtd) {
- eprom_mtd->module = THIS_MODULE;
+ eprom_mtd->owner = THIS_MODULE;
add_mtd_device(eprom_mtd);
}
-#ifdef CONFIG_MTD_REDBOOT_PARTS
- nr_parts = parse_redboot_partitions(flash_mtd, &parsed_parts);
- if (nr_parts > 0)
- printk(KERN_NOTICE "Found RedBoot partition table.\n");
- else if (nr_parts < 0)
- printk(KERN_NOTICE "Error looking for RedBoot partitions.\n");
-#endif /* CONFIG_MTD_REDBOOT_PARTS */
-#if CONFIG_MTD_SUPERH_RESERVE
- if (nr_parts == 0) {
+ nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
+
+#ifdef CONFIG_MTD_SUPERH_RESERVE
+ if (nr_parts <= 0) {
printk(KERN_NOTICE "Using configured partition at 0x%08x.\n",
CONFIG_MTD_SUPERH_RESERVE);
parsed_parts = superh_se_partitions;
diff --git a/linux-2.4.x/drivers/mtd/maps/sun_uflash.c b/linux-2.4.x/drivers/mtd/maps/sun_uflash.c
index b431bf2..f64fae9 100644
--- a/linux-2.4.x/drivers/mtd/maps/sun_uflash.c
+++ b/linux-2.4.x/drivers/mtd/maps/sun_uflash.c
@@ -1,4 +1,4 @@
-/* $Id: sun_uflash.c,v 1.4 2001/10/02 15:05:14 dwmw2 Exp $
+/* $Id: sun_uflash.c,v 1.14 2005/11/29 20:01:28 gleixner Exp $
*
* sun_uflash - Driver implementation for user-programmable flash
* present on many Sun Microsystems SME boardsets.
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -48,60 +47,11 @@ struct uflash_dev {
struct list_head list;
};
-__u8 uflash_read8(struct map_info *map, unsigned long ofs)
-{
- return(__raw_readb(map->map_priv_1 + ofs));
-}
-
-__u16 uflash_read16(struct map_info *map, unsigned long ofs)
-{
- return(__raw_readw(map->map_priv_1 + ofs));
-}
-
-__u32 uflash_read32(struct map_info *map, unsigned long ofs)
-{
- return(__raw_readl(map->map_priv_1 + ofs));
-}
-
-void uflash_copy_from(struct map_info *map, void *to, unsigned long from,
- ssize_t len)
-{
- memcpy_fromio(to, map->map_priv_1 + from, len);
-}
-
-void uflash_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- __raw_writeb(d, map->map_priv_1 + adr);
-}
-
-void uflash_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- __raw_writew(d, map->map_priv_1 + adr);
-}
-
-void uflash_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- __raw_writel(d, map->map_priv_1 + adr);
-}
-
-void uflash_copy_to(struct map_info *map, unsigned long to, const void *from,
- ssize_t len)
-{
- memcpy_toio(map->map_priv_1 + to, from, len);
-}
struct map_info uflash_map_templ = {
- name: "SUNW,???-????",
- size: UFLASH_WINDOW_SIZE,
- buswidth: UFLASH_BUSWIDTH,
- read8: uflash_read8,
- read16: uflash_read16,
- read32: uflash_read32,
- copy_from: uflash_copy_from,
- write8: uflash_write8,
- write16: uflash_write16,
- write32: uflash_write32,
- copy_to: uflash_copy_to
+ .name = "SUNW,???-????",
+ .size = UFLASH_WINDOW_SIZE,
+ .bankwidth = UFLASH_BUSWIDTH,
};
int uflash_devinit(struct linux_ebus_device* edev)
@@ -113,7 +63,7 @@ int uflash_devinit(struct linux_ebus_device* edev)
iTmp = prom_getproperty(
edev->prom_node, "reg", (void *)regs, sizeof(regs));
if ((iTmp % sizeof(regs[0])) != 0) {
- printk("%s: Strange reg property size %d\n",
+ printk("%s: Strange reg property size %d\n",
UFLASH_DEVNAME, iTmp);
return -ENODEV;
}
@@ -125,7 +75,7 @@ int uflash_devinit(struct linux_ebus_device* edev)
* can work on supporting it.
*/
printk("%s: unsupported device at 0x%lx (%d regs): " \
- "email ebrower@usa.net\n",
+ "email ebrower@usa.net\n",
UFLASH_DEVNAME, edev->resource[0].start, nregs);
return -ENODEV;
}
@@ -134,7 +84,7 @@ int uflash_devinit(struct linux_ebus_device* edev)
printk("%s: unable to kmalloc new device\n", UFLASH_DEVNAME);
return(-ENOMEM);
}
-
+
/* copy defaults and tweak parameters */
memcpy(&pdev->map, &uflash_map_templ, sizeof(uflash_map_templ));
pdev->map.size = regs[0].reg_size;
@@ -145,20 +95,21 @@ int uflash_devinit(struct linux_ebus_device* edev)
if(0 != pdev->name && 0 < strlen(pdev->name)) {
pdev->map.name = pdev->name;
}
-
- pdev->map.map_priv_1 =
- (unsigned long)ioremap_nocache(edev->resource[0].start, pdev->map.size);
- if(0 == pdev->map.map_priv_1) {
+ pdev->map.phys = edev->resource[0].start;
+ pdev->map.virt = ioremap_nocache(edev->resource[0].start, pdev->map.size);
+ if(0 == pdev->map.virt) {
printk("%s: failed to map device\n", __FUNCTION__);
kfree(pdev->name);
kfree(pdev);
return(-1);
}
+ simple_map_init(&pdev->map);
+
/* MTD registration */
pdev->mtd = do_map_probe("cfi_probe", &pdev->map);
if(0 == pdev->mtd) {
- iounmap((void *)pdev->map.map_priv_1);
+ iounmap(pdev->map.virt);
kfree(pdev->name);
kfree(pdev);
return(-ENXIO);
@@ -166,7 +117,7 @@ int uflash_devinit(struct linux_ebus_device* edev)
list_add(&pdev->list, &device_list);
- pdev->mtd->module = THIS_MODULE;
+ pdev->mtd->owner = THIS_MODULE;
add_mtd_device(pdev->mtd);
return(0);
@@ -204,22 +155,20 @@ static void __exit uflash_cleanup(void)
list_for_each(udevlist, &device_list) {
udev = list_entry(udevlist, struct uflash_dev, list);
- DEBUG(2, "%s: removing device %s\n",
+ DEBUG(2, "%s: removing device %s\n",
UFLASH_DEVNAME, udev->name);
if(0 != udev->mtd) {
del_mtd_device(udev->mtd);
map_destroy(udev->mtd);
}
- if(0 != udev->map.map_priv_1) {
- iounmap((void*)udev->map.map_priv_1);
- udev->map.map_priv_1 = 0;
- }
- if(0 != udev->name) {
- kfree(udev->name);
+ if(0 != udev->map.virt) {
+ iounmap(udev->map.virt);
+ udev->map.virt = NULL;
}
+ kfree(udev->name);
kfree(udev);
- }
+ }
}
module_init(uflash_init);
diff --git a/linux-2.4.x/drivers/mtd/maps/tqm834x.c b/linux-2.4.x/drivers/mtd/maps/tqm834x.c
new file mode 100644
index 0000000..c7ae9a5
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/tqm834x.c
@@ -0,0 +1,291 @@
+/*
+ * drivers/mtd/maps/tqm834x.c
+ *
+ * MTD mapping driver for TQM834x boards
+ *
+ * Copyright 2005 Wolfgang Denk, DENX Software Engineering, <wd@denx.de>.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <asm/ppcboot.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#define FLASH_BANK_MAX 2
+
+extern unsigned char __res[];
+
+/* trivial struct to describe partition information */
+struct mtd_part_def
+{
+ int nums;
+ unsigned char *type;
+ struct mtd_partition* mtd_part;
+};
+
+static struct mtd_info* mtd_banks[FLASH_BANK_MAX];
+static struct map_info* map_banks[FLASH_BANK_MAX];
+static struct mtd_part_def part_banks[FLASH_BANK_MAX];
+
+static unsigned long num_banks;
+static unsigned long start_scan_addr;
+
+#ifdef CONFIG_MTD_PARTITIONS
+/*
+ * The following defines the partition layout of TQM834x boards.
+ *
+ * See include/linux/mtd/partitions.h for definition of the
+ * mtd_partition structure.
+ *
+ * Assume a minimal initial size of 4 MiB per bank; it will be updated
+ * later in the init_tqm834x_mtd() routine.
+ */
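+
+/*
+ * For example (assuming an 8 MiB first bank): init_tqm834x_mtd() grows
+ * the last "user" partition to 0x800000 - 0x360000 = 0x4a0000 bytes, and
+ * a second 8 MiB bank would get a single 8 MiB "jffs2" partition.
+ */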
+
+/* Partition definition for the first flash bank which is always present. */
+static struct mtd_partition tqm834x_partitions_bank1[] = {
+ {
+ .name = "u-boot", /* u-boot firmware */
+ .offset = 0x00000000,
+ .size = 0x00040000, /* 256 KiB */
+ /*mask_flags: MTD_WRITEABLE, * force read-only */
+ },
+ {
+ .name = "env", /* u-boot environment */
+ .offset = 0x00040000,
+ .size = 0x00020000, /* 128 KiB */
+ /*mask_flags: MTD_WRITEABLE, * force read-only */
+ },
+ {
+ .name = "kernel", /* linux kernel image */
+ .offset = 0x00060000,
+ .size = 0x00100000, /* 1 MiB */
+ /*mask_flags: MTD_WRITEABLE, * force read-only */
+ },
+ {
+ .name = "initrd", /* ramdisk image */
+ .offset = 0x00160000,
+ .size = 0x00200000, /* 2 MiB */
+ },
+ {
+ .name = "user", /* user data */
+ .offset = 0x00360000,
+ .size = 0x000a0000, /* remaining space */
+ /* NOTE: this partition size is re-calculated in */
+ /* init_tqm834x_mtd() to cover actual remaining space. */
+ },
+};
+
+/* Partition definition for the second flash bank which may be present on some
+ * TQM834x boards.
+ */
+static struct mtd_partition tqm834x_partitions_bank2[] = {
+ {
+ .name = "jffs2", /* jffs2 filesystem */
+ .offset = 0x00000000,
+ .size = 0x00400000, /* whole device */
+ /* NOTE: this partition size is re-calculated in */
+ /* init_tqm834x_mtd() to cover actual device size. */
+ },
+};
+
+#endif /* CONFIG_MTD_PARTITIONS */
+
+static int __init init_tqm834x_mtd(void)
+{
+ int idx = 0, ret = 0;
+ unsigned long flash_addr, flash_size, mtd_size = 0;
+
+ /* pointer to TQM834x board info data */
+ bd_t *bd = (bd_t *)__res;
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ int n;
+ char mtdid[4];
+ const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+ flash_addr = bd->bi_flashstart;
+ flash_size = bd->bi_flashsize;
+
+ /* request maximum flash size address space */
+ start_scan_addr = (unsigned long)ioremap(flash_addr, flash_size);
+ if (!start_scan_addr) {
+ printk("%s: Failed to ioremap address: 0x%lx\n",
+ __FUNCTION__, flash_addr);
+ return -EIO;
+ }
+
+ for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
+ if (mtd_size >= flash_size)
+ break;
+
+ pr_debug("%s: chip probing count %d\n", __FUNCTION__, idx);
+
+ map_banks[idx] =
+ (struct map_info *)kmalloc(sizeof(struct map_info),
+ GFP_KERNEL);
+ if (map_banks[idx] == NULL) {
+ ret = -ENOMEM;
+ goto error_mem;
+ }
+ memset((void *)map_banks[idx], 0, sizeof(struct map_info));
+ map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL);
+ if (map_banks[idx]->name == NULL) {
+ ret = -ENOMEM;
+ goto error_mem;
+ }
+ memset((void *)map_banks[idx]->name, 0, 16);
+
+ sprintf(map_banks[idx]->name, "TQM834x-%d", idx);
+ map_banks[idx]->size = flash_size;
+ map_banks[idx]->bankwidth = 4;
+
+ simple_map_init(map_banks[idx]);
+
+ map_banks[idx]->virt = (void __iomem *)
+ (start_scan_addr + ((idx > 0) ?
+ (mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0));
+ map_banks[idx]->phys =
+ flash_addr + ((idx > 0) ?
+ (mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0);
+
+ /* start to probe flash chips */
+ mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]);
+ if (mtd_banks[idx]) {
+ mtd_banks[idx]->owner = THIS_MODULE;
+ mtd_size += mtd_banks[idx]->size;
+ num_banks++;
+ pr_debug("%s: bank %ld, name: %s, size: %d bytes \n",
+ __FUNCTION__, num_banks,
+ mtd_banks[idx]->name, mtd_banks[idx]->size);
+ }
+ }
+
+ /* no supported flash chips found */
+ if (!num_banks) {
+ printk("TQM834x: No supported flash chips found!\n");
+ ret = -ENXIO;
+ goto error_mem;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ /*
+ * Select static partition definitions
+ */
+ n = ARRAY_SIZE(tqm834x_partitions_bank1);
+ part_banks[0].mtd_part = tqm834x_partitions_bank1;
+ part_banks[0].type = "static image bank1";
+ part_banks[0].nums = n;
+
+ /* update last partition size to cover actual remaining space */
+ tqm834x_partitions_bank1[n - 1].size =
+ mtd_banks[0]->size -
+ tqm834x_partitions_bank1[n - 1].offset;
+
+	/* check if we have a second bank */
+ if (num_banks == 2) {
+ n = ARRAY_SIZE(tqm834x_partitions_bank2);
+ part_banks[1].mtd_part = tqm834x_partitions_bank2;
+ part_banks[1].type = "static image bank2";
+ part_banks[1].nums = n;
+
+ /* update last partition size to cover actual remaining space */
+ tqm834x_partitions_bank2[n - 1].size =
+ mtd_banks[1]->size -
+ tqm834x_partitions_bank2[n - 1].offset;
+ }
+
+ for(idx = 0; idx < num_banks ; idx++) {
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ sprintf(mtdid, "%d", idx);
+ n = parse_mtd_partitions(mtd_banks[idx],
+ part_probes,
+ &part_banks[idx].mtd_part,
+ 0);
+ pr_debug("%s: %d command line partitions on bank %s\n",
+ __FUNCTION__, n, mtdid);
+ if (n > 0) {
+ part_banks[idx].type = "command line";
+ part_banks[idx].nums = n;
+ }
+#endif /* CONFIG_MTD_CMDLINE_PARTS */
+ if (part_banks[idx].nums == 0) {
+ printk(KERN_NOTICE
+ "TQM834x flash bank %d: no partition info "
+ "available, registering whole device\n", idx);
+ add_mtd_device(mtd_banks[idx]);
+ } else {
+ printk(KERN_NOTICE
+ "TQM834x flash bank %d: Using %s partition "
+ "definition\n", idx, part_banks[idx].type);
+ add_mtd_partitions(mtd_banks[idx],
+ part_banks[idx].mtd_part,
+ part_banks[idx].nums);
+ }
+ }
+#else /* ! CONFIG_MTD_PARTITIONS */
+ printk(KERN_NOTICE "TQM834x flash: registering %d flash banks "
+ "at once\n", num_banks);
+
+ for(idx = 0 ; idx < num_banks ; idx++)
+ add_mtd_device(mtd_banks[idx]);
+
+#endif /* CONFIG_MTD_PARTITIONS */
+
+ return 0;
+error_mem:
+ for (idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
+ if (map_banks[idx] != NULL) {
+ if (map_banks[idx]->name != NULL) {
+ kfree(map_banks[idx]->name);
+ map_banks[idx]->name = NULL;
+ }
+ kfree(map_banks[idx]);
+ map_banks[idx] = NULL;
+ }
+ }
+
+ iounmap((void *)start_scan_addr);
+
+ return ret;
+}
+
+static void __exit cleanup_tqm834x_mtd(void)
+{
+ unsigned int idx = 0;
+ for(idx = 0 ; idx < num_banks ; idx++) {
+ /* destroy mtd_info previously allocated */
+ if (mtd_banks[idx]) {
+ del_mtd_partitions(mtd_banks[idx]);
+ map_destroy(mtd_banks[idx]);
+ }
+
+ /* release map_info not used anymore */
+ kfree(map_banks[idx]->name);
+ kfree(map_banks[idx]);
+ }
+
+ if (start_scan_addr) {
+ iounmap((void *)start_scan_addr);
+ start_scan_addr = 0;
+ }
+}
+
+module_init(init_tqm834x_mtd);
+module_exit(cleanup_tqm834x_mtd);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wolfgang Denk <wd@denx.de>");
+MODULE_DESCRIPTION("MTD map driver for TQM834x boards");
diff --git a/linux-2.4.x/drivers/mtd/maps/tqm8xxl.c b/linux-2.4.x/drivers/mtd/maps/tqm8xxl.c
index b7c22c4..893ea3f 100644
--- a/linux-2.4.x/drivers/mtd/maps/tqm8xxl.c
+++ b/linux-2.4.x/drivers/mtd/maps/tqm8xxl.c
@@ -1,15 +1,15 @@
/*
- * Handle mapping of the flash memory access routines
+ * Handle mapping of the flash memory access routines
* on TQM8xxL based devices.
*
- * $Id: tqm8xxl.c,v 1.3 2001/10/02 15:05:14 dwmw2 Exp $
+ * $Id: tqm8xxl.c,v 1.16 2005/11/29 20:01:28 gleixner Exp $
*
* based on rpxlite.c
*
* Copyright(C) 2001 Kirk Lee <kirk@hpc.ee.ntu.edu.tw>
*
* This code is GPLed
- *
+ *
*/
/*
@@ -19,19 +19,22 @@
* 2MiB 512Kx16 2MiB 0
* 4MiB 1Mx16 4MiB 0
* 8MiB 1Mx16 4MiB 4MiB
- * Thus, we choose CONFIG_MTD_CFI_I2 & CONFIG_MTD_CFI_B4 at
+ * Thus, we choose CONFIG_MTD_CFI_I2 & CONFIG_MTD_CFI_B4 at
* kernel configuration.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <asm/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+
#define FLASH_ADDR 0x40000000
#define FLASH_SIZE 0x00800000
#define FLASH_BANK_MAX 4
@@ -49,69 +52,15 @@ static struct mtd_info* mtd_banks[FLASH_BANK_MAX];
static struct map_info* map_banks[FLASH_BANK_MAX];
static struct mtd_part_def part_banks[FLASH_BANK_MAX];
static unsigned long num_banks;
-static unsigned long start_scan_addr;
-
-__u8 tqm8xxl_read8(struct map_info *map, unsigned long ofs)
-{
- return *((__u8 *)(map->map_priv_1 + ofs));
-}
-
-__u16 tqm8xxl_read16(struct map_info *map, unsigned long ofs)
-{
- return *((__u16 *)(map->map_priv_1 + ofs));
-}
-
-__u32 tqm8xxl_read32(struct map_info *map, unsigned long ofs)
-{
- return *((__u32 *)(map->map_priv_1 + ofs));
-}
-
-void tqm8xxl_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy_fromio(to, (void *)(map->map_priv_1 + from), len);
-}
-
-void tqm8xxl_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- *((__u8 *)(map->map_priv_1 + adr)) = d;
-}
-
-void tqm8xxl_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- *((__u16 *)( map->map_priv_1 + adr)) = d;
-}
-
-void tqm8xxl_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- *((__u32 *)(map->map_priv_1 + adr)) = d;
-}
-
-void tqm8xxl_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy_toio((void *)(map->map_priv_1 + to), from, len);
-}
-
-struct map_info tqm8xxl_map = {
- name: "TQM8xxL",
- //size: WINDOW_SIZE,
- buswidth: 4,
- read8: tqm8xxl_read8,
- read16: tqm8xxl_read16,
- read32: tqm8xxl_read32,
- copy_from: tqm8xxl_copy_from,
- write8: tqm8xxl_write8,
- write16: tqm8xxl_write16,
- write32: tqm8xxl_write32,
- copy_to: tqm8xxl_copy_to
-};
+static void __iomem *start_scan_addr;
/*
 * Here is the partition information for all known TQM8xxL series devices.
* See include/linux/mtd/partitions.h for definition of the mtd_partition
* structure.
- *
+ *
* The *_max_flash_size is the maximum possible mapped flash size which
- * is not necessarily the actual flash size. It must correspond to the
+ * is not necessarily the actual flash size. It must correspond to the
* value specified in the mapping definition defined by the
* "struct map_desc *_io_desc" for the corresponding machine.
*/
@@ -121,50 +70,48 @@ struct map_info tqm8xxl_map = {
static unsigned long tqm8xxl_max_flash_size = 0x00800000;
/* partition definition for first flash bank
- * also ref. to "drivers\char\flash_config.c"
+ * (cf. "drivers/char/flash_config.c")
*/
static struct mtd_partition tqm8xxl_partitions[] = {
{
- name: "ppcboot",
- offset: 0x00000000,
- size: 0x00020000, /* 128KB */
- mask_flags: MTD_WRITEABLE, /* force read-only */
+ .name = "ppcboot",
+ .offset = 0x00000000,
+ .size = 0x00020000, /* 128KB */
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
},
{
- name: "kernel", /* default kernel image */
- offset: 0x00020000,
- size: 0x000e0000,
- mask_flags: MTD_WRITEABLE, /* force read-only */
+ .name = "kernel", /* default kernel image */
+ .offset = 0x00020000,
+ .size = 0x000e0000,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
},
{
- name: "user",
- offset: 0x00100000,
- size: 0x00100000,
+ .name = "user",
+ .offset = 0x00100000,
+ .size = 0x00100000,
},
{
- name: "initrd",
- offset: 0x00200000,
- size: 0x00200000,
+ .name = "initrd",
+ .offset = 0x00200000,
+ .size = 0x00200000,
}
};
-/* partition definition for second flahs bank */
+/* partition definition for second flash bank */
static struct mtd_partition tqm8xxl_fs_partitions[] = {
{
- name: "cramfs",
- offset: 0x00000000,
- size: 0x00200000,
+ .name = "cramfs",
+ .offset = 0x00000000,
+ .size = 0x00200000,
},
{
- name: "jffs",
- offset: 0x00200000,
- size: 0x00200000,
- //size: MTDPART_SIZ_FULL,
+ .name = "jffs",
+ .offset = 0x00200000,
+ .size = 0x00200000,
+ //.size = MTDPART_SIZ_FULL,
}
};
#endif
-#define NB_OF(x) (sizeof(x)/sizeof(x[0]))
-
int __init init_tqm_mtd(void)
{
int idx = 0, ret = 0;
@@ -174,66 +121,73 @@ int __init init_tqm_mtd(void)
flash_addr = bd->bi_flashstart;
flash_size = bd->bi_flashsize;
- //request maximum flash size address spzce
- start_scan_addr = (unsigned long)ioremap(flash_addr, flash_size);
+
+ //request maximum flash size address space
+ start_scan_addr = ioremap(flash_addr, flash_size);
if (!start_scan_addr) {
- //printk("%s:Failed to ioremap address:0x%x\n", __FUNCTION__, FLASH_ADDR);
- printk("%s:Failed to ioremap address:0x%x\n", __FUNCTION__, flash_addr);
+		printk(KERN_WARNING "%s: Failed to ioremap address: 0x%x\n", __FUNCTION__, flash_addr);
return -EIO;
}
- for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++)
- {
+
+ for (idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
if(mtd_size >= flash_size)
break;
-
- printk("%s: chip probing count %d\n", __FUNCTION__, idx);
-
+
+ printk(KERN_INFO "%s: chip probing count %d\n", __FUNCTION__, idx);
+
map_banks[idx] = (struct map_info *)kmalloc(sizeof(struct map_info), GFP_KERNEL);
- if(map_banks[idx] == NULL)
- {
- //return -ENOMEM;
+ if(map_banks[idx] == NULL) {
ret = -ENOMEM;
+ /* FIXME: What if some MTD devices were probed already? */
goto error_mem;
}
+
memset((void *)map_banks[idx], 0, sizeof(struct map_info));
map_banks[idx]->name = (char *)kmalloc(16, GFP_KERNEL);
- if(map_banks[idx]->name == NULL)
- {
- //return -ENOMEM;
+
+ if (!map_banks[idx]->name) {
ret = -ENOMEM;
+ /* FIXME: What if some MTD devices were probed already? */
goto error_mem;
}
- memset((void *)map_banks[idx]->name, 0, 16);
-
sprintf(map_banks[idx]->name, "TQM8xxL%d", idx);
- map_banks[idx]->buswidth = 4;
- map_banks[idx]->read8 = tqm8xxl_read8;
- map_banks[idx]->read16 = tqm8xxl_read16;
- map_banks[idx]->read32 = tqm8xxl_read32;
- map_banks[idx]->copy_from = tqm8xxl_copy_from;
- map_banks[idx]->write8 = tqm8xxl_write8;
- map_banks[idx]->write16 = tqm8xxl_write16;
- map_banks[idx]->write32 = tqm8xxl_write32;
- map_banks[idx]->copy_to = tqm8xxl_copy_to;
- map_banks[idx]->map_priv_1 =
- start_scan_addr + ((idx > 0) ?
+
+ map_banks[idx]->size = flash_size;
+ map_banks[idx]->bankwidth = 4;
+
+ simple_map_init(map_banks[idx]);
+
+ map_banks[idx]->virt = start_scan_addr;
+ map_banks[idx]->phys = flash_addr;
+ /* FIXME: This looks utterly bogus, but I'm trying to
+ preserve the behaviour of the original (shown here)...
+
+ map_banks[idx]->map_priv_1 =
+ start_scan_addr + ((idx > 0) ?
(mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0);
+ */
+
+ if (idx && mtd_banks[idx-1]) {
+ map_banks[idx]->virt += mtd_banks[idx-1]->size;
+ map_banks[idx]->phys += mtd_banks[idx-1]->size;
+ }
+
//start to probe flash chips
mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]);
- if(mtd_banks[idx])
- {
- mtd_banks[idx]->module = THIS_MODULE;
+
+ if (mtd_banks[idx]) {
+ mtd_banks[idx]->owner = THIS_MODULE;
mtd_size += mtd_banks[idx]->size;
num_banks++;
- printk("%s: bank%d, name:%s, size:%dbytes \n", __FUNCTION__, num_banks,
+
+			printk(KERN_INFO "%s: bank %ld, name: %s, size: %d bytes\n", __FUNCTION__, num_banks,
mtd_banks[idx]->name, mtd_banks[idx]->size);
}
}
/* no supported flash chips found */
- if(!num_banks)
- {
- printk("TQM8xxL: No support flash chips found!\n");
+ if (!num_banks) {
+		printk(KERN_NOTICE "TQM8xxL: No supported flash chips found!\n");
ret = -ENXIO;
goto error_mem;
}
@@ -244,19 +198,20 @@ int __init init_tqm_mtd(void)
*/
part_banks[0].mtd_part = tqm8xxl_partitions;
part_banks[0].type = "Static image";
- part_banks[0].nums = NB_OF(tqm8xxl_partitions);
+ part_banks[0].nums = ARRAY_SIZE(tqm8xxl_partitions);
+
part_banks[1].mtd_part = tqm8xxl_fs_partitions;
part_banks[1].type = "Static file system";
- part_banks[1].nums = NB_OF(tqm8xxl_fs_partitions);
- for(idx = 0; idx < num_banks ; idx++)
- {
+ part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions);
+
+ for(idx = 0; idx < num_banks ; idx++) {
if (part_banks[idx].nums == 0) {
printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx);
add_mtd_device(mtd_banks[idx]);
} else {
printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n",
idx, part_banks[idx].type);
- add_mtd_partitions(mtd_banks[idx], part_banks[idx].mtd_part,
+ add_mtd_partitions(mtd_banks[idx], part_banks[idx].mtd_part,
part_banks[idx].nums);
}
}
@@ -267,31 +222,23 @@ int __init init_tqm_mtd(void)
#endif
return 0;
error_mem:
- for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++)
- {
- if(map_banks[idx] != NULL)
- {
- if(map_banks[idx]->name != NULL)
- {
- kfree(map_banks[idx]->name);
- map_banks[idx]->name = NULL;
- }
+ for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) {
+ if(map_banks[idx] != NULL) {
+ kfree(map_banks[idx]->name);
+ map_banks[idx]->name = NULL;
kfree(map_banks[idx]);
map_banks[idx] = NULL;
}
}
- //return -ENOMEM;
error:
- iounmap((void *)start_scan_addr);
- //return -ENXIO;
+ iounmap(start_scan_addr);
return ret;
}
static void __exit cleanup_tqm_mtd(void)
{
unsigned int idx = 0;
- for(idx = 0 ; idx < num_banks ; idx++)
- {
+ for(idx = 0 ; idx < num_banks ; idx++) {
/* destroy mtd_info previously allocated */
if (mtd_banks[idx]) {
del_mtd_partitions(mtd_banks[idx]);
@@ -301,8 +248,9 @@ static void __exit cleanup_tqm_mtd(void)
kfree(map_banks[idx]->name);
kfree(map_banks[idx]);
}
+
if (start_scan_addr) {
- iounmap((void *)start_scan_addr);
+ iounmap(start_scan_addr);
start_scan_addr = 0;
}
}
diff --git a/linux-2.4.x/drivers/mtd/maps/ts5500_flash.c b/linux-2.4.x/drivers/mtd/maps/ts5500_flash.c
new file mode 100644
index 0000000..16a8545
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/ts5500_flash.c
@@ -0,0 +1,125 @@
+/*
+ * ts5500_flash.c -- MTD map driver for Technology Systems TS-5500 board
+ *
+ * Copyright (C) 2004 Sean Young <sean@mess.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * Note:
+ * - In order for detection to work, jumper 3 must be set.
+ * - Drive A and B use the resident flash disk (RFD) flash translation layer.
+ * - If you have created your own jffs file system and the bios overwrites
+ * it during boot, try disabling Drive A: and B: in the boot order.
+ *
+ * $Id: ts5500_flash.c,v 1.7 2006/03/29 08:31:12 dwmw2 Exp $
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/types.h>
+
+
+#define WINDOW_ADDR 0x09400000
+#define WINDOW_SIZE 0x00200000
+
+static struct map_info ts5500_map = {
+ .name = "TS-5500 Flash",
+ .size = WINDOW_SIZE,
+ .bankwidth = 1,
+ .phys = WINDOW_ADDR
+};
+
+static struct mtd_partition ts5500_partitions[] = {
+ {
+ .name = "Drive A",
+ .offset = 0,
+ .size = 0x0e0000
+ },
+ {
+ .name = "BIOS",
+ .offset = 0x0e0000,
+ .size = 0x020000,
+ },
+ {
+ .name = "Drive B",
+ .offset = 0x100000,
+ .size = 0x100000
+ }
+};
+
+#define NUM_PARTITIONS ARRAY_SIZE(ts5500_partitions)
+
+static struct mtd_info *mymtd;
+
+static int __init init_ts5500_map(void)
+{
+ int rc = 0;
+
+ ts5500_map.virt = ioremap_nocache(ts5500_map.phys, ts5500_map.size);
+
+ if (!ts5500_map.virt) {
+ printk(KERN_ERR "Failed to ioremap_nocache\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ simple_map_init(&ts5500_map);
+
+ mymtd = do_map_probe("jedec_probe", &ts5500_map);
+ if (!mymtd)
+ mymtd = do_map_probe("map_rom", &ts5500_map);
+
+ if (!mymtd) {
+ rc = -ENXIO;
+ goto err1;
+ }
+
+ mymtd->owner = THIS_MODULE;
+ add_mtd_partitions(mymtd, ts5500_partitions, NUM_PARTITIONS);
+
+ return 0;
+
+err1:
+	/* mymtd is NULL here (map probe failed), so only undo the mapping */
+	iounmap(ts5500_map.virt);
+err2:
+ return rc;
+}
+
+static void __exit cleanup_ts5500_map(void)
+{
+ if (mymtd) {
+ del_mtd_partitions(mymtd);
+ map_destroy(mymtd);
+ }
+
+ if (ts5500_map.virt) {
+ iounmap(ts5500_map.virt);
+ ts5500_map.virt = NULL;
+ }
+}
+
+module_init(init_ts5500_map);
+module_exit(cleanup_ts5500_map);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("MTD map driver for Technology Systems TS-5500 board");
+
diff --git a/linux-2.4.x/drivers/mtd/maps/tsunami_flash.c b/linux-2.4.x/drivers/mtd/maps/tsunami_flash.c
index 33b6350..9e21e6c 100644
--- a/linux-2.4.x/drivers/mtd/maps/tsunami_flash.c
+++ b/linux-2.4.x/drivers/mtd/maps/tsunami_flash.c
@@ -2,25 +2,29 @@
* tsunami_flash.c
*
* flash chip on alpha ds10...
- * $Id: tsunami_flash.c,v 1.1 2002/01/10 22:59:13 eric Exp $
+ * $Id: tsunami_flash.c,v 1.10 2005/11/07 11:14:29 gleixner Exp $
*/
#include <asm/io.h>
#include <asm/core_tsunami.h>
+#include <linux/init.h>
#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
#define FLASH_ENABLE_PORT 0x00C00001
#define FLASH_ENABLE_BYTE 0x01
#define FLASH_DISABLE_BYTE 0x00
#define MAX_TIG_FLASH_SIZE (12*1024*1024)
-static inline __u8 tsunami_flash_read8(struct map_info *map, unsigned long offset)
+static inline map_word tsunami_flash_read8(struct map_info *map, unsigned long offset)
{
- return tsunami_tig_readb(offset);
+ map_word val;
+ val.x[0] = tsunami_tig_readb(offset);
+ return val;
}
-static void tsunami_flash_write8(struct map_info *map, __u8 value, unsigned long offset)
+static void tsunami_flash_write8(struct map_info *map, map_word value, unsigned long offset)
{
- tsunami_tig_writeb(value, offset);
+ tsunami_tig_writeb(value.x[0], offset);
}
static void tsunami_flash_copy_from(
@@ -37,7 +41,7 @@ static void tsunami_flash_copy_from(
}
static void tsunami_flash_copy_to(
- struct map_info *map, unsigned long offset,
+ struct map_info *map, unsigned long offset,
const void *addr, ssize_t len)
{
const unsigned char *src;
@@ -58,18 +62,12 @@ static void tsunami_flash_copy_to(
static struct map_info tsunami_flash_map = {
.name = "flash chip on the Tsunami TIG bus",
.size = MAX_TIG_FLASH_SIZE,
- .buswidth = 1,
- .read8 = tsunami_flash_read8,
- .read16 = 0,
- .read32 = 0,
+	.phys = NO_XIP,
+ .bankwidth = 1,
+ .read = tsunami_flash_read8,
.copy_from = tsunami_flash_copy_from,
- .write8 = tsunami_flash_write8,
- .write16 = 0,
- .write32 = 0,
+ .write = tsunami_flash_write8,
.copy_to = tsunami_flash_copy_to,
- .set_vpp = 0,
- .map_priv_1 = 0,
-
};
static struct mtd_info *tsunami_flash_mtd;
@@ -88,18 +86,18 @@ static void __exit cleanup_tsunami_flash(void)
static int __init init_tsunami_flash(void)
{
- static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", 0 };
+ static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
char **type;
tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT);
-
+
tsunami_flash_mtd = 0;
type = rom_probe_types;
for(; !tsunami_flash_mtd && *type; type++) {
tsunami_flash_mtd = do_map_probe(*type, &tsunami_flash_map);
}
if (tsunami_flash_mtd) {
- tsunami_flash_mtd->module = THIS_MODULE;
+ tsunami_flash_mtd->owner = THIS_MODULE;
add_mtd_device(tsunami_flash_mtd);
return 0;
}
diff --git a/linux-2.4.x/drivers/mtd/maps/uclinux.c b/linux-2.4.x/drivers/mtd/maps/uclinux.c
index 3c7ff64..977a8ea 100644
--- a/linux-2.4.x/drivers/mtd/maps/uclinux.c
+++ b/linux-2.4.x/drivers/mtd/maps/uclinux.c
@@ -4,6 +4,8 @@
* uclinux.c -- generic memory mapped MTD driver for uclinux
*
* (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ *
+ * $Id: uclinux.c,v 1.13 2006/03/29 08:31:12 dwmw2 Exp $
*/
/****************************************************************************/
@@ -15,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/major.h>
+#include <linux/root_dev.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
@@ -22,58 +25,8 @@
/****************************************************************************/
-__u8 uclinux_read8(struct map_info *map, unsigned long ofs)
-{
- return(*((__u8 *) (map->map_priv_1 + ofs)));
-}
-
-__u16 uclinux_read16(struct map_info *map, unsigned long ofs)
-{
- return(*((__u16 *) (map->map_priv_1 + ofs)));
-}
-
-__u32 uclinux_read32(struct map_info *map, unsigned long ofs)
-{
- return(*((__u32 *) (map->map_priv_1 + ofs)));
-}
-
-void uclinux_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- memcpy(to, (void *)(map->map_priv_1 + from), len);
-}
-
-void uclinux_write8(struct map_info *map, __u8 d, unsigned long adr)
-{
- *((__u8 *) (map->map_priv_1 + adr)) = d;
-}
-
-void uclinux_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- *((__u16 *) (map->map_priv_1 + adr)) = d;
-}
-
-void uclinux_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- *((__u32 *) (map->map_priv_1 + adr)) = d;
-}
-
-void uclinux_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
-{
- memcpy((void *) (map->map_priv_1 + to), from, len);
-}
-
-/****************************************************************************/
-
struct map_info uclinux_ram_map = {
- name: "RAM",
- read8: uclinux_read8,
- read16: uclinux_read16,
- read32: uclinux_read32,
- copy_from: uclinux_copy_from,
- write8: uclinux_write8,
- write16: uclinux_write16,
- write32: uclinux_write32,
- copy_to: uclinux_copy_to,
+ .name = "RAM",
};
struct mtd_info *uclinux_ram_mtdinfo;
@@ -81,18 +34,18 @@ struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
struct mtd_partition uclinux_romfs[] = {
- { name: "ROMfs", offset: 0 }
+ { .name = "ROMfs" }
};
-#define NUM_PARTITIONS (sizeof(uclinux_romfs) / sizeof(uclinux_romfs[0]))
+#define NUM_PARTITIONS ARRAY_SIZE(uclinux_romfs)
/****************************************************************************/
int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char **mtdbuf)
{
- struct map_info *map = (struct map_info *) mtd->priv;
- *mtdbuf = (u_char *) (map->map_priv_1 + ((int) from));
+ struct map_info *map = mtd->priv;
+ *mtdbuf = (u_char *) (map->virt + ((int) from));
*retlen = len;
return(0);
}
@@ -104,31 +57,33 @@ int __init uclinux_mtd_init(void)
struct mtd_info *mtd;
struct map_info *mapp;
extern char _ebss;
+ unsigned long addr = (unsigned long) &_ebss;
mapp = &uclinux_ram_map;
- mapp->map_priv_2 = (unsigned long) &_ebss;
- mapp->size = PAGE_ALIGN(*((unsigned long *)((&_ebss) + 8)));
- mapp->buswidth = 4;
+ mapp->phys = addr;
+ mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(addr + 8))));
+ mapp->bankwidth = 4;
printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
- (int) mapp->map_priv_2, (int) mapp->size);
+ (int) mapp->phys, (int) mapp->size);
- mapp->map_priv_1 = (unsigned long)
- ioremap_nocache(mapp->map_priv_2, mapp->size);
+ mapp->virt = ioremap_nocache(mapp->phys, mapp->size);
- if (mapp->map_priv_1 == 0) {
+ if (mapp->virt == 0) {
printk("uclinux[mtd]: ioremap_nocache() failed\n");
return(-EIO);
}
+ simple_map_init(mapp);
+
mtd = do_map_probe("map_ram", mapp);
if (!mtd) {
printk("uclinux[mtd]: failed to find a mapping?\n");
- iounmap((void *) mapp->map_priv_1);
+ iounmap(mapp->virt);
return(-ENXIO);
}
-
- mtd->module = THIS_MODULE;
+
+ mtd->owner = THIS_MODULE;
mtd->point = uclinux_point;
mtd->priv = mapp;
@@ -138,7 +93,6 @@ int __init uclinux_mtd_init(void)
printk("uclinux[mtd]: set %s to be root filesystem\n",
uclinux_romfs[0].name);
ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, 0);
- put_mtd_device(mtd);
return(0);
}
@@ -152,9 +106,9 @@ void __exit uclinux_mtd_cleanup(void)
map_destroy(uclinux_ram_mtdinfo);
uclinux_ram_mtdinfo = NULL;
}
- if (uclinux_ram_map.map_priv_1) {
- iounmap((void *) uclinux_ram_map.map_priv_1);
- uclinux_ram_map.map_priv_1 = 0;
+ if (uclinux_ram_map.virt) {
+ iounmap((void *) uclinux_ram_map.virt);
+ uclinux_ram_map.virt = 0;
}
}
diff --git a/linux-2.4.x/drivers/mtd/maps/vmax301.c b/linux-2.4.x/drivers/mtd/maps/vmax301.c
index 4fd2f63..299aa9b 100644
--- a/linux-2.4.x/drivers/mtd/maps/vmax301.c
+++ b/linux-2.4.x/drivers/mtd/maps/vmax301.c
@@ -1,19 +1,19 @@
-// $Id: vmax301.c,v 1.24 2001/10/02 15:05:14 dwmw2 Exp $
+// $Id: vmax301.c,v 1.33 2006/03/29 08:44:35 dwmw2 Exp $
/* ######################################################################
Tempustech VMAX SBC301 MTD Driver.
-
+
The VMAx 301 is a SBC based on . It
comes with three builtin AMD 29F016B flash chips and a socket for SRAM or
- more flash. Each unit has it's own 8k mapping into a settable region
+   more flash. Each unit has its own 8k mapping into a settable region
(0xD8000). There are two 8k mappings for each MTD, the first is always set
to the lower 8k of the device the second is paged. Writing a 16 bit page
value to anywhere in the first 8k will cause the second 8k to page around.
- To boot the device a bios extension must be installed into the first 8k
- of flash that is smart enough to copy itself down, page in the rest of
+ To boot the device a bios extension must be installed into the first 8k
+ of flash that is smart enough to copy itself down, page in the rest of
itself and begin executing.
-
+
##################################################################### */
#include <linux/module.h>
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
#define WINDOW_START 0xd8000
@@ -34,10 +35,10 @@
/* Actually we could use two spinlocks, but we'd have to have
more private space in the struct map_info. We lose a little
performance like this, but we'd probably lose more by having
- the extra indirection from having one of the map->map_priv
+ the extra indirection from having one of the map->map_priv
fields pointing to yet another private struct.
*/
-static spinlock_t vmax301_spin = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(vmax301_spin);
static void __vmax301_page(struct map_info *map, unsigned long page)
{
@@ -53,32 +54,12 @@ static inline void vmax301_page(struct map_info *map,
__vmax301_page(map, page);
}
-static __u8 vmax301_read8(struct map_info *map, unsigned long ofs)
-{
- __u8 ret;
- spin_lock(&vmax301_spin);
- vmax301_page(map, ofs);
- ret = readb(map->map_priv_2 + (ofs & WINDOW_MASK));
- spin_unlock(&vmax301_spin);
- return ret;
-}
-
-static __u16 vmax301_read16(struct map_info *map, unsigned long ofs)
-{
- __u16 ret;
- spin_lock(&vmax301_spin);
- vmax301_page(map, ofs);
- ret = readw(map->map_priv_2 + (ofs & WINDOW_MASK));
- spin_unlock(&vmax301_spin);
- return ret;
-}
-
-static __u32 vmax301_read32(struct map_info *map, unsigned long ofs)
+static map_word vmax301_read8(struct map_info *map, unsigned long ofs)
{
- __u32 ret;
+ map_word ret;
spin_lock(&vmax301_spin);
vmax301_page(map, ofs);
- ret = readl(map->map_priv_2 + (ofs & WINDOW_MASK));
+ ret.x[0] = readb(map->map_priv_2 + (ofs & WINDOW_MASK));
spin_unlock(&vmax301_spin);
return ret;
}
@@ -99,27 +80,11 @@ static void vmax301_copy_from(struct map_info *map, void *to, unsigned long from
}
}
-static void vmax301_write8(struct map_info *map, __u8 d, unsigned long adr)
+static void vmax301_write8(struct map_info *map, map_word d, unsigned long adr)
{
spin_lock(&vmax301_spin);
vmax301_page(map, adr);
- writeb(d, map->map_priv_2 + (adr & WINDOW_MASK));
- spin_unlock(&vmax301_spin);
-}
-
-static void vmax301_write16(struct map_info *map, __u16 d, unsigned long adr)
-{
- spin_lock(&vmax301_spin);
- vmax301_page(map, adr);
- writew(d, map->map_priv_2 + (adr & WINDOW_MASK));
- spin_unlock(&vmax301_spin);
-}
-
-static void vmax301_write32(struct map_info *map, __u32 d, unsigned long adr)
-{
- spin_lock(&vmax301_spin);
- vmax301_page(map, adr);
- writel(d, map->map_priv_2 + (adr & WINDOW_MASK));
+ writeb(d.x[0], map->map_priv_2 + (adr & WINDOW_MASK));
spin_unlock(&vmax301_spin);
}
@@ -133,7 +98,7 @@ static void vmax301_copy_to(struct map_info *map, unsigned long to, const void *
spin_lock(&vmax301_spin);
vmax301_page(map, to);
memcpy_toio(map->map_priv_2 + to, from, thislen);
- spin_unlock(&vmax301_spin);
+ spin_unlock(&vmax301_spin);
to += thislen;
from += thislen;
len -= thislen;
@@ -142,34 +107,28 @@ static void vmax301_copy_to(struct map_info *map, unsigned long to, const void *
static struct map_info vmax_map[2] = {
{
- name: "VMAX301 Internal Flash",
- size: 3*2*1024*1024,
- buswidth: 1,
- read8: vmax301_read8,
- read16: vmax301_read16,
- read32: vmax301_read32,
- copy_from: vmax301_copy_from,
- write8: vmax301_write8,
- write16: vmax301_write16,
- write32: vmax301_write32,
- copy_to: vmax301_copy_to,
- map_priv_1: WINDOW_START + WINDOW_LENGTH,
- map_priv_2: 0xFFFFFFFF
+ .name = "VMAX301 Internal Flash",
+ .phys = NO_XIP,
+ .size = 3*2*1024*1024,
+ .bankwidth = 1,
+ .read = vmax301_read8,
+ .copy_from = vmax301_copy_from,
+ .write = vmax301_write8,
+ .copy_to = vmax301_copy_to,
+ .map_priv_1 = WINDOW_START + WINDOW_LENGTH,
+ .map_priv_2 = 0xFFFFFFFF
},
{
- name: "VMAX301 Socket",
- size: 0,
- buswidth: 1,
- read8: vmax301_read8,
- read16: vmax301_read16,
- read32: vmax301_read32,
- copy_from: vmax301_copy_from,
- write8: vmax301_write8,
- write16: vmax301_write16,
- write32: vmax301_write32,
- copy_to: vmax301_copy_to,
- map_priv_1: WINDOW_START + (3*WINDOW_LENGTH),
- map_priv_2: 0xFFFFFFFF
+ .name = "VMAX301 Socket",
+ .phys = NO_XIP,
+ .size = 0,
+ .bankwidth = 1,
+ .read = vmax301_read8,
+ .copy_from = vmax301_copy_from,
+ .write = vmax301_write8,
+ .copy_to = vmax301_copy_to,
+ .map_priv_1 = WINDOW_START + (3*WINDOW_LENGTH),
+ .map_priv_2 = 0xFFFFFFFF
}
};
@@ -178,7 +137,7 @@ static struct mtd_info *vmax_mtd[2] = {NULL, NULL};
static void __exit cleanup_vmax301(void)
{
int i;
-
+
for (i=0; i<2; i++) {
if (vmax_mtd[i]) {
del_mtd_device(vmax_mtd[i]);
@@ -202,13 +161,13 @@ int __init init_vmax301(void)
return -EIO;
}
/* Put the address in the map's private data area.
- We store the actual MTD IO address rather than the
+ We store the actual MTD IO address rather than the
address of the first half, because it's used more
- often.
+ often.
*/
- vmax_map[0].map_priv_1 = iomapadr + WINDOW_START;
- vmax_map[1].map_priv_1 = iomapadr + (3*WINDOW_START);
-
+ vmax_map[0].map_priv_2 = iomapadr + WINDOW_START;
+ vmax_map[1].map_priv_2 = iomapadr + (3*WINDOW_START);
+
for (i=0; i<2; i++) {
vmax_mtd[i] = do_map_probe("cfi_probe", &vmax_map[i]);
if (!vmax_mtd[i])
@@ -218,12 +177,12 @@ int __init init_vmax301(void)
if (!vmax_mtd[i])
vmax_mtd[i] = do_map_probe("map_rom", &vmax_map[i]);
if (vmax_mtd[i]) {
- vmax_mtd[i]->module = THIS_MODULE;
+ vmax_mtd[i]->owner = THIS_MODULE;
add_mtd_device(vmax_mtd[i]);
}
}
- if (!vmax_mtd[1] && !vmax_mtd[2]) {
+ if (!vmax_mtd[0] && !vmax_mtd[1]) {
iounmap((void *)iomapadr);
return -ENXIO;
}
diff --git a/linux-2.4.x/drivers/mtd/maps/walnut.c b/linux-2.4.x/drivers/mtd/maps/walnut.c
new file mode 100644
index 0000000..0429429
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/walnut.c
@@ -0,0 +1,121 @@
+/*
+ * $Id: walnut.c,v 1.4 2005/11/29 20:01:28 gleixner Exp $
+ *
+ * Mapping for Walnut flash
+ * (used ebony.c as a "framework")
+ *
+ * Heikki Lindholm <holindho@infradead.org>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/config.h>
+#include <asm/io.h>
+#include <asm/ibm4xx.h>
+#include <platforms/4xx/walnut.h>
+
+/* these should be in platforms/4xx/walnut.h ? */
+#define WALNUT_FLASH_ONBD_N(x) (x & 0x02)
+#define WALNUT_FLASH_SRAM_SEL(x) (x & 0x01)
+#define WALNUT_FLASH_LOW 0xFFF00000
+#define WALNUT_FLASH_HIGH 0xFFF80000
+#define WALNUT_FLASH_SIZE 0x80000
+
+static struct mtd_info *flash;
+
+static struct map_info walnut_map = {
+ .name = "Walnut flash",
+ .size = WALNUT_FLASH_SIZE,
+ .bankwidth = 1,
+};
+
+/* Actually, OpenBIOS is the last 128 KiB of the flash - better
+ * partitioning could be made */
+static struct mtd_partition walnut_partitions[] = {
+ {
+ .name = "OpenBIOS",
+ .offset = 0x0,
+ .size = WALNUT_FLASH_SIZE,
+ /*.mask_flags = MTD_WRITEABLE, */ /* force read-only */
+ }
+};
+
+int __init init_walnut(void)
+{
+ u8 fpga_brds1;
+ void *fpga_brds1_adr;
+ void *fpga_status_adr;
+ unsigned long flash_base;
+
+ /* this should already be mapped (platform/4xx/walnut.c) */
+ fpga_status_adr = ioremap(WALNUT_FPGA_BASE, 8);
+ if (!fpga_status_adr)
+ return -ENOMEM;
+
+ fpga_brds1_adr = fpga_status_adr+5;
+ fpga_brds1 = readb(fpga_brds1_adr);
+ /* iounmap(fpga_status_adr); */
+
+ if (WALNUT_FLASH_ONBD_N(fpga_brds1)) {
+		printk("The on-board flash is disabled (U79 sw 5)!\n");
+ return -EIO;
+ }
+ if (WALNUT_FLASH_SRAM_SEL(fpga_brds1))
+ flash_base = WALNUT_FLASH_LOW;
+ else
+ flash_base = WALNUT_FLASH_HIGH;
+
+ walnut_map.phys = flash_base;
+ walnut_map.virt =
+ (void __iomem *)ioremap(flash_base, walnut_map.size);
+
+ if (!walnut_map.virt) {
+ printk("Failed to ioremap flash.\n");
+ return -EIO;
+ }
+
+ simple_map_init(&walnut_map);
+
+ flash = do_map_probe("jedec_probe", &walnut_map);
+ if (flash) {
+ flash->owner = THIS_MODULE;
+ add_mtd_partitions(flash, walnut_partitions,
+ ARRAY_SIZE(walnut_partitions));
+ } else {
+ printk("map probe failed for flash\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void __exit cleanup_walnut(void)
+{
+ if (flash) {
+ del_mtd_partitions(flash);
+ map_destroy(flash);
+ }
+
+ if (walnut_map.virt) {
+ iounmap((void *)walnut_map.virt);
+ walnut_map.virt = 0;
+ }
+}
+
+module_init(init_walnut);
+module_exit(cleanup_walnut);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Heikki Lindholm <holindho@infradead.org>");
+MODULE_DESCRIPTION("MTD map and partitions for IBM 405GP Walnut boards");
diff --git a/linux-2.4.x/drivers/mtd/maps/wr_sbc82xx_flash.c b/linux-2.4.x/drivers/mtd/maps/wr_sbc82xx_flash.c
new file mode 100644
index 0000000..60c197e
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -0,0 +1,181 @@
+/*
+ * $Id: wr_sbc82xx_flash.c,v 1.8 2005/11/07 11:14:29 gleixner Exp $
+ *
+ * Map for flash chips on Wind River PowerQUICC II SBC82xx board.
+ *
+ * Copyright (C) 2004 Red Hat, Inc.
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/config.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/immap_cpm2.h>
+
+static struct mtd_info *sbcmtd[3];
+static struct mtd_partition *sbcmtd_parts[3];
+
+struct map_info sbc82xx_flash_map[3] = {
+ {.name = "Boot flash"},
+ {.name = "Alternate boot flash"},
+ {.name = "User flash"}
+};
+
+static struct mtd_partition smallflash_parts[] = {
+ {
+ .name = "space",
+ .size = 0x100000,
+ .offset = 0,
+ }, {
+ .name = "bootloader",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ }
+};
+
+static struct mtd_partition bigflash_parts[] = {
+ {
+ .name = "bootloader",
+ .size = 0x00100000,
+ .offset = 0,
+ }, {
+ .name = "file system",
+ .size = 0x01f00000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "boot config",
+ .size = 0x00100000,
+ .offset = MTDPART_OFS_APPEND,
+ }, {
+ .name = "space",
+ .size = 0x01f00000,
+ .offset = MTDPART_OFS_APPEND,
+ }
+};
+
+static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
+
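+/*
+ * Build a map_info from one memory controller bank: BR bit 0 marks the bank
+ * valid, the upper address bits of BR and OR give the base address and
+ * window size, and BR bits 11-12 select the port size, which becomes the
+ * map bankwidth.
+ */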
+#define init_sbc82xx_one_flash(map, br, or) \
+do { \
+ (map).phys = (br & 1) ? (br & 0xffff8000) : 0; \
+ (map).size = (br & 1) ? (~(or & 0xffff8000) + 1) : 0; \
+ switch (br & 0x00001800) { \
+ case 0x00000000: \
+ case 0x00000800: (map).bankwidth = 1; break; \
+ case 0x00001000: (map).bankwidth = 2; break; \
+ case 0x00001800: (map).bankwidth = 4; break; \
+ } \
+} while (0);
+
+int __init init_sbc82xx_flash(void)
+{
+ volatile memctl_cpm2_t *mc = &cpm2_immr->im_memctl;
+ int bigflash;
+ int i;
+
+#ifdef CONFIG_SBC8560
+ mc = ioremap(0xff700000 + 0x5000, sizeof(memctl_cpm2_t));
+#else
+ mc = &cpm2_immr->im_memctl;
+#endif
+
+ bigflash = 1;
+ if ((mc->memc_br0 & 0x00001800) == 0x00001800)
+ bigflash = 0;
+
+ init_sbc82xx_one_flash(sbc82xx_flash_map[0], mc->memc_br0, mc->memc_or0);
+ init_sbc82xx_one_flash(sbc82xx_flash_map[1], mc->memc_br6, mc->memc_or6);
+ init_sbc82xx_one_flash(sbc82xx_flash_map[2], mc->memc_br1, mc->memc_or1);
+
+#ifdef CONFIG_SBC8560
+ iounmap((void *) mc);
+#endif
+
+ for (i=0; i<3; i++) {
+ int8_t flashcs[3] = { 0, 6, 1 };
+ int nr_parts;
+
+ printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
+ sbc82xx_flash_map[i].name,
+ (sbc82xx_flash_map[i].size >> 20),
+ flashcs[i]);
+ if (!sbc82xx_flash_map[i].phys) {
+ /* We know it can't be at zero. */
+ printk("): disabled by bootloader.\n");
+ continue;
+ }
+ printk(" at %08lx)\n", sbc82xx_flash_map[i].phys);
+
+ sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size);
+
+ if (!sbc82xx_flash_map[i].virt) {
+ printk("Failed to ioremap\n");
+ continue;
+ }
+
+ simple_map_init(&sbc82xx_flash_map[i]);
+
+ sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]);
+
+ if (!sbcmtd[i])
+ continue;
+
+ sbcmtd[i]->owner = THIS_MODULE;
+
+ nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
+ &sbcmtd_parts[i], 0);
+ if (nr_parts > 0) {
+ add_mtd_partitions (sbcmtd[i], sbcmtd_parts[i], nr_parts);
+ continue;
+ }
+
+ /* No partitioning detected. Use default */
+ if (i == 2) {
+ add_mtd_device(sbcmtd[i]);
+ } else if (i == bigflash) {
+ add_mtd_partitions (sbcmtd[i], bigflash_parts, ARRAY_SIZE(bigflash_parts));
+ } else {
+ add_mtd_partitions (sbcmtd[i], smallflash_parts, ARRAY_SIZE(smallflash_parts));
+ }
+ }
+ return 0;
+}
+
+static void __exit cleanup_sbc82xx_flash(void)
+{
+ int i;
+
+ for (i=0; i<3; i++) {
+ if (!sbcmtd[i])
+ continue;
+
+ if (i<2 || sbcmtd_parts[i])
+ del_mtd_partitions(sbcmtd[i]);
+ else
+ del_mtd_device(sbcmtd[i]);
+
+ kfree(sbcmtd_parts[i]);
+ map_destroy(sbcmtd[i]);
+
+ iounmap((void *)sbc82xx_flash_map[i].virt);
+ sbc82xx_flash_map[i].virt = 0;
+ }
+}
+
+module_init(init_sbc82xx_flash);
+module_exit(cleanup_sbc82xx_flash);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II");
diff --git a/linux-2.4.x/drivers/mtd/mtd_blkdevs-24.c b/linux-2.4.x/drivers/mtd/mtd_blkdevs-24.c
new file mode 100644
index 0000000..688028b
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/mtd_blkdevs-24.c
@@ -0,0 +1,692 @@
+/*
+ * $Id: mtd_blkdevs-24.c,v 1.18 2005/11/07 11:14:20 gleixner Exp $
+ *
+ * (C) 2003 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Interface to Linux 2.4 block layer for MTD 'translation layers'.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/mtd.h>
+#include <linux/blkdev.h>
+#include <linux/blk.h>
+#include <linux/blkpg.h>
+#include <linux/spinlock.h>
+#include <linux/hdreg.h>
+#include <linux/init.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+
+static LIST_HEAD(blktrans_majors);
+
+extern struct semaphore mtd_table_mutex;
+extern struct mtd_info *mtd_table[];
+
+struct mtd_blkcore_priv {
+ devfs_handle_t devfs_dir_handle;
+ int blksizes[256];
+ int sizes[256];
+ struct hd_struct part_table[256];
+ struct gendisk gd;
+ spinlock_t devs_lock; /* See comment in _request function */
+ struct completion thread_dead;
+ int exiting;
+ wait_queue_head_t thread_wq;
+};
+
+static inline struct mtd_blktrans_dev *tr_get_dev(struct mtd_blktrans_ops *tr,
+ int devnum)
+{
+ struct list_head *this;
+ struct mtd_blktrans_dev *d;
+
+ list_for_each(this, &tr->devs) {
+ d = list_entry(this, struct mtd_blktrans_dev, list);
+
+ if (d->devnum == devnum)
+ return d;
+ }
+ return NULL;
+}
+
+static inline struct mtd_blktrans_ops *get_tr(int major)
+{
+ struct list_head *this;
+ struct mtd_blktrans_ops *t;
+
+ list_for_each(this, &blktrans_majors) {
+ t = list_entry(this, struct mtd_blktrans_ops, list);
+
+ if (t->major == major)
+ return t;
+ }
+ return NULL;
+}
+
+static int do_blktrans_request(struct mtd_blktrans_ops *tr,
+ struct mtd_blktrans_dev *dev,
+ struct request *req)
+{
+ unsigned long block, nsect;
+ char *buf;
+ int minor;
+
+ minor = MINOR(req->rq_dev);
+ block = req->sector;
+ nsect = req->current_nr_sectors;
+ buf = req->buffer;
+
+ if (block + nsect > tr->blkcore_priv->part_table[minor].nr_sects) {
+ printk(KERN_WARNING "Access beyond end of device.\n");
+ return 0;
+ }
+ block += tr->blkcore_priv->part_table[minor].start_sect;
+
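+	/* Serve the request one 512-byte sector at a time through the
+	 * translation layer's readsect()/writesect() hooks; a non-zero
+	 * return from either fails the request. */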
+ switch(req->cmd) {
+ case READ:
+ for (; nsect > 0; nsect--, block++, buf += 512)
+ if (tr->readsect(dev, block, buf))
+ return 0;
+ return 1;
+
+ case WRITE:
+ if (!tr->writesect)
+ return 0;
+
+ for (; nsect > 0; nsect--, block++, buf += 512)
+ if (tr->writesect(dev, block, buf))
+ return 0;
+ return 1;
+
+ default:
+ printk(KERN_NOTICE "Unknown request cmd %d\n", req->cmd);
+ return 0;
+ }
+}
+
+static int mtd_blktrans_thread(void *arg)
+{
+ struct mtd_blktrans_ops *tr = arg;
+ struct request_queue *rq = BLK_DEFAULT_QUEUE(tr->major);
+
+ /* we might get involved when memory gets low, so use PF_MEMALLOC */
+ current->flags |= PF_MEMALLOC;
+
+ snprintf(current->comm, sizeof(current->comm), "%sd", tr->name);
+
+ /* daemonize() doesn't do this for us since some kernel threads
+ actually want to deal with signals. We can't just call
+ exit_sighand() since that'll cause an oops when we finally
+ do exit. */
+ spin_lock_irq(&current->sigmask_lock);
+ sigfillset(&current->blocked);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sigmask_lock);
+
+ daemonize("%sd", tr->name);
+
+ while (!tr->blkcore_priv->exiting) {
+ struct request *req;
+ struct mtd_blktrans_dev *dev;
+ int devnum;
+ int res = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ spin_lock_irq(&io_request_lock);
+
+ if (list_empty(&rq->queue_head)) {
+
+ add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_unlock_irq(&io_request_lock);
+
+ schedule();
+ remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
+
+ continue;
+ }
+
+ req = blkdev_entry_next_request(&rq->queue_head);
+
+ devnum = MINOR(req->rq_dev) >> tr->part_bits;
+
+ /* The ll_rw_blk code knows not to touch the request
+ at the head of the queue */
+ spin_unlock_irq(&io_request_lock);
+
+ /* FIXME: Where can we store the dev, on which
+ we already have a refcount anyway? We need to
+ lock against concurrent addition/removal of devices,
+ but if we use the mtd_table_mutex we deadlock when
+ grok_partitions is called from the registration
+ callbacks. */
+ spin_lock(&tr->blkcore_priv->devs_lock);
+ dev = tr_get_dev(tr, devnum);
+ spin_unlock(&tr->blkcore_priv->devs_lock);
+
+ BUG_ON(!dev);
+
+ /* Ensure serialisation of requests */
+ down(&dev->sem);
+
+ res = do_blktrans_request(tr, dev, req);
+ up(&dev->sem);
+
+ if (!end_that_request_first(req, res, tr->name)) {
+ spin_lock_irq(&io_request_lock);
+ blkdev_dequeue_request(req);
+ end_that_request_last(req);
+ spin_unlock_irq(&io_request_lock);
+ }
+ }
+ complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
+}
+
+static void mtd_blktrans_request(struct request_queue *rq)
+{
+ struct mtd_blktrans_ops *tr = rq->queuedata;
+ wake_up(&tr->blkcore_priv->thread_wq);
+}
+
+int blktrans_open(struct inode *i, struct file *f)
+{
+ struct mtd_blktrans_ops *tr = NULL;
+ struct mtd_blktrans_dev *dev = NULL;
+ int major_nr = MAJOR(i->i_rdev);
+ int minor_nr = MINOR(i->i_rdev);
+ int devnum;
+ int ret = -ENODEV;
+
+ if (is_read_only(i->i_rdev) && (f->f_mode & FMODE_WRITE))
+ return -EROFS;
+
+ down(&mtd_table_mutex);
+
+ tr = get_tr(major_nr);
+
+ if (!tr)
+ goto out;
+
+ devnum = minor_nr >> tr->part_bits;
+
+ dev = tr_get_dev(tr, devnum);
+
+ if (!dev)
+ goto out;
+
+ if (!tr->blkcore_priv->part_table[minor_nr].nr_sects) {
+ ret = -ENODEV;
+ goto out;
+ }
+
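+	/* Pin both the underlying MTD driver and the translation layer
+	 * module for as long as the device is held open. */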
+ if (!try_inc_mod_count(dev->mtd->owner))
+ goto out;
+
+ if (!try_inc_mod_count(tr->owner))
+ goto out_tr;
+
+ dev->mtd->usecount++;
+
+ ret = 0;
+ if (tr->open && (ret = tr->open(dev))) {
+ dev->mtd->usecount--;
+ if (dev->mtd->owner)
+ __MOD_DEC_USE_COUNT(dev->mtd->owner);
+ out_tr:
+ if (tr->owner)
+ __MOD_DEC_USE_COUNT(tr->owner);
+ }
+ out:
+ up(&mtd_table_mutex);
+
+ return ret;
+}
+
+int blktrans_release(struct inode *i, struct file *f)
+{
+ struct mtd_blktrans_dev *dev;
+ struct mtd_blktrans_ops *tr;
+ int ret = 0;
+ int devnum;
+
+ down(&mtd_table_mutex);
+
+ tr = get_tr(MAJOR(i->i_rdev));
+ if (!tr) {
+ up(&mtd_table_mutex);
+ return -ENODEV;
+ }
+
+ devnum = MINOR(i->i_rdev) >> tr->part_bits;
+ dev = tr_get_dev(tr, devnum);
+
+ if (!dev) {
+ up(&mtd_table_mutex);
+ return -ENODEV;
+ }
+
+ if (tr->release)
+ ret = tr->release(dev);
+
+ if (!ret) {
+ dev->mtd->usecount--;
+ if (dev->mtd->owner)
+ __MOD_DEC_USE_COUNT(dev->mtd->owner);
+ if (tr->owner)
+ __MOD_DEC_USE_COUNT(tr->owner);
+ }
+
+ up(&mtd_table_mutex);
+
+ return ret;
+}
+
+static int mtd_blktrans_rrpart(kdev_t rdev, struct mtd_blktrans_ops *tr,
+ struct mtd_blktrans_dev *dev)
+{
+ struct gendisk *gd = &(tr->blkcore_priv->gd);
+ int i;
+ int minor = MINOR(rdev);
+
+ if (minor & ((1<<tr->part_bits)-1) || !tr->part_bits) {
+ /* BLKRRPART on a partition. Go away. */
+ return -ENOTTY;
+ }
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ /* We are required to prevent simultaneous open() ourselves.
+ The core doesn't do that for us. Did I ever mention how
+ much the Linux block layer sucks? Sledgehammer approach... */
+ down(&mtd_table_mutex);
+
+ for (i=0; i < (1<<tr->part_bits); i++) {
+ invalidate_device(MKDEV(tr->major, minor+i), 1);
+ gd->part[minor + i].start_sect = 0;
+ gd->part[minor + i].nr_sects = 0;
+ }
+
+ grok_partitions(gd, minor, 1 << tr->part_bits,
+ dev->size);
+ up(&mtd_table_mutex);
+
+ return 0;
+}
+
+static int blktrans_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mtd_blktrans_dev *dev;
+ struct mtd_blktrans_ops *tr;
+ int devnum;
+
+ switch(cmd) {
+ case BLKGETSIZE:
+ case BLKGETSIZE64:
+ case BLKBSZSET:
+ case BLKBSZGET:
+ case BLKROSET:
+ case BLKROGET:
+ case BLKRASET:
+ case BLKRAGET:
+ case BLKPG:
+ case BLKELVGET:
+ case BLKELVSET:
+ return blk_ioctl(inode->i_rdev, cmd, arg);
+ }
+
+ down(&mtd_table_mutex);
+
+ tr = get_tr(MAJOR(inode->i_rdev));
+ if (!tr) {
+ up(&mtd_table_mutex);
+ return -ENODEV;
+ }
+
+ devnum = MINOR(inode->i_rdev) >> tr->part_bits;
+ dev = tr_get_dev(tr, devnum);
+
+ up(&mtd_table_mutex);
+
+ if (!dev)
+ return -ENODEV;
+
+ switch(cmd) {
+ case BLKRRPART:
+ return mtd_blktrans_rrpart(inode->i_rdev, tr, dev);
+
+ case BLKFLSBUF:
+ blk_ioctl(inode->i_rdev, cmd, arg);
+ if (tr->flush)
+ return tr->flush(dev);
+ /* The core code did the work, we had nothing to do. */
+ return 0;
+
+ case HDIO_GETGEO:
+ if (tr->getgeo) {
+ struct hd_geometry g;
+ struct gendisk *gd = &(tr->blkcore_priv->gd);
+ int ret;
+
+ memset(&g, 0, sizeof(g));
+ ret = tr->getgeo(dev, &g);
+ if (ret)
+ return ret;
+
+ g.start = gd->part[MINOR(inode->i_rdev)].start_sect;
+ if (copy_to_user((void *)arg, &g, sizeof(g)))
+ return -EFAULT;
+ return 0;
+ } /* else */
+ default:
+ return -ENOTTY;
+ }
+}
+
+struct block_device_operations mtd_blktrans_ops = {
+ .owner = THIS_MODULE,
+ .open = blktrans_open,
+ .release = blktrans_release,
+ .ioctl = blktrans_ioctl,
+};
+
+int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
+{
+ struct mtd_blktrans_ops *tr = new->tr;
+ struct list_head *this;
+ int last_devnum = -1;
+ int i;
+
+ if (!down_trylock(&mtd_table_mutex)) {
+ up(&mtd_table_mutex);
+ BUG();
+ }
+
+ spin_lock(&tr->blkcore_priv->devs_lock);
+
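+	/* Walk the devnum-ordered device list: either claim the requested
+	 * devnum or, when devnum == -1, take the first free slot. */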
+ list_for_each(this, &tr->devs) {
+ struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
+ if (new->devnum == -1) {
+ /* Use first free number */
+ if (d->devnum != last_devnum+1) {
+ /* Found a free devnum. Plug it in here */
+ new->devnum = last_devnum+1;
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ } else if (d->devnum == new->devnum) {
+ /* Required number taken */
+ spin_unlock(&tr->blkcore_priv->devs_lock);
+ return -EBUSY;
+ } else if (d->devnum > new->devnum) {
+ /* Required number was free */
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ last_devnum = d->devnum;
+ }
+ if (new->devnum == -1)
+ new->devnum = last_devnum+1;
+
+ if ((new->devnum << tr->part_bits) > 256) {
+ spin_unlock(&tr->blkcore_priv->devs_lock);
+ return -EBUSY;
+ }
+
+ init_MUTEX(&new->sem);
+ list_add_tail(&new->list, &tr->devs);
+ added:
+ spin_unlock(&tr->blkcore_priv->devs_lock);
+
+ if (!tr->writesect)
+ new->readonly = 1;
+
+ for (i = new->devnum << tr->part_bits;
+ i < (new->devnum+1) << tr->part_bits;
+ i++) {
+ set_device_ro(MKDEV(tr->major, i), new->readonly);
+ tr->blkcore_priv->blksizes[i] = new->blksize;
+ tr->blkcore_priv->sizes[i] = 0;
+ tr->blkcore_priv->part_table[i].nr_sects = 0;
+ tr->blkcore_priv->part_table[i].start_sect = 0;
+ }
+
+ /*
+ <viro_zzz> dwmw2: BLOCK_SIZE_BITS has nothing to do with block devices
+ <viro> dwmw2: any code which sets blk_size[][] should be
+	   size >> 10 */
+
+ tr->blkcore_priv->sizes[new->devnum << tr->part_bits] =
+ (new->size * new->blksize) >> 10; /* 2.4 and its dumb units */
+
+ /* But this is still in device's sectors? $DEITY knows */
+ tr->blkcore_priv->part_table[new->devnum << tr->part_bits].nr_sects = new->size;
+
+ if (tr->part_bits) {
+ grok_partitions(&tr->blkcore_priv->gd, new->devnum,
+ 1 << tr->part_bits, new->size);
+ }
+#ifdef CONFIG_DEVFS_FS
+ if (!tr->part_bits) {
+ char name[2];
+
+ name[0] = '0' + new->devnum;
+ name[1] = 0;
+
+ new->blkcore_priv =
+ devfs_register(tr->blkcore_priv->devfs_dir_handle,
+ name, DEVFS_FL_DEFAULT, tr->major,
+ new->devnum, S_IFBLK|S_IRUGO|S_IWUGO,
+ &mtd_blktrans_ops, NULL);
+ }
+#endif
+ return 0;
+}
+
+int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
+{
+ struct mtd_blktrans_ops *tr = old->tr;
+ int i;
+
+ if (!down_trylock(&mtd_table_mutex)) {
+ up(&mtd_table_mutex);
+ BUG();
+ }
+
+#ifdef CONFIG_DEVFS_FS
+ if (!tr->part_bits) {
+ devfs_unregister(old->blkcore_priv);
+ old->blkcore_priv = NULL;
+ } else {
+ devfs_register_partitions(&tr->blkcore_priv->gd,
+ old->devnum << tr->part_bits, 1);
+ }
+#endif
+ spin_lock(&tr->blkcore_priv->devs_lock);
+ list_del(&old->list);
+ spin_unlock(&tr->blkcore_priv->devs_lock);
+
+ for (i = (old->devnum << tr->part_bits);
+ i < ((old->devnum+1) << tr->part_bits); i++) {
+ tr->blkcore_priv->sizes[i] = 0;
+ tr->blkcore_priv->part_table[i].nr_sects = 0;
+ tr->blkcore_priv->part_table[i].start_sect = 0;
+ }
+
+ return 0;
+}
+
+void blktrans_notify_remove(struct mtd_info *mtd)
+{
+ struct list_head *this, *this2, *next;
+
+ list_for_each(this, &blktrans_majors) {
+ struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
+
+ list_for_each_safe(this2, next, &tr->devs) {
+ struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);
+
+ if (dev->mtd == mtd)
+ tr->remove_dev(dev);
+ }
+ }
+}
+
+void blktrans_notify_add(struct mtd_info *mtd)
+{
+ struct list_head *this;
+
+ if (mtd->type == MTD_ABSENT)
+ return;
+
+ list_for_each(this, &blktrans_majors) {
+ struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
+
+ tr->add_mtd(tr, mtd);
+ }
+
+}
+
+static struct mtd_notifier blktrans_notifier = {
+ .add = blktrans_notify_add,
+ .remove = blktrans_notify_remove,
+};
+
+int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
+{
+ int ret, i;
+
+ /* Register the notifier if/when the first device type is
+ registered, to prevent the link/init ordering from fucking
+ us over. */
+ if (!blktrans_notifier.list.next)
+ register_mtd_user(&blktrans_notifier);
+
+ tr->blkcore_priv = kmalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
+ if (!tr->blkcore_priv)
+ return -ENOMEM;
+
+ memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));
+
+ down(&mtd_table_mutex);
+
+ ret = devfs_register_blkdev(tr->major, tr->name, &mtd_blktrans_ops);
+ if (ret) {
+ printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
+ tr->name, tr->major, ret);
+ kfree(tr->blkcore_priv);
+ up(&mtd_table_mutex);
+ return ret;
+ }
+
+ blk_init_queue(BLK_DEFAULT_QUEUE(tr->major), &mtd_blktrans_request);
+ (BLK_DEFAULT_QUEUE(tr->major))->queuedata = tr;
+
+ init_completion(&tr->blkcore_priv->thread_dead);
+ init_waitqueue_head(&tr->blkcore_priv->thread_wq);
+
+ ret = kernel_thread(mtd_blktrans_thread, tr,
+ CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
+ if (ret < 0) {
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(tr->major));
+ devfs_unregister_blkdev(tr->major, tr->name);
+ kfree(tr->blkcore_priv);
+ up(&mtd_table_mutex);
+ return ret;
+ }
+
+ tr->blkcore_priv->devfs_dir_handle =
+ devfs_mk_dir(NULL, tr->name, NULL);
+
+ blksize_size[tr->major] = tr->blkcore_priv->blksizes;
+ blk_size[tr->major] = tr->blkcore_priv->sizes;
+
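+	/* Describe this major to the 2.4 block layer: one gendisk covering
+	 * 256 minors, split into (1 << part_bits) minors per device. */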
+ tr->blkcore_priv->gd.major = tr->major;
+ tr->blkcore_priv->gd.major_name = tr->name;
+ tr->blkcore_priv->gd.minor_shift = tr->part_bits;
+ tr->blkcore_priv->gd.max_p = (1<<tr->part_bits) - 1;
+ tr->blkcore_priv->gd.part = tr->blkcore_priv->part_table;
+ tr->blkcore_priv->gd.sizes = tr->blkcore_priv->sizes;
+ tr->blkcore_priv->gd.nr_real = 256 >> tr->part_bits;
+
+ spin_lock_init(&tr->blkcore_priv->devs_lock);
+
+ add_gendisk(&tr->blkcore_priv->gd);
+
+ INIT_LIST_HEAD(&tr->devs);
+ list_add(&tr->list, &blktrans_majors);
+
+ for (i=0; i<MAX_MTD_DEVICES; i++) {
+ if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
+ tr->add_mtd(tr, mtd_table[i]);
+ }
+ up(&mtd_table_mutex);
+
+ return 0;
+}
+
+int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
+{
+ struct list_head *this, *next;
+
+ down(&mtd_table_mutex);
+
+ /* Clean up the kernel thread */
+ tr->blkcore_priv->exiting = 1;
+ wake_up(&tr->blkcore_priv->thread_wq);
+ wait_for_completion(&tr->blkcore_priv->thread_dead);
+
+ /* Remove it from the list of active majors */
+ list_del(&tr->list);
+
+ /* Remove each of its devices */
+ list_for_each_safe(this, next, &tr->devs) {
+ struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
+ tr->remove_dev(dev);
+ }
+
+ blksize_size[tr->major] = NULL;
+ blk_size[tr->major] = NULL;
+
+ del_gendisk(&tr->blkcore_priv->gd);
+
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(tr->major));
+ devfs_unregister_blkdev(tr->major, tr->name);
+
+ devfs_unregister(tr->blkcore_priv->devfs_dir_handle);
+
+ up(&mtd_table_mutex);
+
+ kfree(tr->blkcore_priv);
+
+ if (!list_empty(&tr->devs))
+ BUG();
+ return 0;
+}
+
+static void __exit mtd_blktrans_exit(void)
+{
+ /* No race here -- if someone's currently in register_mtd_blktrans
+ we're screwed anyway. */
+ if (blktrans_notifier.list.next)
+ unregister_mtd_user(&blktrans_notifier);
+}
+
+module_exit(mtd_blktrans_exit);
+
+EXPORT_SYMBOL_GPL(register_mtd_blktrans);
+EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
+EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
+EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
+
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
diff --git a/linux-2.4.x/drivers/mtd/mtd_blkdevs.c b/linux-2.4.x/drivers/mtd/mtd_blkdevs.c
new file mode 100644
index 0000000..32f4250
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/mtd_blkdevs.c
@@ -0,0 +1,482 @@
+/*
+ * $Id: mtd_blkdevs.c,v 1.28 2006/03/29 08:26:27 dwmw2 Exp $
+ *
+ * (C) 2003 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Interface to Linux 2.5 block layer for MTD 'translation layers'.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/mtd.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/spinlock.h>
+#include <linux/hdreg.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <asm/uaccess.h>
+
+static LIST_HEAD(blktrans_majors);
+
+extern struct mutex mtd_table_mutex;
+extern struct mtd_info *mtd_table[];
+
+struct mtd_blkcore_priv {
+ struct completion thread_dead;
+ int exiting;
+ wait_queue_head_t thread_wq;
+ struct request_queue *rq;
+ spinlock_t queue_lock;
+};
+
+static int do_blktrans_request(struct mtd_blktrans_ops *tr,
+ struct mtd_blktrans_dev *dev,
+ struct request *req)
+{
+ unsigned long block, nsect;
+ char *buf;
+
+ block = req->sector;
+ nsect = req->current_nr_sectors;
+ buf = req->buffer;
+
+ if (!(req->flags & REQ_CMD))
+ return 0;
+
+ if (block + nsect > get_capacity(req->rq_disk))
+ return 0;
+
+ switch(rq_data_dir(req)) {
+ case READ:
+ for (; nsect > 0; nsect--, block++, buf += 512)
+ if (tr->readsect(dev, block, buf))
+ return 0;
+ return 1;
+
+ case WRITE:
+ if (!tr->writesect)
+ return 0;
+
+ for (; nsect > 0; nsect--, block++, buf += 512)
+ if (tr->writesect(dev, block, buf))
+ return 0;
+ return 1;
+
+ default:
+ printk(KERN_NOTICE "Unknown request %ld\n", rq_data_dir(req));
+ return 0;
+ }
+}
+
+static int mtd_blktrans_thread(void *arg)
+{
+ struct mtd_blktrans_ops *tr = arg;
+ struct request_queue *rq = tr->blkcore_priv->rq;
+
+ /* we might get involved when memory gets low, so use PF_MEMALLOC */
+ current->flags |= PF_MEMALLOC | PF_NOFREEZE;
+
+ daemonize("%sd", tr->name);
+
+ /* daemonize() doesn't do this for us since some kernel threads
+ actually want to deal with signals. We can't just call
+ exit_sighand() since that'll cause an oops when we finally
+ do exit. */
+ spin_lock_irq(&current->sighand->siglock);
+ sigfillset(&current->blocked);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ spin_lock_irq(rq->queue_lock);
+
+ while (!tr->blkcore_priv->exiting) {
+ struct request *req;
+ struct mtd_blktrans_dev *dev;
+ int res = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ req = elv_next_request(rq);
+
+ if (!req) {
+ add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_unlock_irq(rq->queue_lock);
+
+ schedule();
+ remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
+
+ spin_lock_irq(rq->queue_lock);
+
+ continue;
+ }
+
+ dev = req->rq_disk->private_data;
+ tr = dev->tr;
+
+ spin_unlock_irq(rq->queue_lock);
+
+ mutex_lock(&dev->lock);
+ res = do_blktrans_request(tr, dev, req);
+ mutex_unlock(&dev->lock);
+
+ spin_lock_irq(rq->queue_lock);
+
+ end_request(req, res);
+ }
+ spin_unlock_irq(rq->queue_lock);
+
+ complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
+}
+
+static void mtd_blktrans_request(struct request_queue *rq)
+{
+ struct mtd_blktrans_ops *tr = rq->queuedata;
+ wake_up(&tr->blkcore_priv->thread_wq);
+}
+
+
+static int blktrans_open(struct inode *i, struct file *f)
+{
+ struct mtd_blktrans_dev *dev;
+ struct mtd_blktrans_ops *tr;
+ int ret = -ENODEV;
+
+ dev = i->i_bdev->bd_disk->private_data;
+ tr = dev->tr;
+
+ if (!try_module_get(dev->mtd->owner))
+ goto out;
+
+ if (!try_module_get(tr->owner))
+ goto out_tr;
+
+ /* FIXME: Locking. A hot pluggable device can go away
+ (del_mtd_device can be called for it) without its module
+ being unloaded. */
+ dev->mtd->usecount++;
+
+ ret = 0;
+ if (tr->open && (ret = tr->open(dev))) {
+ dev->mtd->usecount--;
+ module_put(dev->mtd->owner);
+ out_tr:
+ module_put(tr->owner);
+ }
+ out:
+ return ret;
+}
+
+static int blktrans_release(struct inode *i, struct file *f)
+{
+ struct mtd_blktrans_dev *dev;
+ struct mtd_blktrans_ops *tr;
+ int ret = 0;
+
+ dev = i->i_bdev->bd_disk->private_data;
+ tr = dev->tr;
+
+ if (tr->release)
+ ret = tr->release(dev);
+
+ if (!ret) {
+ dev->mtd->usecount--;
+ module_put(dev->mtd->owner);
+ module_put(tr->owner);
+ }
+
+ return ret;
+}
+
+
+static int blktrans_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
+ struct mtd_blktrans_ops *tr = dev->tr;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ if (tr->flush)
+ return tr->flush(dev);
+ /* The core code did the work, we had nothing to do. */
+ return 0;
+
+ case HDIO_GETGEO:
+ if (tr->getgeo) {
+ struct hd_geometry g;
+ int ret;
+
+ memset(&g, 0, sizeof(g));
+ ret = tr->getgeo(dev, &g);
+ if (ret)
+ return ret;
+
+ g.start = get_start_sect(inode->i_bdev);
+ if (copy_to_user((void __user *)arg, &g, sizeof(g)))
+ return -EFAULT;
+ return 0;
+ } /* else */
+ default:
+ return -ENOTTY;
+ }
+}
+
+struct block_device_operations mtd_blktrans_ops = {
+ .owner = THIS_MODULE,
+ .open = blktrans_open,
+ .release = blktrans_release,
+ .ioctl = blktrans_ioctl,
+};
+
+int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
+{
+ struct mtd_blktrans_ops *tr = new->tr;
+ struct list_head *this;
+ int last_devnum = -1;
+ struct gendisk *gd;
+
+ if (!!mutex_trylock(&mtd_table_mutex)) {
+ mutex_unlock(&mtd_table_mutex);
+ BUG();
+ }
+
+ list_for_each(this, &tr->devs) {
+ struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
+ if (new->devnum == -1) {
+ /* Use first free number */
+ if (d->devnum != last_devnum+1) {
+ /* Found a free devnum. Plug it in here */
+ new->devnum = last_devnum+1;
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ } else if (d->devnum == new->devnum) {
+ /* Required number taken */
+ return -EBUSY;
+ } else if (d->devnum > new->devnum) {
+ /* Required number was free */
+ list_add_tail(&new->list, &d->list);
+ goto added;
+ }
+ last_devnum = d->devnum;
+ }
+ if (new->devnum == -1)
+ new->devnum = last_devnum+1;
+
+ if ((new->devnum << tr->part_bits) > 256) {
+ return -EBUSY;
+ }
+
+ mutex_init(&new->lock);
+ list_add_tail(&new->list, &tr->devs);
+ added:
+ if (!tr->writesect)
+ new->readonly = 1;
+
+ gd = alloc_disk(1 << tr->part_bits);
+ if (!gd) {
+ list_del(&new->list);
+ return -ENOMEM;
+ }
+ gd->major = tr->major;
+ gd->first_minor = (new->devnum) << tr->part_bits;
+ gd->fops = &mtd_blktrans_ops;
+
+ if (tr->part_bits)
+ if (new->devnum < 26)
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%c", tr->name, 'a' + new->devnum);
+ else
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%c%c", tr->name,
+ 'a' - 1 + new->devnum / 26,
+ 'a' + new->devnum % 26);
+ else
+ snprintf(gd->disk_name, sizeof(gd->disk_name),
+ "%s%d", tr->name, new->devnum);
+
+ /* 2.5 has capacity in units of 512 bytes while still
+ having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
+ set_capacity(gd, (new->size * new->blksize) >> 9);
+
+ gd->private_data = new;
+ new->blkcore_priv = gd;
+ gd->queue = tr->blkcore_priv->rq;
+
+ if (new->readonly)
+ set_disk_ro(gd, 1);
+
+ add_disk(gd);
+
+ return 0;
+}
+
+int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
+{
+ if (!!mutex_trylock(&mtd_table_mutex)) {
+ mutex_unlock(&mtd_table_mutex);
+ BUG();
+ }
+
+ list_del(&old->list);
+
+ del_gendisk(old->blkcore_priv);
+ put_disk(old->blkcore_priv);
+
+ return 0;
+}
+
+static void blktrans_notify_remove(struct mtd_info *mtd)
+{
+ struct list_head *this, *this2, *next;
+
+ list_for_each(this, &blktrans_majors) {
+ struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
+
+ list_for_each_safe(this2, next, &tr->devs) {
+ struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);
+
+ if (dev->mtd == mtd)
+ tr->remove_dev(dev);
+ }
+ }
+}
+
+static void blktrans_notify_add(struct mtd_info *mtd)
+{
+ struct list_head *this;
+
+ if (mtd->type == MTD_ABSENT)
+ return;
+
+ list_for_each(this, &blktrans_majors) {
+ struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
+
+ tr->add_mtd(tr, mtd);
+ }
+
+}
+
+static struct mtd_notifier blktrans_notifier = {
+ .add = blktrans_notify_add,
+ .remove = blktrans_notify_remove,
+};
+
+int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
+{
+ int ret, i;
+
+ /* Register the notifier if/when the first device type is
+ registered, to prevent the link/init ordering from fucking
+ us over. */
+ if (!blktrans_notifier.list.next)
+ register_mtd_user(&blktrans_notifier);
+
+ tr->blkcore_priv = kmalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
+ if (!tr->blkcore_priv)
+ return -ENOMEM;
+
+ memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));
+
+ mutex_lock(&mtd_table_mutex);
+
+ ret = register_blkdev(tr->major, tr->name);
+ if (ret) {
+ printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
+ tr->name, tr->major, ret);
+ kfree(tr->blkcore_priv);
+ mutex_unlock(&mtd_table_mutex);
+ return ret;
+ }
+ spin_lock_init(&tr->blkcore_priv->queue_lock);
+ init_completion(&tr->blkcore_priv->thread_dead);
+ init_waitqueue_head(&tr->blkcore_priv->thread_wq);
+
+ tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
+ if (!tr->blkcore_priv->rq) {
+ unregister_blkdev(tr->major, tr->name);
+ kfree(tr->blkcore_priv);
+ mutex_unlock(&mtd_table_mutex);
+ return -ENOMEM;
+ }
+
+ tr->blkcore_priv->rq->queuedata = tr;
+
+ ret = kernel_thread(mtd_blktrans_thread, tr, CLONE_KERNEL);
+ if (ret < 0) {
+ blk_cleanup_queue(tr->blkcore_priv->rq);
+ unregister_blkdev(tr->major, tr->name);
+ kfree(tr->blkcore_priv);
+ mutex_unlock(&mtd_table_mutex);
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&tr->devs);
+ list_add(&tr->list, &blktrans_majors);
+
+ for (i=0; i<MAX_MTD_DEVICES; i++) {
+ if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
+ tr->add_mtd(tr, mtd_table[i]);
+ }
+
+ mutex_unlock(&mtd_table_mutex);
+
+ return 0;
+}
+
+int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
+{
+ struct list_head *this, *next;
+
+ mutex_lock(&mtd_table_mutex);
+
+ /* Clean up the kernel thread */
+ tr->blkcore_priv->exiting = 1;
+ wake_up(&tr->blkcore_priv->thread_wq);
+ wait_for_completion(&tr->blkcore_priv->thread_dead);
+
+ /* Remove it from the list of active majors */
+ list_del(&tr->list);
+
+ list_for_each_safe(this, next, &tr->devs) {
+ struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
+ tr->remove_dev(dev);
+ }
+
+ blk_cleanup_queue(tr->blkcore_priv->rq);
+ unregister_blkdev(tr->major, tr->name);
+
+ mutex_unlock(&mtd_table_mutex);
+
+ kfree(tr->blkcore_priv);
+
+ if (!list_empty(&tr->devs))
+ BUG();
+ return 0;
+}
+
+static void __exit mtd_blktrans_exit(void)
+{
+ /* No race here -- if someone's currently in register_mtd_blktrans
+ we're screwed anyway. */
+ if (blktrans_notifier.list.next)
+ unregister_mtd_user(&blktrans_notifier);
+}
+
+module_exit(mtd_blktrans_exit);
+
+EXPORT_SYMBOL_GPL(register_mtd_blktrans);
+EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
+EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
+EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
+
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
diff --git a/linux-2.4.x/drivers/mtd/mtdblock.c b/linux-2.4.x/drivers/mtd/mtdblock.c
index fdea52d..3605286 100644
--- a/linux-2.4.x/drivers/mtd/mtdblock.c
+++ b/linux-2.4.x/drivers/mtd/mtdblock.c
@@ -1,76 +1,40 @@
-/*
+/*
* Direct MTD block device access
*
- * $Id: mtdblock.c,v 1.51 2001/11/20 11:42:33 dwmw2 Exp $
+ * $Id: mtdblock.c,v 1.69 2006/03/29 08:26:27 dwmw2 Exp $
*
- * 02-nov-2000 Nicolas Pitre Added read-modify-write with cache
+ * (C) 2000-2003 Nicolas Pitre <nico@cam.org>
+ * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
*/
#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
#include <linux/mtd/mtd.h>
-#include <linux/mtd/compatmac.h>
-
-#define MAJOR_NR MTD_BLOCK_MAJOR
-#define DEVICE_NAME "mtdblock"
-#define DEVICE_REQUEST mtdblock_request
-#define DEVICE_NR(device) (device)
-#define DEVICE_ON(device)
-#define DEVICE_OFF(device)
-#define DEVICE_NO_RANDOM
-#include <linux/blk.h>
-/* for old kernels... */
-#ifndef QUEUE_EMPTY
-#define QUEUE_EMPTY (!CURRENT)
-#endif
-#if LINUX_VERSION_CODE < 0x20300
-#define QUEUE_PLUGGED (blk_dev[MAJOR_NR].plug_tq.sync)
-#else
-#define QUEUE_PLUGGED (blk_dev[MAJOR_NR].request_queue.plugged)
-#endif
-
-#ifdef CONFIG_DEVFS_FS
-#include <linux/devfs_fs_kernel.h>
-static void mtd_notify_add(struct mtd_info* mtd);
-static void mtd_notify_remove(struct mtd_info* mtd);
-static struct mtd_notifier notifier = {
- mtd_notify_add,
- mtd_notify_remove,
- NULL
-};
-static devfs_handle_t devfs_dir_handle = NULL;
-static devfs_handle_t devfs_rw_handle[MAX_MTD_DEVICES];
-#endif
+#include <linux/mtd/blktrans.h>
+#include <linux/mutex.h>
+
static struct mtdblk_dev {
- struct mtd_info *mtd; /* Locked */
+ struct mtd_info *mtd;
int count;
- struct semaphore cache_sem;
+ struct mutex cache_mutex;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES];
-static spinlock_t mtdblks_lock;
-
-static int mtd_sizes[MAX_MTD_DEVICES];
-static int mtd_blksizes[MAX_MTD_DEVICES];
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,14)
-#define BLK_INC_USE_COUNT MOD_INC_USE_COUNT
-#define BLK_DEC_USE_COUNT MOD_DEC_USE_COUNT
-#else
-#define BLK_INC_USE_COUNT do {} while(0)
-#define BLK_DEC_USE_COUNT do {} while(0)
-#endif
-
/*
* Cache stuff...
- *
+ *
* Since typical flash erasable sectors are much larger than what Linux's
* buffer cache can handle, we must implement read-modify-write on flash
* sectors for each block write requests. To avoid over-erasing flash sectors
@@ -84,7 +48,7 @@ static void erase_callback(struct erase_info *done)
wake_up(wait_q);
}
-static int erase_write (struct mtd_info *mtd, unsigned long pos,
+static int erase_write (struct mtd_info *mtd, unsigned long pos,
int len, const char *buf)
{
struct erase_info erase;
@@ -142,18 +106,18 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
return 0;
DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
- "at 0x%lx, size 0x%x\n", mtd->name,
+ "at 0x%lx, size 0x%x\n", mtd->name,
mtdblk->cache_offset, mtdblk->cache_size);
-
- ret = erase_write (mtd, mtdblk->cache_offset,
+
+ ret = erase_write (mtd, mtdblk->cache_offset,
mtdblk->cache_size, mtdblk->cache_data);
if (ret)
return ret;
/*
- * Here we could argably set the cache state to STATE_CLEAN.
- * However this could lead to inconsistency since we will not
- * be notified if this content is altered on the flash by other
+ * Here we could arguably set the cache state to STATE_CLEAN.
+ * However this could lead to inconsistency since we will not
+ * be notified if this content is altered on the flash by other
* means. Let's declare it empty and leave buffering tasks to
* the buffer cache instead.
*/
@@ -162,7 +126,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
}
-static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
+static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, const char *buf)
{
struct mtd_info *mtd = mtdblk->mtd;
@@ -172,7 +136,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
-
+
if (!sect_size)
return MTD_WRITE (mtd, pos, len, &retlen, buf);
@@ -180,11 +144,11 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
unsigned long sect_start = (pos/sect_size)*sect_size;
unsigned int offset = pos - sect_start;
unsigned int size = sect_size - offset;
- if( size > len )
+ if( size > len )
size = len;
if (size == sect_size) {
- /*
+ /*
* We are covering a whole sector. Thus there is no
* need to bother with the cache while it may still be
* useful for other partial writes.
@@ -198,7 +162,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
if (mtdblk->cache_state == STATE_DIRTY &&
mtdblk->cache_offset != sect_start) {
ret = write_cached_data(mtdblk);
- if (ret)
+ if (ret)
return ret;
}
@@ -231,7 +195,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
}
-static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
+static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
int len, char *buf)
{
struct mtd_info *mtd = mtdblk->mtd;
@@ -239,9 +203,9 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
size_t retlen;
int ret;
- DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
+ DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
mtd->name, pos, len);
-
+
if (!sect_size)
return MTD_READ (mtd, pos, len, &retlen, buf);
@@ -249,7 +213,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
unsigned long sect_start = (pos/sect_size)*sect_size;
unsigned int offset = pos - sect_start;
unsigned int size = sect_size - offset;
- if (size > len)
+ if (size > len)
size = len;
/*
@@ -277,433 +241,152 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
return 0;
}
+static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ return do_cached_read(mtdblk, block<<9, 512, buf);
+}
+static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
+{
+ struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+ if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
+ mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
+ if (!mtdblk->cache_data)
+ return -EINTR;
+ /* -EINTR is not really correct, but it is the best match
+ * documented in man 2 write for all cases. We could also
+ * return -EAGAIN sometimes, but why bother?
+ */
+ }
+ return do_cached_write(mtdblk, block<<9, 512, buf);
+}
-static int mtdblock_open(struct inode *inode, struct file *file)
+static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk;
- struct mtd_info *mtd;
- int dev;
+ struct mtd_info *mtd = mbd->mtd;
+ int dev = mbd->devnum;
DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
-
- if (!inode)
- return -EINVAL;
-
- dev = MINOR(inode->i_rdev);
- if (dev >= MAX_MTD_DEVICES)
- return -EINVAL;
-
- BLK_INC_USE_COUNT;
-
- mtd = get_mtd_device(NULL, dev);
- if (!mtd)
- return -ENODEV;
- if (MTD_ABSENT == mtd->type) {
- put_mtd_device(mtd);
- BLK_DEC_USE_COUNT;
- return -ENODEV;
- }
-
- spin_lock(&mtdblks_lock);
- /* If it's already open, no need to piss about. */
if (mtdblks[dev]) {
mtdblks[dev]->count++;
- spin_unlock(&mtdblks_lock);
return 0;
}
-
- /* OK, it's not open. Try to find it */
-
- /* First we have to drop the lock, because we have to
- to things which might sleep.
- */
- spin_unlock(&mtdblks_lock);
+ /* OK, it's not open. Create cache info for it */
mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
- if (!mtdblk) {
- put_mtd_device(mtd);
- BLK_DEC_USE_COUNT;
+ if (!mtdblk)
return -ENOMEM;
- }
+
memset(mtdblk, 0, sizeof(*mtdblk));
mtdblk->count = 1;
mtdblk->mtd = mtd;
- init_MUTEX (&mtdblk->cache_sem);
+ mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY;
if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM &&
mtdblk->mtd->erasesize) {
mtdblk->cache_size = mtdblk->mtd->erasesize;
- mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
- if (!mtdblk->cache_data) {
- put_mtd_device(mtdblk->mtd);
- kfree(mtdblk);
- BLK_DEC_USE_COUNT;
- return -ENOMEM;
- }
- }
-
- /* OK, we've created a new one. Add it to the list. */
-
- spin_lock(&mtdblks_lock);
-
- if (mtdblks[dev]) {
- /* Another CPU made one at the same time as us. */
- mtdblks[dev]->count++;
- spin_unlock(&mtdblks_lock);
- put_mtd_device(mtdblk->mtd);
- vfree(mtdblk->cache_data);
- kfree(mtdblk);
- return 0;
+ mtdblk->cache_data = NULL;
}
mtdblks[dev] = mtdblk;
- mtd_sizes[dev] = mtdblk->mtd->size/1024;
- if (mtdblk->mtd->erasesize)
- mtd_blksizes[dev] = mtdblk->mtd->erasesize;
- if (mtd_blksizes[dev] > PAGE_SIZE)
- mtd_blksizes[dev] = PAGE_SIZE;
- set_device_ro (inode->i_rdev, !(mtdblk->mtd->flags & MTD_WRITEABLE));
-
- spin_unlock(&mtdblks_lock);
-
+
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
return 0;
}
-static release_t mtdblock_release(struct inode *inode, struct file *file)
+static int mtdblock_release(struct mtd_blktrans_dev *mbd)
{
- int dev;
- struct mtdblk_dev *mtdblk;
- DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
-
- if (inode == NULL)
- release_return(-ENODEV);
+ int dev = mbd->devnum;
+ struct mtdblk_dev *mtdblk = mtdblks[dev];
- dev = MINOR(inode->i_rdev);
- mtdblk = mtdblks[dev];
+ DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
- down(&mtdblk->cache_sem);
+ mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
- up(&mtdblk->cache_sem);
+ mutex_unlock(&mtdblk->cache_mutex);
- spin_lock(&mtdblks_lock);
if (!--mtdblk->count) {
/* It was the last usage. Free the device */
mtdblks[dev] = NULL;
- spin_unlock(&mtdblks_lock);
if (mtdblk->mtd->sync)
mtdblk->mtd->sync(mtdblk->mtd);
- put_mtd_device(mtdblk->mtd);
vfree(mtdblk->cache_data);
kfree(mtdblk);
- } else {
- spin_unlock(&mtdblks_lock);
}
-
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
- BLK_DEC_USE_COUNT;
- release_return(0);
-}
-
-
-/*
- * This is a special request_fn because it is executed in a process context
- * to be able to sleep independently of the caller. The io_request_lock
- * is held upon entry and exit.
- * The head of our request queue is considered active so there is no need
- * to dequeue requests before we are done.
- */
-static void handle_mtdblock_request(void)
-{
- struct request *req;
- struct mtdblk_dev *mtdblk;
- unsigned int res;
-
- for (;;) {
- INIT_REQUEST;
- req = CURRENT;
- spin_unlock_irq(&io_request_lock);
- mtdblk = mtdblks[MINOR(req->rq_dev)];
- res = 0;
-
- if (MINOR(req->rq_dev) >= MAX_MTD_DEVICES)
- panic(__FUNCTION__": minor out of bound");
-
- if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
- goto end_req;
-
- // Handle the request
- switch (req->cmd)
- {
- int err;
-
- case READ:
- down(&mtdblk->cache_sem);
- err = do_cached_read (mtdblk, req->sector << 9,
- req->current_nr_sectors << 9,
- req->buffer);
- up(&mtdblk->cache_sem);
- if (!err)
- res = 1;
- break;
-
- case WRITE:
- // Read only device
- if ( !(mtdblk->mtd->flags & MTD_WRITEABLE) )
- break;
-
- // Do the write
- down(&mtdblk->cache_sem);
- err = do_cached_write (mtdblk, req->sector << 9,
- req->current_nr_sectors << 9,
- req->buffer);
- up(&mtdblk->cache_sem);
- if (!err)
- res = 1;
- break;
- }
-
-end_req:
- spin_lock_irq(&io_request_lock);
- end_request(res);
- }
+ return 0;
}
-static volatile int leaving = 0;
-static DECLARE_MUTEX_LOCKED(thread_sem);
-static DECLARE_WAIT_QUEUE_HEAD(thr_wq);
-
-int mtdblock_thread(void *dummy)
+static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
- struct task_struct *tsk = current;
- DECLARE_WAITQUEUE(wait, tsk);
-
- tsk->session = 1;
- tsk->pgrp = 1;
- /* we might get involved when memory gets low, so use PF_MEMALLOC */
- tsk->flags |= PF_MEMALLOC;
- strcpy(tsk->comm, "mtdblockd");
- tsk->tty = NULL;
- spin_lock_irq(&tsk->sigmask_lock);
- sigfillset(&tsk->blocked);
- recalc_sigpending(tsk);
- spin_unlock_irq(&tsk->sigmask_lock);
- exit_mm(tsk);
- exit_files(tsk);
- exit_sighand(tsk);
- exit_fs(tsk);
-
- while (!leaving) {
- add_wait_queue(&thr_wq, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irq(&io_request_lock);
- if (QUEUE_EMPTY || QUEUE_PLUGGED) {
- spin_unlock_irq(&io_request_lock);
- schedule();
- remove_wait_queue(&thr_wq, &wait);
- } else {
- remove_wait_queue(&thr_wq, &wait);
- set_current_state(TASK_RUNNING);
- handle_mtdblock_request();
- spin_unlock_irq(&io_request_lock);
- }
- }
+ struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
+
+ mutex_lock(&mtdblk->cache_mutex);
+ write_cached_data(mtdblk);
+ mutex_unlock(&mtdblk->cache_mutex);
- up(&thread_sem);
+ if (mtdblk->mtd->sync)
+ mtdblk->mtd->sync(mtdblk->mtd);
return 0;
}
-#if LINUX_VERSION_CODE < 0x20300
-#define RQFUNC_ARG void
-#else
-#define RQFUNC_ARG request_queue_t *q
-#endif
-
-static void mtdblock_request(RQFUNC_ARG)
+static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
- /* Don't do anything, except wake the thread if necessary */
- wake_up(&thr_wq);
-}
+ struct mtd_blktrans_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return;
-static int mtdblock_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- struct mtdblk_dev *mtdblk;
+ memset(dev, 0, sizeof(*dev));
- mtdblk = mtdblks[MINOR(inode->i_rdev)];
+ dev->mtd = mtd;
+ dev->devnum = mtd->index;
+ dev->blksize = 512;
+ dev->size = mtd->size >> 9;
+ dev->tr = tr;
-#ifdef PARANOIA
- if (!mtdblk)
- BUG();
-#endif
-
- switch (cmd) {
- case BLKGETSIZE: /* Return device size */
- return put_user((mtdblk->mtd->size >> 9), (unsigned long *) arg);
-
-#ifdef BLKGETSIZE64
- case BLKGETSIZE64:
- return put_user((u64)mtdblk->mtd->size, (u64 *)arg);
-#endif
-
- case BLKFLSBUF:
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- if(!capable(CAP_SYS_ADMIN))
- return -EACCES;
-#endif
- fsync_dev(inode->i_rdev);
- invalidate_buffers(inode->i_rdev);
- down(&mtdblk->cache_sem);
- write_cached_data(mtdblk);
- up(&mtdblk->cache_sem);
- if (mtdblk->mtd->sync)
- mtdblk->mtd->sync(mtdblk->mtd);
- return 0;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ dev->readonly = 1;
- default:
- return -EINVAL;
- }
+ add_mtd_blktrans_dev(dev);
}
-
-#ifdef MAGIC_ROM_PTR
-static int
-mtdblock_romptr(kdev_t dev, struct vm_area_struct * vma)
+static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
- struct mtd_info *mtd;
- u_char *ptr;
- size_t len;
-
- mtd = __get_mtd_device(NULL, MINOR(dev));
-
- if (!mtd->point)
- return -ENOSYS; /* Can't do it, No function to point to correct addr */
-
- if ((*mtd->point)(mtd,vma->vm_offset,vma->vm_end-vma->vm_start,&len,&ptr) != 0)
- return -ENOSYS;
-
- vma->vm_start = (unsigned long) ptr;
- vma->vm_end = vma->vm_start + len;
- return 0;
+ del_mtd_blktrans_dev(dev);
+ kfree(dev);
}
-#endif
-
-#if LINUX_VERSION_CODE < 0x20326
-static struct file_operations mtd_fops =
-{
- open: mtdblock_open,
- ioctl: mtdblock_ioctl,
- release: mtdblock_release,
- read: block_read,
-#ifdef MAGIC_ROM_PTR
- romptr: mtdblock_romptr,
-#endif
- write: block_write
-};
-#else
-static struct block_device_operations mtd_fops =
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,14)
- owner: THIS_MODULE,
-#endif
- open: mtdblock_open,
- release: mtdblock_release,
-#ifdef MAGIC_ROM_PTR
- romptr: mtdblock_romptr,
-#endif
- ioctl: mtdblock_ioctl
+static struct mtd_blktrans_ops mtdblock_tr = {
+ .name = "mtdblock",
+ .major = 31,
+ .part_bits = 0,
+ .open = mtdblock_open,
+ .flush = mtdblock_flush,
+ .release = mtdblock_release,
+ .readsect = mtdblock_readsect,
+ .writesect = mtdblock_writesect,
+ .add_mtd = mtdblock_add_mtd,
+ .remove_dev = mtdblock_remove_dev,
+ .owner = THIS_MODULE,
};
-#endif
-
-#ifdef CONFIG_DEVFS_FS
-/* Notification that a new device has been added. Create the devfs entry for
- * it. */
-
-static void mtd_notify_add(struct mtd_info* mtd)
-{
- char name[8];
- if (!mtd || mtd->type == MTD_ABSENT)
- return;
-
- sprintf(name, "%d", mtd->index);
- devfs_rw_handle[mtd->index] = devfs_register(devfs_dir_handle, name,
- DEVFS_FL_DEFAULT, MTD_BLOCK_MAJOR, mtd->index,
- S_IFBLK | S_IRUGO | S_IWUGO,
- &mtd_fops, NULL);
-}
-
-static void mtd_notify_remove(struct mtd_info* mtd)
-{
- if (!mtd || mtd->type == MTD_ABSENT)
- return;
-
- devfs_unregister(devfs_rw_handle[mtd->index]);
-}
-#endif
-
-int __init init_mtdblock(void)
+static int __init init_mtdblock(void)
{
- int i;
-
- spin_lock_init(&mtdblks_lock);
-#ifdef CONFIG_DEVFS_FS
- if (devfs_register_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME, &mtd_fops))
- {
- printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
- MTD_BLOCK_MAJOR);
- return -EAGAIN;
- }
-
- devfs_dir_handle = devfs_mk_dir(NULL, DEVICE_NAME, NULL);
- register_mtd_user(&notifier);
-#else
- if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
- printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
- MTD_BLOCK_MAJOR);
- return -EAGAIN;
- }
-#endif
- DEBUG(MTD_DEBUG_LEVEL3,
- "init_mtdblock: allocated major number %d.\n", MTD_BLOCK_MAJOR);
-
- /* We fill it in at open() time. */
- for (i=0; i< MAX_MTD_DEVICES; i++) {
- mtd_sizes[i] = 0;
- mtd_blksizes[i] = BLOCK_SIZE;
- }
- init_waitqueue_head(&thr_wq);
- /* Allow the block size to default to BLOCK_SIZE. */
- blksize_size[MAJOR_NR] = mtd_blksizes;
- blk_size[MAJOR_NR] = mtd_sizes;
-
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
- kernel_thread (mtdblock_thread, NULL, CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
- return 0;
+ return register_mtd_blktrans(&mtdblock_tr);
}
static void __exit cleanup_mtdblock(void)
{
- leaving = 1;
- wake_up(&thr_wq);
- down(&thread_sem);
-#ifdef CONFIG_DEVFS_FS
- unregister_mtd_user(&notifier);
- devfs_unregister(devfs_dir_handle);
- devfs_unregister_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME);
-#else
- unregister_blkdev(MAJOR_NR,DEVICE_NAME);
-#endif
- blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
- blksize_size[MAJOR_NR] = NULL;
- blk_size[MAJOR_NR] = NULL;
+ deregister_mtd_blktrans(&mtdblock_tr);
}
module_init(init_mtdblock);
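[Reviewer's aside, not part of the patch: the write path kept above never erases more than it has to. do_cached_write() and do_cached_read() walk a byte range in pieces that stay inside one erase block, so each piece can be served from (or merged into) a single erase-block-sized cache buffer. The standalone illustration below condenses that arithmetic; the 64 KiB erase size in the example is arbitrary.]

#include <stdio.h>

/* Split [pos, pos+len) into pieces that never cross an erase-block
 * boundary, mirroring the loop in do_cached_write()/do_cached_read(). */
static void split_into_erase_blocks(unsigned long pos, unsigned int len,
				    unsigned int sect_size)
{
	while (len > 0) {
		unsigned long sect_start = (pos / sect_size) * sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;

		if (size > len)
			size = len;

		/* A whole-block piece (size == sect_size) bypasses the cache;
		 * partial pieces go through the read-modify-write cache. */
		printf("block @%#lx: %u bytes at offset %u%s\n",
		       sect_start, size, offset,
		       size == sect_size ? " (whole block)" : "");

		pos += size;
		len -= size;
	}
}

/* e.g. split_into_erase_blocks(0x1f000, 0x3000, 0x10000) yields a 0x1000
 * tail of the first 64 KiB block and a 0x2000 head of the second. */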
diff --git a/linux-2.4.x/drivers/mtd/mtdblock_ro.c b/linux-2.4.x/drivers/mtd/mtdblock_ro.c
index a66b852..0c830ba 100644
--- a/linux-2.4.x/drivers/mtd/mtdblock_ro.c
+++ b/linux-2.4.x/drivers/mtd/mtdblock_ro.c
@@ -1,333 +1,87 @@
/*
- * $Id: mtdblock_ro.c,v 1.12 2001/11/20 11:42:33 dwmw2 Exp $
+ * $Id: mtdblock_ro.c,v 1.19 2004/11/16 18:28:59 dwmw2 Exp $
*
- * Read-only version of the mtdblock device, without the
- * read/erase/modify/writeback stuff
+ * (C) 2003 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Simple read-only (writable only for RAM) mtdblock driver
*/
-#ifdef MTDBLOCK_DEBUG
-#define DEBUGLVL debug
-#endif
-
-
-#include <linux/module.h>
-#include <linux/types.h>
-
+#include <linux/init.h>
+#include <linux/slab.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/compatmac.h>
-
-#define MAJOR_NR MTD_BLOCK_MAJOR
-#define DEVICE_NAME "mtdblock"
-#define DEVICE_REQUEST mtdblock_request
-#define DEVICE_NR(device) (device)
-#define DEVICE_ON(device)
-#define DEVICE_OFF(device)
-#define DEVICE_NO_RANDOM
-#include <linux/blk.h>
-
-#if LINUX_VERSION_CODE < 0x20300
-#define RQFUNC_ARG void
-#define blkdev_dequeue_request(req) do {CURRENT = req->next;} while (0)
-#else
-#define RQFUNC_ARG request_queue_t *q
-#endif
-
-#ifdef MTDBLOCK_DEBUG
-static int debug = MTDBLOCK_DEBUG;
-MODULE_PARM(debug, "i");
-#endif
+#include <linux/mtd/blktrans.h>
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,14)
-#define BLK_INC_USE_COUNT MOD_INC_USE_COUNT
-#define BLK_DEC_USE_COUNT MOD_DEC_USE_COUNT
-#else
-#define BLK_INC_USE_COUNT do {} while(0)
-#define BLK_DEC_USE_COUNT do {} while(0)
-#endif
-
-static int mtd_sizes[MAX_MTD_DEVICES];
-
-
-static int mtdblock_open(struct inode *inode, struct file *file)
+static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
{
- struct mtd_info *mtd = NULL;
-
- int dev;
-
- DEBUG(1,"mtdblock_open\n");
-
- if (inode == 0)
- return -EINVAL;
-
- dev = MINOR(inode->i_rdev);
-
- mtd = get_mtd_device(NULL, dev);
- if (!mtd)
- return -EINVAL;
- if (MTD_ABSENT == mtd->type) {
- put_mtd_device(mtd);
- return -EINVAL;
- }
-
- BLK_INC_USE_COUNT;
-
- mtd_sizes[dev] = mtd->size>>9;
-
- DEBUG(1, "ok\n");
+ size_t retlen;
+ if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf))
+ return 1;
return 0;
}
-static release_t mtdblock_release(struct inode *inode, struct file *file)
-{
- int dev;
- struct mtd_info *mtd;
-
- DEBUG(1, "mtdblock_release\n");
-
- if (inode == NULL)
- release_return(-ENODEV);
-
- dev = MINOR(inode->i_rdev);
- mtd = __get_mtd_device(NULL, dev);
-
- if (!mtd) {
- printk(KERN_WARNING "MTD device is absent on mtd_release!\n");
- BLK_DEC_USE_COUNT;
- release_return(-ENODEV);
- }
-
- if (mtd->sync)
- mtd->sync(mtd);
-
- put_mtd_device(mtd);
-
- DEBUG(1, "ok\n");
-
- BLK_DEC_USE_COUNT;
- release_return(0);
-}
-
-
-static void mtdblock_request(RQFUNC_ARG)
+static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buf)
{
- struct request *current_request;
- unsigned int res = 0;
- struct mtd_info *mtd;
-
- while (1)
- {
- /* Grab the Request and unlink it from the request list, INIT_REQUEST
- will execute a return if we are done. */
- INIT_REQUEST;
- current_request = CURRENT;
-
- if (MINOR(current_request->rq_dev) >= MAX_MTD_DEVICES)
- {
- printk("mtd: Unsupported device!\n");
- end_request(0);
- continue;
- }
-
- // Grab our MTD structure
+ size_t retlen;
- mtd = __get_mtd_device(NULL, MINOR(current_request->rq_dev));
- if (!mtd) {
- printk("MTD device %d doesn't appear to exist any more\n", CURRENT_DEV);
- end_request(0);
- }
-
- if (current_request->sector << 9 > mtd->size ||
- (current_request->sector + current_request->nr_sectors) << 9 > mtd->size)
- {
- printk("mtd: Attempt to read past end of device!\n");
- printk("size: %x, sector: %lx, nr_sectors %lx\n", mtd->size, current_request->sector, current_request->nr_sectors);
- end_request(0);
- continue;
- }
-
- /* Remove the request we are handling from the request list so nobody messes
- with it */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- /* Now drop the lock that the ll_rw_blk functions grabbed for us
- and process the request. This is necessary due to the extreme time
- we spend processing it. */
- spin_unlock_irq(&io_request_lock);
-#endif
-
- // Handle the request
- switch (current_request->cmd)
- {
- size_t retlen;
-
- case READ:
- if (MTD_READ(mtd,current_request->sector<<9,
- current_request->nr_sectors << 9,
- &retlen, current_request->buffer) == 0)
- res = 1;
- else
- res = 0;
- break;
-
- case WRITE:
-
- /* printk("mtdblock_request WRITE sector=%d(%d)\n",current_request->sector,
- current_request->nr_sectors);
- */
-
- // Read only device
- if ((mtd->flags & MTD_CAP_RAM) == 0)
- {
- res = 0;
- break;
- }
-
- // Do the write
- if (MTD_WRITE(mtd,current_request->sector<<9,
- current_request->nr_sectors << 9,
- &retlen, current_request->buffer) == 0)
- res = 1;
- else
- res = 0;
- break;
-
- // Shouldn't happen
- default:
- printk("mtd: unknown request\n");
- break;
- }
-
- // Grab the lock and re-thread the item onto the linked list
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- spin_lock_irq(&io_request_lock);
-#endif
- end_request(res);
- }
+ if (dev->mtd->write(dev->mtd, (block * 512), 512, &retlen, buf))
+ return 1;
+ return 0;
}
-
-
-static int mtdblock_ioctl(struct inode * inode, struct file * file,
- unsigned int cmd, unsigned long arg)
+static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
- struct mtd_info *mtd;
+ struct mtd_blktrans_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
- mtd = __get_mtd_device(NULL, MINOR(inode->i_rdev));
+ if (!dev)
+ return;
- if (!mtd) return -EINVAL;
+ memset(dev, 0, sizeof(*dev));
- switch (cmd) {
- case BLKGETSIZE: /* Return device size */
- return put_user((mtd->size >> 9), (unsigned long *) arg);
+ dev->mtd = mtd;
+ dev->devnum = mtd->index;
+ dev->blksize = 512;
+ dev->size = mtd->size >> 9;
+ dev->tr = tr;
+ if ((mtd->flags & (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEABLE)) !=
+ (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEABLE))
+ dev->readonly = 1;
-#ifdef BLKGETSIZE64
- case BLKGETSIZE64:
- return put_user((u64)mtd->size, (u64 *)arg);
-#endif
-
- case BLKFLSBUF:
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- if(!capable(CAP_SYS_ADMIN)) return -EACCES;
-#endif
- fsync_dev(inode->i_rdev);
- invalidate_buffers(inode->i_rdev);
- if (mtd->sync)
- mtd->sync(mtd);
- return 0;
-
- default:
- return -ENOTTY;
- }
+ add_mtd_blktrans_dev(dev);
}
-
-#ifdef MAGIC_ROM_PTR
-static int
-mtdblock_romptr(kdev_t dev, struct vm_area_struct * vma)
+static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
- struct mtd_info *mtd;
- u_char *ptr;
- size_t len;
-
- mtd = __get_mtd_device(NULL, MINOR(dev));
-
- if (!mtd->point)
- return -ENOSYS; /* Can't do it, No function to point to correct addr */
-
- if ((*mtd->point)(mtd,vma->vm_offset,vma->vm_end-vma->vm_start,&len,&ptr) != 0)
- return -ENOSYS;
-
- vma->vm_start = (unsigned long) ptr;
- vma->vm_end = vma->vm_start + len;
- return 0;
+ del_mtd_blktrans_dev(dev);
+ kfree(dev);
}
-#endif
-
-#if LINUX_VERSION_CODE < 0x20326
-static struct file_operations mtd_fops =
-{
- open: mtdblock_open,
- ioctl: mtdblock_ioctl,
- release: mtdblock_release,
- read: block_read,
-#ifdef MAGIC_ROM_PTR
- romptr: mtdblock_romptr,
-#endif
- write: block_write
-};
-#else
-static struct block_device_operations mtd_fops =
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,14)
- owner: THIS_MODULE,
-#endif
- open: mtdblock_open,
- release: mtdblock_release,
-#ifdef MAGIC_ROM_PTR
- romptr: mtdblock_romptr,
-#endif
- ioctl: mtdblock_ioctl
+static struct mtd_blktrans_ops mtdblock_tr = {
+ .name = "mtdblock",
+ .major = 31,
+ .part_bits = 0,
+ .readsect = mtdblock_readsect,
+ .writesect = mtdblock_writesect,
+ .add_mtd = mtdblock_add_mtd,
+ .remove_dev = mtdblock_remove_dev,
+ .owner = THIS_MODULE,
};
-#endif
-int __init init_mtdblock(void)
+static int __init mtdblock_init(void)
{
- int i;
-
- if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
- printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
- MTD_BLOCK_MAJOR);
- return -EAGAIN;
- }
- DEBUG(MTD_DEBUG_LEVEL3,
- "init_mtdblock: allocated major number %d (read only).\n",
- MTD_BLOCK_MAJOR);
-
- /* We fill it in at open() time. */
- for (i=0; i< MAX_MTD_DEVICES; i++) {
- mtd_sizes[i] = 0;
- }
-
- /* Allow the block size to default to BLOCK_SIZE. */
- blksize_size[MAJOR_NR] = NULL;
- blk_size[MAJOR_NR] = mtd_sizes;
-
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
- return 0;
+ return register_mtd_blktrans(&mtdblock_tr);
}
-static void __exit cleanup_mtdblock(void)
+static void __exit mtdblock_exit(void)
{
- unregister_blkdev(MAJOR_NR,DEVICE_NAME);
- blksize_size[MAJOR_NR] = NULL;
- blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
+ deregister_mtd_blktrans(&mtdblock_tr);
}
-module_init(init_mtdblock);
-module_exit(cleanup_mtdblock);
-
+module_init(mtdblock_init);
+module_exit(mtdblock_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al.");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Simple read-only block device emulation access to MTD devices");
diff --git a/linux-2.4.x/drivers/mtd/mtdchar.c b/linux-2.4.x/drivers/mtd/mtdchar.c
index 1780055..fdb7e66 100644
--- a/linux-2.4.x/drivers/mtd/mtdchar.c
+++ b/linux-2.4.x/drivers/mtd/mtdchar.c
@@ -1,66 +1,113 @@
/*
- * $Id: mtdchar.c,v 1.44 2001/10/02 15:05:11 dwmw2 Exp $
+ * $Id: mtdchar.c,v 1.80 2006/02/22 00:26:36 kmpark Exp $
*
* Character-device access to raw MTD devices.
- * Pure 2.4 version - compatibility cruft removed to mtdchar-compat.c
*
*/
#include <linux/config.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mtd/mtd.h>
#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/compatmac.h>
+
+#include <asm/uaccess.h>
+
+static struct class *mtd_class;
+
+static void mtd_notify_add(struct mtd_info* mtd)
+{
+ if (!mtd)
+ return;
-#ifdef CONFIG_DEVFS_FS
-#include <linux/devfs_fs_kernel.h>
-static void mtd_notify_add(struct mtd_info* mtd);
-static void mtd_notify_remove(struct mtd_info* mtd);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
+ /*
+ * In 2.6.13 and 2.6.14 kernels class_device_create() had different
+ * prototype (they had no 'parent' parameter).
+ * No idea how to fix it in compatmac.h, so this is a hotfix. Must go sometime.
+ */
+ class_device_create(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
+ NULL, "mtd%d", mtd->index);
+
+ class_device_create(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
+ NULL, "mtd%dro", mtd->index);
+#else
+ class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
+ NULL, "mtd%d", mtd->index);
+
+ class_device_create(mtd_class, NULL,
+ MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
+ NULL, "mtd%dro", mtd->index);
+#endif
+}
+
+static void mtd_notify_remove(struct mtd_info* mtd)
+{
+ if (!mtd)
+ return;
+
+ class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
+ class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
+}
static struct mtd_notifier notifier = {
- add: mtd_notify_add,
- remove: mtd_notify_remove,
+ .add = mtd_notify_add,
+ .remove = mtd_notify_remove,
};
-static devfs_handle_t devfs_dir_handle;
-static devfs_handle_t devfs_rw_handle[MAX_MTD_DEVICES];
-static devfs_handle_t devfs_ro_handle[MAX_MTD_DEVICES];
-#endif
+/*
+ * We use file->private_data to store a pointer to the MTD device.
+ * Since alignment is at least 32 bits, we have 2 bits free for OTP
+ * modes as well.
+ */
+
+#define TO_MTD(file) (struct mtd_info *)((long)((file)->private_data) & ~3L)
+
+#define MTD_MODE_OTP_FACT 1
+#define MTD_MODE_OTP_USER 2
+#define MTD_MODE(file) ((long)((file)->private_data) & 3)
+
+#define SET_MTD_MODE(file, mode) \
+ do { long __p = (long)((file)->private_data); \
+ (file)->private_data = (void *)((__p & ~3L) | mode); } while (0)
static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
- struct mtd_info *mtd=(struct mtd_info *)file->private_data;
+ struct mtd_info *mtd = TO_MTD(file);
switch (orig) {
case 0:
/* SEEK_SET */
- file->f_pos = offset;
break;
case 1:
/* SEEK_CUR */
- file->f_pos += offset;
+ offset += file->f_pos;
break;
case 2:
/* SEEK_END */
- file->f_pos =mtd->size + offset;
+ offset += mtd->size;
break;
default:
return -EINVAL;
}
- if (file->f_pos < 0)
- file->f_pos = 0;
- else if (file->f_pos >= mtd->size)
- file->f_pos = mtd->size - 1;
+ if (offset >= 0 && offset < mtd->size)
+ return file->f_pos = offset;
- return file->f_pos;
+ return -EINVAL;
}
static int mtd_open(struct inode *inode, struct file *file)
{
- int minor = MINOR(inode->i_rdev);
+ int minor = iminor(inode);
int devnum = minor >> 1;
struct mtd_info *mtd;
@@ -74,23 +121,23 @@ static int mtd_open(struct inode *inode, struct file *file)
return -EACCES;
mtd = get_mtd_device(NULL, devnum);
-
+
if (!mtd)
return -ENODEV;
-
+
if (MTD_ABSENT == mtd->type) {
put_mtd_device(mtd);
return -ENODEV;
}
file->private_data = mtd;
-
+
/* You can't open it RW if it's not a writeable device */
if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
put_mtd_device(mtd);
return -EACCES;
}
-
+
return 0;
} /* mtd_open */
@@ -102,11 +149,11 @@ static int mtd_close(struct inode *inode, struct file *file)
DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
- mtd = (struct mtd_info *)file->private_data;
-
+ mtd = TO_MTD(file);
+
if (mtd->sync)
mtd->sync(mtd);
-
+
put_mtd_device(mtd);
return 0;
@@ -117,15 +164,15 @@ static int mtd_close(struct inode *inode, struct file *file)
*/
#define MAX_KMALLOC_SIZE 0x20000
-static ssize_t mtd_read(struct file *file, char *buf, size_t count,loff_t *ppos)
+static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
- struct mtd_info *mtd = (struct mtd_info *)file->private_data;
+ struct mtd_info *mtd = TO_MTD(file);
size_t retlen=0;
size_t total_retlen=0;
int ret=0;
int len;
char *kbuf;
-
+
DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
if (*ppos + count > mtd->size)
@@ -133,11 +180,11 @@ static ssize_t mtd_read(struct file *file, char *buf, size_t count,loff_t *ppos)
if (!count)
return 0;
-
+
/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
and pass them directly to the MTD functions */
while (count) {
- if (count > MAX_KMALLOC_SIZE)
+ if (count > MAX_KMALLOC_SIZE)
len = MAX_KMALLOC_SIZE;
else
len = count;
@@ -145,9 +192,24 @@ static ssize_t mtd_read(struct file *file, char *buf, size_t count,loff_t *ppos)
kbuf=kmalloc(len,GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
-
- ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
- if (!ret) {
+
+ switch (MTD_MODE(file)) {
+ case MTD_MODE_OTP_FACT:
+ ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ break;
+ case MTD_MODE_OTP_USER:
+ ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ break;
+ default:
+ ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
+ }
+ /* NAND returns -EBADMSG on ECC errors, but it still returns
+ * the data. For our userspace tools it is important
+ * to be able to dump areas with ECC errors!
+ * Userspace software which accesses NAND this way
+ * must be aware of the fact that it is dealing with NAND.
+ */
+ if (!ret || (ret == -EBADMSG)) {
*ppos += retlen;
if (copy_to_user(buf, kbuf, retlen)) {
kfree(kbuf);
@@ -158,21 +220,23 @@ static ssize_t mtd_read(struct file *file, char *buf, size_t count,loff_t *ppos)
count -= retlen;
buf += retlen;
+ if (retlen == 0)
+ count = 0;
}
else {
kfree(kbuf);
return ret;
}
-
+
kfree(kbuf);
}
-
+
return total_retlen;
} /* mtd_read */
-static ssize_t mtd_write(struct file *file, const char *buf, size_t count,loff_t *ppos)
+static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
{
- struct mtd_info *mtd = (struct mtd_info *)file->private_data;
+ struct mtd_info *mtd = TO_MTD(file);
char *kbuf;
size_t retlen;
size_t total_retlen=0;
@@ -180,10 +244,10 @@ static ssize_t mtd_write(struct file *file, const char *buf, size_t count,loff_t
int len;
DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
-
+
if (*ppos == mtd->size)
return -ENOSPC;
-
+
if (*ppos + count > mtd->size)
count = mtd->size - *ppos;
@@ -191,7 +255,7 @@ static ssize_t mtd_write(struct file *file, const char *buf, size_t count,loff_t
return 0;
while (count) {
- if (count > MAX_KMALLOC_SIZE)
+ if (count > MAX_KMALLOC_SIZE)
len = MAX_KMALLOC_SIZE;
else
len = count;
@@ -206,8 +270,21 @@ static ssize_t mtd_write(struct file *file, const char *buf, size_t count,loff_t
kfree(kbuf);
return -EFAULT;
}
-
- ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
+
+ switch (MTD_MODE(file)) {
+ case MTD_MODE_OTP_FACT:
+ ret = -EROFS;
+ break;
+ case MTD_MODE_OTP_USER:
+ if (!mtd->write_user_prot_reg) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ break;
+ default:
+ ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
+ }
if (!ret) {
*ppos += retlen;
total_retlen += retlen;
@@ -218,7 +295,7 @@ static ssize_t mtd_write(struct file *file, const char *buf, size_t count,loff_t
kfree(kbuf);
return ret;
}
-
+
kfree(kbuf);
}
@@ -230,7 +307,7 @@ static ssize_t mtd_write(struct file *file, const char *buf, size_t count,loff_t
IOCTL calls for getting device parameters.
======================================================================*/
-static void mtd_erase_callback (struct erase_info *instr)
+static void mtdchar_erase_callback (struct erase_info *instr)
{
wake_up((wait_queue_head_t *)instr->priv);
}
@@ -238,25 +315,26 @@ static void mtd_erase_callback (struct erase_info *instr)
static int mtd_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg)
{
- struct mtd_info *mtd = (struct mtd_info *)file->private_data;
+ struct mtd_info *mtd = TO_MTD(file);
+ void __user *argp = (void __user *)arg;
int ret = 0;
u_long size;
-
+
DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
if (cmd & IOC_IN) {
- ret = verify_area(VERIFY_READ, (char *)arg, size);
- if (ret) return ret;
+ if (!access_ok(VERIFY_READ, argp, size))
+ return -EFAULT;
}
if (cmd & IOC_OUT) {
- ret = verify_area(VERIFY_WRITE, (char *)arg, size);
- if (ret) return ret;
+ if (!access_ok(VERIFY_WRITE, argp, size))
+ return -EFAULT;
}
-
+
switch (cmd) {
case MEMGETREGIONCOUNT:
- if (copy_to_user((int *) arg, &(mtd->numeraseregions), sizeof(int)))
+ if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
return -EFAULT;
break;
@@ -264,30 +342,30 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
struct region_info_user ur;
- if (copy_from_user( &ur,
- (struct region_info_user *)arg,
- sizeof(struct region_info_user))) {
+ if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
return -EFAULT;
- }
if (ur.regionindex >= mtd->numeraseregions)
return -EINVAL;
- if (copy_to_user((struct mtd_erase_region_info *) arg,
- &(mtd->eraseregions[ur.regionindex]),
+ if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
sizeof(struct mtd_erase_region_info)))
return -EFAULT;
break;
}
case MEMGETINFO:
- if (copy_to_user((struct mtd_info *)arg, mtd,
- sizeof(struct mtd_info_user)))
+ if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user)))
return -EFAULT;
break;
case MEMERASE:
{
- struct erase_info *erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
+ struct erase_info *erase;
+
+ if(!(file->f_mode & 2))
+ return -EPERM;
+
+ erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
if (!erase)
ret = -ENOMEM;
else {
@@ -297,19 +375,19 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
init_waitqueue_head(&waitq);
memset (erase,0,sizeof(struct erase_info));
- if (copy_from_user(&erase->addr, (u_long *)arg,
- 2 * sizeof(u_long))) {
+ if (copy_from_user(&erase->addr, argp,
+ sizeof(struct erase_info_user))) {
kfree(erase);
return -EFAULT;
}
erase->mtd = mtd;
- erase->callback = mtd_erase_callback;
+ erase->callback = mtdchar_erase_callback;
erase->priv = (unsigned long)&waitq;
-
+
/*
FIXME: Allow INTERRUPTIBLE. Which means
not having the wait_queue head on the stack.
-
+
If the wq_head is on the stack, and we
leave because we got interrupted, then the
wq_head is no longer there when the
@@ -337,17 +415,21 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
struct mtd_oob_buf buf;
void *databuf;
ssize_t retlen;
-
- if (copy_from_user(&buf, (struct mtd_oob_buf *)arg, sizeof(struct mtd_oob_buf)))
+
+ if(!(file->f_mode & 2))
+ return -EPERM;
+
+ if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
return -EFAULT;
-
+
if (buf.length > 0x4096)
return -EINVAL;
if (!mtd->write_oob)
ret = -EOPNOTSUPP;
else
- ret = verify_area(VERIFY_READ, (char *)buf.ptr, buf.length);
+ ret = access_ok(VERIFY_READ, buf.ptr,
+ buf.length) ? 0 : -EFAULT;
if (ret)
return ret;
@@ -355,7 +437,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
databuf = kmalloc(buf.length, GFP_KERNEL);
if (!databuf)
return -ENOMEM;
-
+
if (copy_from_user(databuf, buf.ptr, buf.length)) {
kfree(databuf);
return -EFAULT;
@@ -363,7 +445,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);
- if (copy_to_user((void *)arg + sizeof(u_int32_t), &retlen, sizeof(u_int32_t)))
+ if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t)))
ret = -EFAULT;
kfree(databuf);
@@ -377,16 +459,17 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
void *databuf;
ssize_t retlen;
- if (copy_from_user(&buf, (struct mtd_oob_buf *)arg, sizeof(struct mtd_oob_buf)))
+ if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
return -EFAULT;
-
+
if (buf.length > 0x4096)
return -EINVAL;
if (!mtd->read_oob)
ret = -EOPNOTSUPP;
else
- ret = verify_area(VERIFY_WRITE, (char *)buf.ptr, buf.length);
+ ret = access_ok(VERIFY_WRITE, buf.ptr,
+ buf.length) ? 0 : -EFAULT;
if (ret)
return ret;
@@ -394,123 +477,161 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
databuf = kmalloc(buf.length, GFP_KERNEL);
if (!databuf)
return -ENOMEM;
-
+
ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);
- if (copy_to_user((void *)arg + sizeof(u_int32_t), &retlen, sizeof(u_int32_t)))
+ if (put_user(retlen, (uint32_t __user *)argp))
ret = -EFAULT;
else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
ret = -EFAULT;
-
+
kfree(databuf);
break;
}
case MEMLOCK:
{
- unsigned long adrs[2];
+ struct erase_info_user info;
- if (copy_from_user(adrs ,(void *)arg, 2* sizeof(unsigned long)))
+ if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
if (!mtd->lock)
ret = -EOPNOTSUPP;
else
- ret = mtd->lock(mtd, adrs[0], adrs[1]);
+ ret = mtd->lock(mtd, info.start, info.length);
break;
}
case MEMUNLOCK:
{
- unsigned long adrs[2];
+ struct erase_info_user info;
- if (copy_from_user(adrs, (void *)arg, 2* sizeof(unsigned long)))
+ if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
if (!mtd->unlock)
ret = -EOPNOTSUPP;
else
- ret = mtd->unlock(mtd, adrs[0], adrs[1]);
+ ret = mtd->unlock(mtd, info.start, info.length);
break;
}
- case MEMWRITEDATA:
+ case MEMSETOOBSEL:
{
- struct mtd_oob_buf buf;
- void *databuf;
- ssize_t retlen;
-
- if (copy_from_user(&buf, (struct mtd_oob_buf *)arg, sizeof(struct mtd_oob_buf)))
+ if (copy_from_user(&mtd->oobinfo, argp, sizeof(struct nand_oobinfo)))
return -EFAULT;
-
- if (buf.length > 0x4096)
- return -EINVAL;
-
- if (!mtd->write_ecc)
- ret = -EOPNOTSUPP;
- else
- ret = verify_area(VERIFY_READ, (char *)buf.ptr, buf.length);
-
- if (ret)
- return ret;
+ break;
+ }
- databuf = kmalloc(buf.length, GFP_KERNEL);
- if (!databuf)
- return -ENOMEM;
-
- if (copy_from_user(databuf, buf.ptr, buf.length)) {
- kfree(databuf);
+ case MEMGETOOBSEL:
+ {
+ if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo)))
return -EFAULT;
- }
-
- ret = (mtd->write_ecc)(mtd, buf.start, buf.length, &retlen, databuf, NULL);
+ break;
+ }
- if (copy_to_user((void *)arg + sizeof(u_int32_t), &retlen, sizeof(u_int32_t)))
- ret = -EFAULT;
+ case MEMGETBADBLOCK:
+ {
+ loff_t offs;
- kfree(databuf);
+ if (copy_from_user(&offs, argp, sizeof(loff_t)))
+ return -EFAULT;
+ if (!mtd->block_isbad)
+ ret = -EOPNOTSUPP;
+ else
+ return mtd->block_isbad(mtd, offs);
break;
-
}
- case MEMREADDATA:
+ case MEMSETBADBLOCK:
{
- struct mtd_oob_buf buf;
- void *databuf;
- ssize_t retlen = 0;
+ loff_t offs;
- if (copy_from_user(&buf, (struct mtd_oob_buf *)arg, sizeof(struct mtd_oob_buf)))
+ if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
-
- if (buf.length > 0x4096)
- return -EINVAL;
-
- if (!mtd->read_ecc)
+ if (!mtd->block_markbad)
ret = -EOPNOTSUPP;
else
- ret = verify_area(VERIFY_WRITE, (char *)buf.ptr, buf.length);
+ return mtd->block_markbad(mtd, offs);
+ break;
+ }
- if (ret)
- return ret;
+#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
+ case OTPSELECT:
+ {
+ int mode;
+ if (copy_from_user(&mode, argp, sizeof(int)))
+ return -EFAULT;
+ SET_MTD_MODE(file, 0);
+ switch (mode) {
+ case MTD_OTP_FACTORY:
+ if (!mtd->read_fact_prot_reg)
+ ret = -EOPNOTSUPP;
+ else
+ SET_MTD_MODE(file, MTD_MODE_OTP_FACT);
+ break;
+ case MTD_OTP_USER:
+ if (!mtd->read_fact_prot_reg)
+ ret = -EOPNOTSUPP;
+ else
+ SET_MTD_MODE(file, MTD_MODE_OTP_USER);
+ break;
+ default:
+ ret = -EINVAL;
+ case MTD_OTP_OFF:
+ break;
+ }
+ file->f_pos = 0;
+ break;
+ }
- databuf = kmalloc(buf.length, GFP_KERNEL);
- if (!databuf)
+ case OTPGETREGIONCOUNT:
+ case OTPGETREGIONINFO:
+ {
+ struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
-
- ret = (mtd->read_ecc)(mtd, buf.start, buf.length, &retlen, databuf, NULL);
+ ret = -EOPNOTSUPP;
+ switch (MTD_MODE(file)) {
+ case MTD_MODE_OTP_FACT:
+ if (mtd->get_fact_prot_info)
+ ret = mtd->get_fact_prot_info(mtd, buf, 4096);
+ break;
+ case MTD_MODE_OTP_USER:
+ if (mtd->get_user_prot_info)
+ ret = mtd->get_user_prot_info(mtd, buf, 4096);
+ break;
+ }
+ if (ret >= 0) {
+ if (cmd == OTPGETREGIONCOUNT) {
+ int nbr = ret / sizeof(struct otp_info);
+ ret = copy_to_user(argp, &nbr, sizeof(int));
+ } else
+ ret = copy_to_user(argp, buf, ret);
+ if (ret)
+ ret = -EFAULT;
+ }
+ kfree(buf);
+ break;
+ }
- if (copy_to_user((void *)arg + sizeof(u_int32_t), &retlen, sizeof(u_int32_t)))
- ret = -EFAULT;
- else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
- ret = -EFAULT;
-
- kfree(databuf);
+ case OTPLOCK:
+ {
+ struct otp_info info;
+
+ if (MTD_MODE(file) != MTD_MODE_OTP_USER)
+ return -EINVAL;
+ if (copy_from_user(&info, argp, sizeof(info)))
+ return -EFAULT;
+ if (!mtd->lock_user_prot_reg)
+ return -EOPNOTSUPP;
+ ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
break;
}
+#endif
-
default:
- DEBUG(MTD_DEBUG_LEVEL0, "Invalid ioctl %x (MEMGETINFO = %lx)\n", cmd, MEMGETINFO);
ret = -ENOTTY;
}
@@ -518,86 +639,40 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
} /* memory_ioctl */
static struct file_operations mtd_fops = {
- owner: THIS_MODULE,
- llseek: mtd_lseek, /* lseek */
- read: mtd_read, /* read */
- write: mtd_write, /* write */
- ioctl: mtd_ioctl, /* ioctl */
- open: mtd_open, /* open */
- release: mtd_close, /* release */
+ .owner = THIS_MODULE,
+ .llseek = mtd_lseek,
+ .read = mtd_read,
+ .write = mtd_write,
+ .ioctl = mtd_ioctl,
+ .open = mtd_open,
+ .release = mtd_close,
};
-
-#ifdef CONFIG_DEVFS_FS
-/* Notification that a new device has been added. Create the devfs entry for
- * it. */
-
-static void mtd_notify_add(struct mtd_info* mtd)
-{
- char name[8];
-
- if (!mtd)
- return;
-
- sprintf(name, "%d", mtd->index);
- devfs_rw_handle[mtd->index] = devfs_register(devfs_dir_handle, name,
- DEVFS_FL_DEFAULT, MTD_CHAR_MAJOR, mtd->index*2,
- S_IFCHR | S_IRUGO | S_IWUGO,
- &mtd_fops, NULL);
-
- sprintf(name, "%dro", mtd->index);
- devfs_ro_handle[mtd->index] = devfs_register(devfs_dir_handle, name,
- DEVFS_FL_DEFAULT, MTD_CHAR_MAJOR, mtd->index*2+1,
- S_IFCHR | S_IRUGO | S_IWUGO,
- &mtd_fops, NULL);
-}
-
-static void mtd_notify_remove(struct mtd_info* mtd)
-{
- if (!mtd)
- return;
-
- devfs_unregister(devfs_rw_handle[mtd->index]);
- devfs_unregister(devfs_ro_handle[mtd->index]);
-}
-#endif
-
static int __init init_mtdchar(void)
{
-#ifdef CONFIG_DEVFS_FS
- if (devfs_register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops))
- {
+ if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
MTD_CHAR_MAJOR);
return -EAGAIN;
}
- devfs_dir_handle = devfs_mk_dir(NULL, "mtd", NULL);
+ mtd_class = class_create(THIS_MODULE, "mtd");
- register_mtd_user(&notifier);
-#else
- if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops))
- {
- printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
- MTD_CHAR_MAJOR);
- return -EAGAIN;
+ if (IS_ERR(mtd_class)) {
+ printk(KERN_ERR "Error creating mtd class.\n");
+ unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
+ return PTR_ERR(mtd_class);
}
-#endif
- DEBUG(MTD_DEBUG_LEVEL3,
- "init_mtdchar: allocated major number %d.\n", MTD_CHAR_MAJOR);
+ register_mtd_user(&notifier);
return 0;
}
static void __exit cleanup_mtdchar(void)
{
-#ifdef CONFIG_DEVFS_FS
unregister_mtd_user(&notifier);
- devfs_unregister(devfs_dir_handle);
- devfs_unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
-#else
+ class_destroy(mtd_class);
unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
-#endif
}
module_init(init_mtdchar);
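
For orientation, the ioctl hunks above add OTP (one-time-programmable) region queries to the MTD character device. Below is a minimal user-space sketch of the new OTPGETREGIONCOUNT call; the header path and device node are assumptions, and it presumes the file descriptor has already been switched into OTP user or factory mode by the mode-select ioctl, which lies outside the lines shown here.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>	/* assumed location of OTPGETREGIONCOUNT and struct otp_info */

int main(void)
{
	/* /dev/mtd0 is an assumption; use whichever MTD character node applies */
	int fd = open("/dev/mtd0", O_RDONLY);
	int count = 0;

	if (fd < 0)
		return 1;
	/* The handler copies the number of OTP regions back as a plain int */
	if (ioctl(fd, OTPGETREGIONCOUNT, &count) == 0)
		printf("%d OTP region(s)\n", count);
	close(fd);
	return 0;
}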
diff --git a/linux-2.4.x/drivers/mtd/mtdconcat.c b/linux-2.4.x/drivers/mtd/mtdconcat.c
index 176f54d..b1bf8c4 100644
--- a/linux-2.4.x/drivers/mtd/mtdconcat.c
+++ b/linux-2.4.x/drivers/mtd/mtdconcat.c
@@ -3,15 +3,18 @@
*
* (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
*
+ * NAND support by Christian Gan <cgan@iders.ca>
+ *
* This code is GPL
*
- * $Id: mtdconcat.c,v 1.2 2002/03/22 08:45:22 dwmw2 Exp $
+ * $Id: mtdconcat.c,v 1.11 2005/11/07 11:14:20 gleixner Exp $
*/
-#include <linux/module.h>
-#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>
@@ -24,7 +27,7 @@
*/
struct mtd_concat {
struct mtd_info mtd;
- int num_subdev;
+ int num_subdev;
struct mtd_info **subdev;
};
@@ -35,21 +38,20 @@ struct mtd_concat {
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev) \
((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
-
/*
* Given a pointer to the MTD object in the mtd_concat structure,
* we can retrieve the pointer to that structure with this macro.
*/
#define CONCAT(x) ((struct mtd_concat *)(x))
-
-/*
+/*
* MTD methods which look up the relevant subdevice, translate the
* effective address and pass through to the subdevice.
*/
-static int concat_read (struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int
+concat_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
{
struct mtd_concat *concat = CONCAT(mtd);
int err = -EINVAL;
@@ -57,43 +59,148 @@ static int concat_read (struct mtd_info *mtd, loff_t from, size_t len,
*retlen = 0;
- for(i = 0; i < concat->num_subdev; i++)
- {
+ for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size, retsize;
- if (from >= subdev->size)
- {
- size = 0;
+ if (from >= subdev->size) {
+ /* Not destined for this subdev */
+ size = 0;
from -= subdev->size;
+ continue;
}
+ if (from + len > subdev->size)
+ /* First part goes into this subdev */
+ size = subdev->size - from;
else
- {
- if (from + len > subdev->size)
- size = subdev->size - from;
- else
- size = len;
+ /* Entire transaction goes into this subdev */
+ size = len;
- err = subdev->read(subdev, from, size, &retsize, buf);
+ err = subdev->read(subdev, from, size, &retsize, buf);
- if(err)
- break;
+ if (err)
+ break;
- *retlen += retsize;
- len -= size;
- if(len == 0)
- break;
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ from = 0;
+ }
+ return err;
+}
+
+static int
+concat_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ *retlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (to >= subdev->size) {
+ size = 0;
+ to -= subdev->size;
+ continue;
+ }
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ if (!(subdev->flags & MTD_WRITEABLE))
+ err = -EROFS;
+ else
+ err = subdev->write(subdev, to, size, &retsize, buf);
+
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ to = 0;
+ }
+ return err;
+}
+
+static int
+concat_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf, u_char * eccbuf,
+ struct nand_oobinfo *oobsel)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+
+ *retlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (from >= subdev->size) {
+ /* Not destined for this subdev */
+ size = 0;
+ from -= subdev->size;
+ continue;
+ }
+
+ if (from + len > subdev->size)
+ /* First part goes into this subdev */
+ size = subdev->size - from;
+ else
+ /* Entire transaction goes into this subdev */
+ size = len;
+
+ if (subdev->read_ecc)
+ err = subdev->read_ecc(subdev, from, size,
+ &retsize, buf, eccbuf, oobsel);
+ else
err = -EINVAL;
- buf += size;
- from = 0;
+
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ if (eccbuf) {
+ eccbuf += subdev->oobsize;
+					/* in nand.c at least, eccbufs are
+					   tagged with two (int) eccstatus
+					   values; we must account for these */
+ eccbuf += 2 * (sizeof (int));
}
+ from = 0;
}
return err;
}
-static int concat_write (struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const u_char *buf)
+static int
+concat_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf, u_char * eccbuf,
+ struct nand_oobinfo *oobsel)
{
struct mtd_concat *concat = CONCAT(mtd);
int err = -EINVAL;
@@ -104,47 +211,146 @@ static int concat_write (struct mtd_info *mtd, loff_t to, size_t len,
*retlen = 0;
- for(i = 0; i < concat->num_subdev; i++)
- {
+ for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size, retsize;
- if (to >= subdev->size)
- {
- size = 0;
+ if (to >= subdev->size) {
+ size = 0;
to -= subdev->size;
+ continue;
}
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ if (!(subdev->flags & MTD_WRITEABLE))
+ err = -EROFS;
+ else if (subdev->write_ecc)
+ err = subdev->write_ecc(subdev, to, size,
+ &retsize, buf, eccbuf, oobsel);
else
- {
- if (to + len > subdev->size)
- size = subdev->size - to;
- else
- size = len;
+ err = -EINVAL;
+
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
- if (!(subdev->flags & MTD_WRITEABLE))
- err = -EROFS;
- else
- err = subdev->write(subdev, to, size, &retsize, buf);
+ err = -EINVAL;
+ buf += size;
+ if (eccbuf)
+ eccbuf += subdev->oobsize;
+ to = 0;
+ }
+ return err;
+}
- if(err)
- break;
+static int
+concat_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
- *retlen += retsize;
- len -= size;
- if(len == 0)
- break;
+ *retlen = 0;
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (from >= subdev->size) {
+ /* Not destined for this subdev */
+ size = 0;
+ from -= subdev->size;
+ continue;
+ }
+ if (from + len > subdev->size)
+ /* First part goes into this subdev */
+ size = subdev->size - from;
+ else
+ /* Entire transaction goes into this subdev */
+ size = len;
+
+ if (subdev->read_oob)
+ err = subdev->read_oob(subdev, from, size,
+ &retsize, buf);
+ else
err = -EINVAL;
- buf += size;
- to = 0;
+
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ from = 0;
+ }
+ return err;
+}
+
+static int
+concat_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ struct mtd_concat *concat = CONCAT(mtd);
+ int err = -EINVAL;
+ int i;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ *retlen = 0;
+
+ for (i = 0; i < concat->num_subdev; i++) {
+ struct mtd_info *subdev = concat->subdev[i];
+ size_t size, retsize;
+
+ if (to >= subdev->size) {
+ size = 0;
+ to -= subdev->size;
+ continue;
}
+ if (to + len > subdev->size)
+ size = subdev->size - to;
+ else
+ size = len;
+
+ if (!(subdev->flags & MTD_WRITEABLE))
+ err = -EROFS;
+ else if (subdev->write_oob)
+ err = subdev->write_oob(subdev, to, size, &retsize,
+ buf);
+ else
+ err = -EINVAL;
+
+ if (err)
+ break;
+
+ *retlen += retsize;
+ len -= size;
+ if (len == 0)
+ break;
+
+ err = -EINVAL;
+ buf += size;
+ to = 0;
}
return err;
}
-static void concat_erase_callback (struct erase_info *instr)
+static void concat_erase_callback(struct erase_info *instr)
{
- wake_up((wait_queue_head_t *)instr->priv);
+ wake_up((wait_queue_head_t *) instr->priv);
}
static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
@@ -160,18 +366,18 @@ static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
erase->mtd = mtd;
erase->callback = concat_erase_callback;
- erase->priv = (unsigned long)&waitq;
-
+ erase->priv = (unsigned long) &waitq;
+
/*
* FIXME: Allow INTERRUPTIBLE. Which means
* not having the wait_queue head on the stack.
*/
err = mtd->erase(mtd, erase);
- if (!err)
- {
+ if (!err) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
- if (erase->state != MTD_ERASE_DONE && erase->state != MTD_ERASE_FAILED)
+ if (erase->state != MTD_ERASE_DONE
+ && erase->state != MTD_ERASE_FAILED)
schedule();
remove_wait_queue(&waitq, &wait);
set_current_state(TASK_RUNNING);
@@ -181,21 +387,21 @@ static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
return err;
}
-static int concat_erase (struct mtd_info *mtd, struct erase_info *instr)
+static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_concat *concat = CONCAT(mtd);
struct mtd_info *subdev;
int i, err;
- u_int32_t length;
+ u_int32_t length, offset = 0;
struct erase_info *erase;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- if(instr->addr > concat->mtd.size)
+ if (instr->addr > concat->mtd.size)
return -EINVAL;
- if(instr->len + instr->addr > concat->mtd.size)
+ if (instr->len + instr->addr > concat->mtd.size)
return -EINVAL;
/*
@@ -204,23 +410,22 @@ static int concat_erase (struct mtd_info *mtd, struct erase_info *instr)
* region info rather than looking at each particular sub-device
* in turn.
*/
- if (!concat->mtd.numeraseregions)
- { /* the easy case: device has uniform erase block size */
- if(instr->addr & (concat->mtd.erasesize - 1))
+ if (!concat->mtd.numeraseregions) {
+ /* the easy case: device has uniform erase block size */
+ if (instr->addr & (concat->mtd.erasesize - 1))
return -EINVAL;
- if(instr->len & (concat->mtd.erasesize - 1))
+ if (instr->len & (concat->mtd.erasesize - 1))
return -EINVAL;
- }
- else
- { /* device has variable erase size */
- struct mtd_erase_region_info *erase_regions = concat->mtd.eraseregions;
+ } else {
+ /* device has variable erase size */
+ struct mtd_erase_region_info *erase_regions =
+ concat->mtd.eraseregions;
/*
* Find the erase region where the to-be-erased area begins:
*/
- for(i = 0; i < concat->mtd.numeraseregions &&
- instr->addr >= erase_regions[i].offset; i++)
- ;
+ for (i = 0; i < concat->mtd.numeraseregions &&
+ instr->addr >= erase_regions[i].offset; i++) ;
--i;
/*
@@ -228,25 +433,28 @@ static int concat_erase (struct mtd_info *mtd, struct erase_info *instr)
* to-be-erased area begins. Verify that the starting
* offset is aligned to this region's erase size:
*/
- if (instr->addr & (erase_regions[i].erasesize-1))
+ if (instr->addr & (erase_regions[i].erasesize - 1))
return -EINVAL;
/*
* now find the erase region where the to-be-erased area ends:
*/
- for(; i < concat->mtd.numeraseregions &&
- (instr->addr + instr->len) >= erase_regions[i].offset ; ++i)
- ;
+ for (; i < concat->mtd.numeraseregions &&
+ (instr->addr + instr->len) >= erase_regions[i].offset;
+ ++i) ;
--i;
/*
* check if the ending offset is aligned to this region's erase size
*/
- if ((instr->addr + instr->len) & (erase_regions[i].erasesize-1))
+ if ((instr->addr + instr->len) & (erase_regions[i].erasesize -
+ 1))
return -EINVAL;
}
+ instr->fail_addr = 0xffffffff;
+
/* make a local copy of instr to avoid modifying the caller's struct */
- erase = kmalloc(sizeof(struct erase_info),GFP_KERNEL);
+ erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
if (!erase)
return -ENOMEM;
@@ -258,39 +466,44 @@ static int concat_erase (struct mtd_info *mtd, struct erase_info *instr)
* find the subdevice where the to-be-erased area begins, adjust
* starting offset to be relative to the subdevice start
*/
- for(i = 0; i < concat->num_subdev; i++)
- {
+ for (i = 0; i < concat->num_subdev; i++) {
subdev = concat->subdev[i];
- if(subdev->size <= erase->addr)
+ if (subdev->size <= erase->addr) {
erase->addr -= subdev->size;
- else
+ offset += subdev->size;
+ } else {
break;
- }
- if(i >= concat->num_subdev) /* must never happen since size */
- BUG(); /* limit has been verified above */
+ }
+ }
+
+ /* must never happen since size limit has been verified above */
+ if (i >= concat->num_subdev)
+ BUG();
/* now do the erase: */
err = 0;
- for(;length > 0; i++) /* loop for all subevices affected by this request */
- {
- subdev = concat->subdev[i]; /* get current subdevice */
+ for (; length > 0; i++) {
+ /* loop for all subdevices affected by this request */
+ subdev = concat->subdev[i]; /* get current subdevice */
/* limit length to subdevice's size: */
- if(erase->addr + length > subdev->size)
+ if (erase->addr + length > subdev->size)
erase->len = subdev->size - erase->addr;
else
erase->len = length;
- if (!(subdev->flags & MTD_WRITEABLE))
- {
+ if (!(subdev->flags & MTD_WRITEABLE)) {
err = -EROFS;
break;
}
length -= erase->len;
- if ((err = concat_dev_erase(subdev, erase)))
- {
- if(err == -EINVAL) /* sanity check: must never happen since */
- BUG(); /* block alignment has been checked above */
+ if ((err = concat_dev_erase(subdev, erase))) {
+ /* sanity check: should never happen since
+ * block alignment has been checked above */
+ if (err == -EINVAL)
+ BUG();
+ if (erase->fail_addr != 0xffffffff)
+ instr->fail_addr = erase->fail_addr + offset;
break;
}
/*
@@ -302,93 +515,91 @@ static int concat_erase (struct mtd_info *mtd, struct erase_info *instr)
* current subdevice, i.e. at offset zero.
*/
erase->addr = 0;
+ offset += subdev->size;
}
- instr->state = MTD_ERASE_DONE;
+ instr->state = erase->state;
+ kfree(erase);
+ if (err)
+ return err;
+
if (instr->callback)
instr->callback(instr);
- kfree(erase);
- return err;
+ return 0;
}
-static int concat_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
- if ((len + ofs) > mtd->size)
+ if ((len + ofs) > mtd->size)
return -EINVAL;
- for(i = 0; i < concat->num_subdev; i++)
- {
+ for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size;
- if (ofs >= subdev->size)
- {
- size = 0;
+ if (ofs >= subdev->size) {
+ size = 0;
ofs -= subdev->size;
+ continue;
}
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
else
- {
- if (ofs + len > subdev->size)
- size = subdev->size - ofs;
- else
- size = len;
+ size = len;
- err = subdev->lock(subdev, ofs, size);
+ err = subdev->lock(subdev, ofs, size);
- if(err)
- break;
+ if (err)
+ break;
- len -= size;
- if(len == 0)
- break;
+ len -= size;
+ if (len == 0)
+ break;
- err = -EINVAL;
- ofs = 0;
- }
+ err = -EINVAL;
+ ofs = 0;
}
+
return err;
}
-static int concat_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
+static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
struct mtd_concat *concat = CONCAT(mtd);
int i, err = 0;
- if ((len + ofs) > mtd->size)
+ if ((len + ofs) > mtd->size)
return -EINVAL;
- for(i = 0; i < concat->num_subdev; i++)
- {
+ for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
size_t size;
- if (ofs >= subdev->size)
- {
- size = 0;
+ if (ofs >= subdev->size) {
+ size = 0;
ofs -= subdev->size;
+ continue;
}
+ if (ofs + len > subdev->size)
+ size = subdev->size - ofs;
else
- {
- if (ofs + len > subdev->size)
- size = subdev->size - ofs;
- else
- size = len;
+ size = len;
- err = subdev->unlock(subdev, ofs, size);
+ err = subdev->unlock(subdev, ofs, size);
- if(err)
- break;
+ if (err)
+ break;
- len -= size;
- if(len == 0)
- break;
+ len -= size;
+ if (len == 0)
+ break;
- err = -EINVAL;
- ofs = 0;
- }
+ err = -EINVAL;
+ ofs = 0;
}
+
return err;
}
@@ -397,8 +608,7 @@ static void concat_sync(struct mtd_info *mtd)
struct mtd_concat *concat = CONCAT(mtd);
int i;
- for(i = 0; i < concat->num_subdev; i++)
- {
+ for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
subdev->sync(subdev);
}
@@ -409,10 +619,9 @@ static int concat_suspend(struct mtd_info *mtd)
struct mtd_concat *concat = CONCAT(mtd);
int i, rc = 0;
- for(i = 0; i < concat->num_subdev; i++)
- {
+ for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- if((rc = subdev->suspend(subdev)) < 0)
+ if ((rc = subdev->suspend(subdev)) < 0)
return rc;
}
return rc;
@@ -423,8 +632,7 @@ static void concat_resume(struct mtd_info *mtd)
struct mtd_concat *concat = CONCAT(mtd);
int i;
- for(i = 0; i < concat->num_subdev; i++)
- {
+ for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
subdev->resume(subdev);
}
@@ -436,11 +644,10 @@ static void concat_resume(struct mtd_info *mtd)
* stored to *new_dev upon success. This function does _not_
* register any devices: this is the caller's responsibility.
*/
-struct mtd_info *mtd_concat_create(
- struct mtd_info *subdev[], /* subdevices to concatenate */
- int num_devs, /* number of subdevices */
- char *name) /* name for the new device */
-{
+struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to concatenate */
+ int num_devs, /* number of subdevices */
+				    char *name) /* name for the new device */
+{
int i;
size_t size;
struct mtd_concat *concat;
@@ -448,90 +655,103 @@ struct mtd_info *mtd_concat_create(
int num_erase_region;
printk(KERN_NOTICE "Concatenating MTD devices:\n");
- for(i = 0; i < num_devs; i++)
+ for (i = 0; i < num_devs; i++)
printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
printk(KERN_NOTICE "into device \"%s\"\n", name);
/* allocate the device structure */
size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
- concat = kmalloc (size, GFP_KERNEL);
- if(!concat)
- {
- printk ("memory allocation error while creating concatenated device \"%s\"\n",
- name);
- return NULL;
+ concat = kmalloc(size, GFP_KERNEL);
+ if (!concat) {
+ printk
+ ("memory allocation error while creating concatenated device \"%s\"\n",
+ name);
+ return NULL;
}
memset(concat, 0, size);
- concat->subdev = (struct mtd_info **)(concat + 1);
+ concat->subdev = (struct mtd_info **) (concat + 1);
/*
* Set up the new "super" device's MTD object structure, check for
 * incompatibilities between the subdevices.
*/
- concat->mtd.type = subdev[0]->type;
- concat->mtd.flags = subdev[0]->flags;
- concat->mtd.size = subdev[0]->size;
+ concat->mtd.type = subdev[0]->type;
+ concat->mtd.flags = subdev[0]->flags;
+ concat->mtd.size = subdev[0]->size;
concat->mtd.erasesize = subdev[0]->erasesize;
- concat->mtd.oobblock = subdev[0]->oobblock;
- concat->mtd.oobsize = subdev[0]->oobsize;
- concat->mtd.ecctype = subdev[0]->ecctype;
- concat->mtd.eccsize = subdev[0]->eccsize;
-
- concat->subdev[0] = subdev[0];
-
- for(i = 1; i < num_devs; i++)
- {
- if(concat->mtd.type != subdev[i]->type)
- {
+ concat->mtd.oobblock = subdev[0]->oobblock;
+ concat->mtd.oobsize = subdev[0]->oobsize;
+ concat->mtd.ecctype = subdev[0]->ecctype;
+ concat->mtd.eccsize = subdev[0]->eccsize;
+ if (subdev[0]->read_ecc)
+ concat->mtd.read_ecc = concat_read_ecc;
+ if (subdev[0]->write_ecc)
+ concat->mtd.write_ecc = concat_write_ecc;
+ if (subdev[0]->read_oob)
+ concat->mtd.read_oob = concat_read_oob;
+ if (subdev[0]->write_oob)
+ concat->mtd.write_oob = concat_write_oob;
+
+ concat->subdev[0] = subdev[0];
+
+ for (i = 1; i < num_devs; i++) {
+ if (concat->mtd.type != subdev[i]->type) {
kfree(concat);
- printk ("Incompatible device type on \"%s\"\n", subdev[i]->name);
+ printk("Incompatible device type on \"%s\"\n",
+ subdev[i]->name);
return NULL;
}
- if(concat->mtd.flags != subdev[i]->flags)
- { /*
- * Expect all flags except MTD_WRITEABLE to be equal on
- * all subdevices.
+ if (concat->mtd.flags != subdev[i]->flags) {
+ /*
+ * Expect all flags except MTD_WRITEABLE to be
+ * equal on all subdevices.
*/
- if((concat->mtd.flags ^ subdev[i]->flags) & ~MTD_WRITEABLE)
- {
+ if ((concat->mtd.flags ^ subdev[i]->
+ flags) & ~MTD_WRITEABLE) {
kfree(concat);
- printk ("Incompatible device flags on \"%s\"\n", subdev[i]->name);
+ printk("Incompatible device flags on \"%s\"\n",
+ subdev[i]->name);
return NULL;
- }
- else /* if writeable attribute differs, make super device writeable */
- concat->mtd.flags |= subdev[i]->flags & MTD_WRITEABLE;
+ } else
+ /* if writeable attribute differs,
+ make super device writeable */
+ concat->mtd.flags |=
+ subdev[i]->flags & MTD_WRITEABLE;
}
concat->mtd.size += subdev[i]->size;
- if(concat->mtd.oobblock != subdev[i]->oobblock ||
- concat->mtd.oobsize != subdev[i]->oobsize ||
- concat->mtd.ecctype != subdev[i]->ecctype ||
- concat->mtd.eccsize != subdev[i]->eccsize)
- {
+ if (concat->mtd.oobblock != subdev[i]->oobblock ||
+ concat->mtd.oobsize != subdev[i]->oobsize ||
+ concat->mtd.ecctype != subdev[i]->ecctype ||
+ concat->mtd.eccsize != subdev[i]->eccsize ||
+ !concat->mtd.read_ecc != !subdev[i]->read_ecc ||
+ !concat->mtd.write_ecc != !subdev[i]->write_ecc ||
+ !concat->mtd.read_oob != !subdev[i]->read_oob ||
+ !concat->mtd.write_oob != !subdev[i]->write_oob) {
kfree(concat);
- printk ("Incompatible OOB or ECC data on \"%s\"\n", subdev[i]->name);
+ printk("Incompatible OOB or ECC data on \"%s\"\n",
+ subdev[i]->name);
return NULL;
}
concat->subdev[i] = subdev[i];
-
+
}
- concat->num_subdev = num_devs;
- concat->mtd.name = name;
+ concat->num_subdev = num_devs;
+ concat->mtd.name = name;
/*
* NOTE: for now, we do not provide any readv()/writev() methods
* because they are messy to implement and they are not
* used to a great extent anyway.
*/
- concat->mtd.erase = concat_erase;
- concat->mtd.read = concat_read;
- concat->mtd.write = concat_write;
- concat->mtd.sync = concat_sync;
- concat->mtd.lock = concat_lock;
- concat->mtd.unlock = concat_unlock;
+ concat->mtd.erase = concat_erase;
+ concat->mtd.read = concat_read;
+ concat->mtd.write = concat_write;
+ concat->mtd.sync = concat_sync;
+ concat->mtd.lock = concat_lock;
+ concat->mtd.unlock = concat_unlock;
concat->mtd.suspend = concat_suspend;
- concat->mtd.resume = concat_resume;
-
+ concat->mtd.resume = concat_resume;
/*
* Combine the erase block size info of the subdevices:
@@ -541,44 +761,44 @@ struct mtd_info *mtd_concat_create(
*/
max_erasesize = curr_erasesize = subdev[0]->erasesize;
num_erase_region = 1;
- for(i = 0; i < num_devs; i++)
- {
- if(subdev[i]->numeraseregions == 0)
- { /* current subdevice has uniform erase size */
- if(subdev[i]->erasesize != curr_erasesize)
- { /* if it differs from the last subdevice's erase size, count it */
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /* if it differs from the last subdevice's erase size, count it */
++num_erase_region;
curr_erasesize = subdev[i]->erasesize;
- if(curr_erasesize > max_erasesize)
+ if (curr_erasesize > max_erasesize)
max_erasesize = curr_erasesize;
}
- }
- else
- { /* current subdevice has variable erase size */
+ } else {
+ /* current subdevice has variable erase size */
int j;
- for(j = 0; j < subdev[i]->numeraseregions; j++)
- { /* walk the list of erase regions, count any changes */
- if(subdev[i]->eraseregions[j].erasesize != curr_erasesize)
- {
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].erasesize !=
+ curr_erasesize) {
++num_erase_region;
- curr_erasesize = subdev[i]->eraseregions[j].erasesize;
- if(curr_erasesize > max_erasesize)
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
+ if (curr_erasesize > max_erasesize)
max_erasesize = curr_erasesize;
}
}
}
}
- if(num_erase_region == 1)
- { /*
+ if (num_erase_region == 1) {
+ /*
* All subdevices have the same uniform erase size.
* This is easy:
*/
concat->mtd.erasesize = curr_erasesize;
concat->mtd.numeraseregions = 0;
- }
- else
- { /*
+ } else {
+ /*
* erase block size varies across the subdevices: allocate
* space to store the data describing the variable erase regions
*/
@@ -587,13 +807,14 @@ struct mtd_info *mtd_concat_create(
concat->mtd.erasesize = max_erasesize;
concat->mtd.numeraseregions = num_erase_region;
- concat->mtd.eraseregions = erase_region_p = kmalloc (
- num_erase_region * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
- if(!erase_region_p)
- {
+ concat->mtd.eraseregions = erase_region_p =
+ kmalloc(num_erase_region *
+ sizeof (struct mtd_erase_region_info), GFP_KERNEL);
+ if (!erase_region_p) {
kfree(concat);
- printk ("memory allocation error while creating erase region list"
- " for device \"%s\"\n", name);
+ printk
+ ("memory allocation error while creating erase region list"
+ " for device \"%s\"\n", name);
return NULL;
}
@@ -603,46 +824,53 @@ struct mtd_info *mtd_concat_create(
*/
curr_erasesize = subdev[0]->erasesize;
begin = position = 0;
- for(i = 0; i < num_devs; i++)
- {
- if(subdev[i]->numeraseregions == 0)
- { /* current subdevice has uniform erase size */
- if(subdev[i]->erasesize != curr_erasesize)
- { /*
+ for (i = 0; i < num_devs; i++) {
+ if (subdev[i]->numeraseregions == 0) {
+ /* current subdevice has uniform erase size */
+ if (subdev[i]->erasesize != curr_erasesize) {
+ /*
* fill in an mtd_erase_region_info structure for the area
* we have walked so far:
*/
- erase_region_p->offset = begin;
- erase_region_p->erasesize = curr_erasesize;
- erase_region_p->numblocks = (position - begin) / curr_erasesize;
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ erase_region_p->numblocks =
+ (position - begin) / curr_erasesize;
begin = position;
curr_erasesize = subdev[i]->erasesize;
++erase_region_p;
}
position += subdev[i]->size;
- }
- else
- { /* current subdevice has variable erase size */
+ } else {
+ /* current subdevice has variable erase size */
int j;
- for(j = 0; j < subdev[i]->numeraseregions; j++)
- { /* walk the list of erase regions, count any changes */
- if(subdev[i]->eraseregions[j].erasesize != curr_erasesize)
- {
- erase_region_p->offset = begin;
- erase_region_p->erasesize = curr_erasesize;
- erase_region_p->numblocks = (position - begin) / curr_erasesize;
+ for (j = 0; j < subdev[i]->numeraseregions; j++) {
+ /* walk the list of erase regions, count any changes */
+ if (subdev[i]->eraseregions[j].
+ erasesize != curr_erasesize) {
+ erase_region_p->offset = begin;
+ erase_region_p->erasesize =
+ curr_erasesize;
+ erase_region_p->numblocks =
+ (position -
+ begin) / curr_erasesize;
begin = position;
- curr_erasesize = subdev[i]->eraseregions[j].erasesize;
+ curr_erasesize =
+ subdev[i]->eraseregions[j].
+ erasesize;
++erase_region_p;
}
- position += subdev[i]->eraseregions[j].numblocks * curr_erasesize;
+ position +=
+ subdev[i]->eraseregions[j].
+ numblocks * curr_erasesize;
}
}
}
/* Now write the final entry */
- erase_region_p->offset = begin;
+ erase_region_p->offset = begin;
erase_region_p->erasesize = curr_erasesize;
erase_region_p->numblocks = (position - begin) / curr_erasesize;
}
@@ -650,23 +878,21 @@ struct mtd_info *mtd_concat_create(
return &concat->mtd;
}
-/*
+/*
* This function destroys an MTD object obtained from concat_mtd_devs()
*/
void mtd_concat_destroy(struct mtd_info *mtd)
{
struct mtd_concat *concat = CONCAT(mtd);
- if(concat->mtd.numeraseregions)
+ if (concat->mtd.numeraseregions)
kfree(concat->mtd.eraseregions);
kfree(concat);
}
-
EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");
diff --git a/linux-2.4.x/drivers/mtd/mtdcore.c b/linux-2.4.x/drivers/mtd/mtdcore.c
index d40f144..8fde482 100644
--- a/linux-2.4.x/drivers/mtd/mtdcore.c
+++ b/linux-2.4.x/drivers/mtd/mtdcore.c
@@ -1,5 +1,5 @@
/*
- * $Id: mtdcore.c,v 1.32 2002/03/07 18:38:10 joern Exp $
+ * $Id: mtdcore.c,v 1.49 2006/03/29 08:40:13 dwmw2 Exp $
*
* Core registration and callback routines for MTD
* drivers and users.
@@ -17,17 +17,21 @@
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
-#include <stdarg.h>
+#include <linux/init.h>
#include <linux/mtd/compatmac.h>
-#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
-#endif
#include <linux/mtd/mtd.h>
-static DECLARE_MUTEX(mtd_table_mutex);
-static struct mtd_info *mtd_table[MAX_MTD_DEVICES];
-static struct mtd_notifier *mtd_notifiers = NULL;
+/* These are exported solely for the purpose of mtd_blkdevs.c. You
+ should not use them for _anything_ else */
+DEFINE_MUTEX(mtd_table_mutex);
+struct mtd_info *mtd_table[MAX_MTD_DEVICES];
+
+EXPORT_SYMBOL_GPL(mtd_table_mutex);
+EXPORT_SYMBOL_GPL(mtd_table);
+
+static LIST_HEAD(mtd_notifiers);
/**
* add_mtd_device - register an MTD device
@@ -43,27 +47,34 @@ int add_mtd_device(struct mtd_info *mtd)
{
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (!mtd_table[i])
- {
- struct mtd_notifier *not=mtd_notifiers;
+ for (i=0; i < MAX_MTD_DEVICES; i++)
+ if (!mtd_table[i]) {
+ struct list_head *this;
mtd_table[i] = mtd;
mtd->index = i;
+ mtd->usecount = 0;
+
DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
- while (not)
- {
- (*(not->add))(mtd);
- not = not->next;
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each(this, &mtd_notifiers) {
+ struct mtd_notifier *not = list_entry(this, struct mtd_notifier, list);
+ not->add(mtd);
}
- up(&mtd_table_mutex);
- MOD_INC_USE_COUNT;
+
+ mutex_unlock(&mtd_table_mutex);
+ /* We _know_ we aren't being removed, because
+ our caller is still holding us here. So none
+ of this try_ nonsense, and no bitching about it
+ either. :) */
+ __module_get(THIS_MODULE);
return 0;
}
-
- up(&mtd_table_mutex);
+
+ mutex_unlock(&mtd_table_mutex);
return 1;
}
@@ -79,29 +90,34 @@ int add_mtd_device(struct mtd_info *mtd)
int del_mtd_device (struct mtd_info *mtd)
{
- struct mtd_notifier *not=mtd_notifiers;
- int i;
-
- down(&mtd_table_mutex);
-
- for (i=0; i < MAX_MTD_DEVICES; i++)
- {
- if (mtd_table[i] == mtd)
- {
- while (not)
- {
- (*(not->remove))(mtd);
- not = not->next;
- }
- mtd_table[i] = NULL;
- up (&mtd_table_mutex);
- MOD_DEC_USE_COUNT;
- return 0;
+ int ret;
+
+ mutex_lock(&mtd_table_mutex);
+
+ if (mtd_table[mtd->index] != mtd) {
+ ret = -ENODEV;
+ } else if (mtd->usecount) {
+ printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
+ mtd->index, mtd->name, mtd->usecount);
+ ret = -EBUSY;
+ } else {
+ struct list_head *this;
+
+ /* No need to get a refcount on the module containing
+ the notifier, since we hold the mtd_table_mutex */
+ list_for_each(this, &mtd_notifiers) {
+ struct mtd_notifier *not = list_entry(this, struct mtd_notifier, list);
+ not->remove(mtd);
}
+
+ mtd_table[mtd->index] = NULL;
+
+ module_put(THIS_MODULE);
+ ret = 0;
}
- up(&mtd_table_mutex);
- return 1;
+ mutex_unlock(&mtd_table_mutex);
+ return ret;
}
/**
@@ -117,23 +133,22 @@ void register_mtd_user (struct mtd_notifier *new)
{
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
- new->next = mtd_notifiers;
- mtd_notifiers = new;
+ list_add(&new->list, &mtd_notifiers);
+
+ __module_get(THIS_MODULE);
- MOD_INC_USE_COUNT;
-
for (i=0; i< MAX_MTD_DEVICES; i++)
if (mtd_table[i])
new->add(mtd_table[i]);
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
}
/**
- * register_mtd_user - unregister a 'user' of MTD devices.
- * @new: pointer to notifier info structure
+ * unregister_mtd_user - unregister a 'user' of MTD devices.
+ * @old: pointer to notifier info structure
*
* Removes a callback function pair from the list of 'users' to be
* notified upon addition or removal of MTD devices. Causes the
@@ -143,34 +158,24 @@ void register_mtd_user (struct mtd_notifier *new)
int unregister_mtd_user (struct mtd_notifier *old)
{
- struct mtd_notifier **prev = &mtd_notifiers;
- struct mtd_notifier *cur;
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
- while ((cur = *prev)) {
- if (cur == old) {
- *prev = cur->next;
+ module_put(THIS_MODULE);
- MOD_DEC_USE_COUNT;
+ for (i=0; i< MAX_MTD_DEVICES; i++)
+ if (mtd_table[i])
+ old->remove(mtd_table[i]);
- for (i=0; i< MAX_MTD_DEVICES; i++)
- if (mtd_table[i])
- old->remove(mtd_table[i]);
-
- up(&mtd_table_mutex);
- return 0;
- }
- prev = &cur->next;
- }
- up(&mtd_table_mutex);
- return 1;
+ list_del(&old->list);
+ mutex_unlock(&mtd_table_mutex);
+ return 0;
}
/**
- * __get_mtd_device - obtain a validated handle for an MTD device
+ * get_mtd_device - obtain a validated handle for an MTD device
* @mtd: last known address of the required MTD device
* @num: internal device number of the required MTD device
*
@@ -178,16 +183,15 @@ int unregister_mtd_user (struct mtd_notifier *old)
* table, if any. Given an address and num == -1, search the device table
* for a device with that address and return if it's still present. Given
* both, return the num'th driver only if its address matches. Return NULL
- * if not. get_mtd_device() increases the use count, but
- * __get_mtd_device() doesn't - you should generally use get_mtd_device().
+ * if not.
*/
-
-struct mtd_info *__get_mtd_device(struct mtd_info *mtd, int num)
+
+struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
struct mtd_info *ret = NULL;
int i;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
if (num == -1) {
for (i=0; i< MAX_MTD_DEVICES; i++)
@@ -198,58 +202,104 @@ struct mtd_info *__get_mtd_device(struct mtd_info *mtd, int num)
if (mtd && mtd != ret)
ret = NULL;
}
-
- up(&mtd_table_mutex);
+
+ if (ret && !try_module_get(ret->owner))
+ ret = NULL;
+
+ if (ret)
+ ret->usecount++;
+
+ mutex_unlock(&mtd_table_mutex);
return ret;
}
-EXPORT_SYMBOL(add_mtd_device);
-EXPORT_SYMBOL(del_mtd_device);
-EXPORT_SYMBOL(__get_mtd_device);
-EXPORT_SYMBOL(register_mtd_user);
-EXPORT_SYMBOL(unregister_mtd_user);
-
-/*====================================================================*/
-/* Power management code */
+void put_mtd_device(struct mtd_info *mtd)
+{
+ int c;
-#ifdef CONFIG_PM
+ mutex_lock(&mtd_table_mutex);
+ c = --mtd->usecount;
+ mutex_unlock(&mtd_table_mutex);
+ BUG_ON(c < 0);
-#include <linux/pm.h>
+ module_put(mtd->owner);
+}
-static struct pm_dev *mtd_pm_dev = NULL;
+/* default_mtd_writev - default mtd writev method for MTD devices that
+ * don't implement their own
+ */
-static int mtd_pm_callback(struct pm_dev *dev, pm_request_t rqst, void *data)
+int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
{
- int ret = 0, i;
-
- if (down_trylock(&mtd_table_mutex))
- return -EAGAIN;
- if (rqst == PM_SUSPEND) {
- for (i = 0; ret == 0 && i < MAX_MTD_DEVICES; i++) {
- if (mtd_table[i] && mtd_table[i]->suspend)
- ret = mtd_table[i]->suspend(mtd_table[i]);
+ unsigned long i;
+ size_t totlen = 0, thislen;
+ int ret = 0;
+
+ if(!mtd->write) {
+ ret = -EROFS;
+ } else {
+ for (i=0; i<count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ to += vecs[i].iov_len;
}
- } else i = MAX_MTD_DEVICES-1;
+ }
+ if (retlen)
+ *retlen = totlen;
+ return ret;
+}
+
+
+/* default_mtd_readv - default mtd readv method for MTD devices that don't
+ * implement their own
+ */
- if (rqst == PM_RESUME || ret) {
- for ( ; i >= 0; i--) {
- if (mtd_table[i] && mtd_table[i]->resume)
- mtd_table[i]->resume(mtd_table[i]);
+int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
+ unsigned long count, loff_t from, size_t *retlen)
+{
+ unsigned long i;
+ size_t totlen = 0, thislen;
+ int ret = 0;
+
+ if(!mtd->read) {
+ ret = -EIO;
+ } else {
+ for (i=0; i<count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd->read(mtd, from, vecs[i].iov_len, &thislen, vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ from += vecs[i].iov_len;
}
}
- up(&mtd_table_mutex);
+ if (retlen)
+ *retlen = totlen;
return ret;
}
-#endif
-/*====================================================================*/
-/* Support for /proc/mtd */
+
+EXPORT_SYMBOL(add_mtd_device);
+EXPORT_SYMBOL(del_mtd_device);
+EXPORT_SYMBOL(get_mtd_device);
+EXPORT_SYMBOL(put_mtd_device);
+EXPORT_SYMBOL(register_mtd_user);
+EXPORT_SYMBOL(unregister_mtd_user);
+EXPORT_SYMBOL(default_mtd_writev);
+EXPORT_SYMBOL(default_mtd_readv);
#ifdef CONFIG_PROC_FS
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
+/*====================================================================*/
+/* Support for /proc/mtd */
+
static struct proc_dir_entry *proc_mtd;
-#endif
static inline int mtd_proc_info (char *buf, int i)
{
@@ -262,18 +312,13 @@ static inline int mtd_proc_info (char *buf, int i)
this->erasesize, this->name);
}
-static int mtd_read_proc ( char *page, char **start, off_t off,int count
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- ,int *eof, void *data_unused
-#else
- ,int unused
-#endif
- )
+static int mtd_read_proc (char *page, char **start, off_t off, int count,
+ int *eof, void *data_unused)
{
int len, l, i;
off_t begin = 0;
- down(&mtd_table_mutex);
+ mutex_lock(&mtd_table_mutex);
len = sprintf(page, "dev: size erasesize name\n");
for (i=0; i< MAX_MTD_DEVICES; i++) {
@@ -288,78 +333,37 @@ static int mtd_read_proc ( char *page, char **start, off_t off,int count
}
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
*eof = 1;
-#endif
done:
- up(&mtd_table_mutex);
+ mutex_unlock(&mtd_table_mutex);
if (off >= len+begin)
return 0;
*start = page + (off-begin);
return ((count < begin+len-off) ? count : begin+len-off);
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0)
-struct proc_dir_entry mtd_proc_entry = {
- 0, /* low_ino: the inode -- dynamic */
- 3, "mtd", /* len of name and name */
- S_IFREG | S_IRUGO, /* mode */
- 1, 0, 0, /* nlinks, owner, group */
- 0, NULL, /* size - unused; operations -- use default */
- &mtd_read_proc, /* function used to read data */
- /* nothing more */
- };
-#endif
-
-#endif /* CONFIG_PROC_FS */
-
/*====================================================================*/
/* Init code */
-int __init init_mtd(void)
+static int __init init_mtd(void)
{
-#ifdef CONFIG_PROC_FS
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- if ((proc_mtd = create_proc_entry( "mtd", 0, 0 )))
- proc_mtd->read_proc = mtd_read_proc;
-#else
- proc_register_dynamic(&proc_root,&mtd_proc_entry);
-#endif
-#endif
-
-#if LINUX_VERSION_CODE < 0x20212
- init_mtd_devices();
-#endif
-
-#ifdef CONFIG_PM
- mtd_pm_dev = pm_register(PM_UNKNOWN_DEV, 0, mtd_pm_callback);
-#endif
+ if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
+ proc_mtd->read_proc = mtd_read_proc;
return 0;
}
static void __exit cleanup_mtd(void)
{
-#ifdef CONFIG_PM
- if (mtd_pm_dev) {
- pm_unregister(mtd_pm_dev);
- mtd_pm_dev = NULL;
- }
-#endif
-
-#ifdef CONFIG_PROC_FS
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
if (proc_mtd)
- remove_proc_entry( "mtd", 0);
-#else
- proc_unregister(&proc_root,mtd_proc_entry.low_ino);
-#endif
-#endif
+ remove_proc_entry( "mtd", NULL);
}
module_init(init_mtd);
module_exit(cleanup_mtd);
+#endif /* CONFIG_PROC_FS */
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/linux-2.4.x/drivers/mtd/mtdpart.c b/linux-2.4.x/drivers/mtd/mtdpart.c
index 2453697..3306566 100644
--- a/linux-2.4.x/drivers/mtd/mtdpart.c
+++ b/linux-2.4.x/drivers/mtd/mtdpart.c
@@ -5,22 +5,22 @@
*
* This code is GPL
*
- * $Id: mtdpart.c,v 1.27 2002/03/08 16:34:35 rkaiser Exp $
- * - with protection register access removed until that code is merged in 2.4.
+ * $Id: mtdpart.c,v 1.56 2005/11/29 16:00:23 vwool Exp $
*
* 02-21-2002 Thomas Gleixner <gleixner@autronix.de>
* added support for read_oob, write_oob
- */
+ */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
-
+#include <linux/config.h>
+#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-
+#include <linux/mtd/compatmac.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
@@ -41,13 +41,13 @@ struct mtd_part {
*/
#define PART(x) ((struct mtd_part *)(x))
-
-/*
+
+/*
* MTD methods which simply translate the effective address and pass through
* to the _real_ device.
*/
-static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
+static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
@@ -55,11 +55,48 @@ static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- return part->master->read (part->master, from + part->offset,
+ if (part->master->read_ecc == NULL)
+ return part->master->read (part->master, from + part->offset,
len, retlen, buf);
+ else
+ return part->master->read_ecc (part->master, from + part->offset,
+ len, retlen, buf, NULL, &mtd->oobinfo);
+}
+
+static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char **buf)
+{
+ struct mtd_part *part = PART(mtd);
+ if (from >= mtd->size)
+ len = 0;
+ else if (from + len > mtd->size)
+ len = mtd->size - from;
+ return part->master->point (part->master, from + part->offset,
+ len, retlen, buf);
+}
+static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
+{
+ struct mtd_part *part = PART(mtd);
+
+ part->master->unpoint (part->master, addr, from + part->offset, len);
+}
+
+
+static int part_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel)
+{
+ struct mtd_part *part = PART(mtd);
+ if (oobsel == NULL)
+ oobsel = &mtd->oobinfo;
+ if (from >= mtd->size)
+ len = 0;
+ else if (from + len > mtd->size)
+ len = mtd->size - from;
+ return part->master->read_ecc (part->master, from + part->offset,
+ len, retlen, buf, eccbuf, oobsel);
}
-static int part_read_oob (struct mtd_info *mtd, loff_t from, size_t len,
+static int part_read_oob (struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
@@ -67,10 +104,40 @@ static int part_read_oob (struct mtd_info *mtd, loff_t from, size_t len,
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- return part->master->read_oob (part->master, from + part->offset,
+ return part->master->read_oob (part->master, from + part->offset,
len, retlen, buf);
}
+static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->read_user_prot_reg (part->master, from,
+ len, retlen, buf);
+}
+
+static int part_get_user_prot_info (struct mtd_info *mtd,
+ struct otp_info *buf, size_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->get_user_prot_info (part->master, buf, len);
+}
+
+static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->read_fact_prot_reg (part->master, from,
+ len, retlen, buf);
+}
+
+static int part_get_fact_prot_info (struct mtd_info *mtd,
+ struct otp_info *buf, size_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->get_fact_prot_info (part->master, buf, len);
+}
+
static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{
@@ -81,20 +148,30 @@ static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->write (part->master, to + part->offset,
+ if (part->master->write_ecc == NULL)
+ return part->master->write (part->master, to + part->offset,
len, retlen, buf);
+ else
+ return part->master->write_ecc (part->master, to + part->offset,
+ len, retlen, buf, NULL, &mtd->oobinfo);
+
}
-static int part_point (struct mtd_info *mtd, loff_t off, size_t len,
- size_t *retlen, u_char **buf)
+static int part_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf,
+ u_char *eccbuf, struct nand_oobinfo *oobsel)
{
struct mtd_part *part = PART(mtd);
- if (off >= mtd->size)
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (oobsel == NULL)
+ oobsel = &mtd->oobinfo;
+ if (to >= mtd->size)
len = 0;
- else if (off + len > mtd->size)
- len = mtd->size - off;
- return part->master->point (part->master, off + part->offset, len,
- retlen, buf);
+ else if (to + len > mtd->size)
+ len = mtd->size - to;
+ return part->master->write_ecc (part->master, to + part->offset,
+ len, retlen, buf, eccbuf, oobsel);
}
static int part_write_oob (struct mtd_info *mtd, loff_t to, size_t len,
@@ -107,43 +184,109 @@ static int part_write_oob (struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->write_oob (part->master, to + part->offset,
+ return part->master->write_oob (part->master, to + part->offset,
len, retlen, buf);
}
-static int part_writev (struct mtd_info *mtd, const struct iovec *vecs,
+static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->write_user_prot_reg (part->master, from,
+ len, retlen, buf);
+}
+
+static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len)
+{
+ struct mtd_part *part = PART(mtd);
+ return part->master->lock_user_prot_reg (part->master, from, len);
+}
+
+static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
unsigned long count, loff_t to, size_t *retlen)
{
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- return part->master->writev (part->master, vecs, count,
+ if (part->master->writev_ecc == NULL)
+ return part->master->writev (part->master, vecs, count,
to + part->offset, retlen);
+ else
+ return part->master->writev_ecc (part->master, vecs, count,
+ to + part->offset, retlen,
+ NULL, &mtd->oobinfo);
}
-static int part_readv (struct mtd_info *mtd, struct iovec *vecs,
+static int part_readv (struct mtd_info *mtd, struct kvec *vecs,
unsigned long count, loff_t from, size_t *retlen)
{
struct mtd_part *part = PART(mtd);
- return part->master->readv (part->master, vecs, count,
+ if (part->master->readv_ecc == NULL)
+ return part->master->readv (part->master, vecs, count,
from + part->offset, retlen);
+ else
+ return part->master->readv_ecc (part->master, vecs, count,
+ from + part->offset, retlen,
+ NULL, &mtd->oobinfo);
+}
+
+static int part_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen,
+ u_char *eccbuf, struct nand_oobinfo *oobsel)
+{
+ struct mtd_part *part = PART(mtd);
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (oobsel == NULL)
+ oobsel = &mtd->oobinfo;
+ return part->master->writev_ecc (part->master, vecs, count,
+ to + part->offset, retlen,
+ eccbuf, oobsel);
+}
+
+static int part_readv_ecc (struct mtd_info *mtd, struct kvec *vecs,
+ unsigned long count, loff_t from, size_t *retlen,
+ u_char *eccbuf, struct nand_oobinfo *oobsel)
+{
+ struct mtd_part *part = PART(mtd);
+ if (oobsel == NULL)
+ oobsel = &mtd->oobinfo;
+ return part->master->readv_ecc (part->master, vecs, count,
+ from + part->offset, retlen,
+ eccbuf, oobsel);
}
static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
{
struct mtd_part *part = PART(mtd);
+ int ret;
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
if (instr->addr >= mtd->size)
return -EINVAL;
instr->addr += part->offset;
- return part->master->erase(part->master, instr);
+ ret = part->master->erase(part->master, instr);
+ return ret;
+}
+
+void mtd_erase_callback(struct erase_info *instr)
+{
+ if (instr->mtd->erase == part_erase) {
+ struct mtd_part *part = PART(instr->mtd);
+
+ if (instr->fail_addr != 0xffffffff)
+ instr->fail_addr -= part->offset;
+ instr->addr -= part->offset;
+ }
+ if (instr->callback)
+ instr->callback(instr);
}
+EXPORT_SYMBOL_GPL(mtd_erase_callback);
static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
{
struct mtd_part *part = PART(mtd);
- if ((len + ofs) > mtd->size)
+ if ((len + ofs) > mtd->size)
return -EINVAL;
return part->master->lock(part->master, ofs + part->offset, len);
}
@@ -151,7 +294,7 @@ static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
{
struct mtd_part *part = PART(mtd);
- if ((len + ofs) > mtd->size)
+ if ((len + ofs) > mtd->size)
return -EINVAL;
return part->master->unlock(part->master, ofs + part->offset, len);
}
@@ -174,8 +317,28 @@ static void part_resume(struct mtd_info *mtd)
part->master->resume(part->master);
}
-/*
- * This function unregisters and destroy all slave MTD objects which are
+static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_part *part = PART(mtd);
+ if (ofs >= mtd->size)
+ return -EINVAL;
+ ofs += part->offset;
+ return part->master->block_isbad(part->master, ofs);
+}
+
+static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_part *part = PART(mtd);
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (ofs >= mtd->size)
+ return -EINVAL;
+ ofs += part->offset;
+ return part->master->block_markbad(part->master, ofs);
+}
+
+/*
+ * This function unregisters and destroys all slave MTD objects which are
* attached to the given master MTD object.
*/
@@ -208,8 +371,8 @@ int del_mtd_partitions(struct mtd_info *master)
* (Q: should we register the master MTD object as well?)
*/
-int add_mtd_partitions(struct mtd_info *master,
- struct mtd_partition *parts,
+int add_mtd_partitions(struct mtd_info *master,
+ const struct mtd_partition *parts,
int nbparts)
{
struct mtd_part *slave;
@@ -237,38 +400,64 @@ int add_mtd_partitions(struct mtd_info *master,
slave->mtd.size = parts[i].size;
slave->mtd.oobblock = master->oobblock;
slave->mtd.oobsize = master->oobsize;
+ slave->mtd.oobavail = master->oobavail;
slave->mtd.ecctype = master->ecctype;
slave->mtd.eccsize = master->eccsize;
slave->mtd.name = parts[i].name;
slave->mtd.bank_size = master->bank_size;
- slave->mtd.module = master->module;
+ slave->mtd.owner = master->owner;
slave->mtd.read = part_read;
slave->mtd.write = part_write;
+ if(master->point && master->unpoint){
+ slave->mtd.point = part_point;
+ slave->mtd.unpoint = part_unpoint;
+ }
+
+ if (master->read_ecc)
+ slave->mtd.read_ecc = part_read_ecc;
+ if (master->write_ecc)
+ slave->mtd.write_ecc = part_write_ecc;
if (master->read_oob)
slave->mtd.read_oob = part_read_oob;
if (master->write_oob)
slave->mtd.write_oob = part_write_oob;
+ if(master->read_user_prot_reg)
+ slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+ if(master->read_fact_prot_reg)
+ slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+ if(master->write_user_prot_reg)
+ slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+ if(master->lock_user_prot_reg)
+ slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+ if(master->get_user_prot_info)
+ slave->mtd.get_user_prot_info = part_get_user_prot_info;
+ if(master->get_fact_prot_info)
+ slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
if (master->sync)
slave->mtd.sync = part_sync;
if (!i && master->suspend && master->resume) {
slave->mtd.suspend = part_suspend;
slave->mtd.resume = part_resume;
}
-
- if (master->point)
- slave->mtd.point = part_point;
-
if (master->writev)
slave->mtd.writev = part_writev;
if (master->readv)
slave->mtd.readv = part_readv;
+ if (master->writev_ecc)
+ slave->mtd.writev_ecc = part_writev_ecc;
+ if (master->readv_ecc)
+ slave->mtd.readv_ecc = part_readv_ecc;
if (master->lock)
slave->mtd.lock = part_lock;
if (master->unlock)
slave->mtd.unlock = part_unlock;
+ if (master->block_isbad)
+ slave->mtd.block_isbad = part_block_isbad;
+ if (master->block_markbad)
+ slave->mtd.block_markbad = part_block_markbad;
slave->mtd.erase = part_erase;
slave->master = master;
slave->offset = parts[i].offset;
@@ -277,9 +466,10 @@ int add_mtd_partitions(struct mtd_info *master,
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
if (slave->offset == MTDPART_OFS_NXTBLK) {
- u_int32_t emask = master->erasesize-1;
- slave->offset = (cur_offset + emask) & ~emask;
- if (slave->offset != cur_offset) {
+ slave->offset = cur_offset;
+ if ((cur_offset % master->erasesize) != 0) {
+ /* Round up to next erasesize */
+ slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
printk(KERN_NOTICE "Moving partition %d: "
"0x%08x -> 0x%08x\n", i,
cur_offset, slave->offset);
@@ -288,8 +478,8 @@ int add_mtd_partitions(struct mtd_info *master,
if (slave->mtd.size == MTDPART_SIZ_FULL)
slave->mtd.size = master->size - slave->offset;
cur_offset = slave->offset + slave->mtd.size;
-
- printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
+
+ printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
slave->offset + slave->mtd.size, slave->mtd.name);
/* let's do some sanity checks */
@@ -309,7 +499,7 @@ int add_mtd_partitions(struct mtd_info *master,
/* Deal with variable erase size stuff */
int i;
struct mtd_erase_region_info *regions = master->eraseregions;
-
+
		/* Find the first erase region which is part of this partition. */
for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
;
@@ -324,7 +514,7 @@ int add_mtd_partitions(struct mtd_info *master,
slave->mtd.erasesize = master->erasesize;
}
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
(slave->offset % slave->mtd.erasesize)) {
/* Doesn't start on a boundary of major erase size */
/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
@@ -332,13 +522,16 @@ int add_mtd_partitions(struct mtd_info *master,
printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
parts[i].name);
}
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
(slave->mtd.size % slave->mtd.erasesize)) {
slave->mtd.flags &= ~MTD_WRITEABLE;
printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
parts[i].name);
}
+ /* copy oobinfo from master */
+ memcpy(&slave->mtd.oobinfo, &master->oobinfo, sizeof(slave->mtd.oobinfo));
+
if(parts[i].mtdp)
	{ /* store the object pointer (caller may or may not register it) */
*parts[i].mtdp = &slave->mtd;
@@ -358,6 +551,75 @@ int add_mtd_partitions(struct mtd_info *master,
EXPORT_SYMBOL(add_mtd_partitions);
EXPORT_SYMBOL(del_mtd_partitions);
+static DEFINE_SPINLOCK(part_parser_lock);
+static LIST_HEAD(part_parsers);
+
+static struct mtd_part_parser *get_partition_parser(const char *name)
+{
+ struct list_head *this;
+ void *ret = NULL;
+ spin_lock(&part_parser_lock);
+
+ list_for_each(this, &part_parsers) {
+ struct mtd_part_parser *p = list_entry(this, struct mtd_part_parser, list);
+
+ if (!strcmp(p->name, name) && try_module_get(p->owner)) {
+ ret = p;
+ break;
+ }
+ }
+ spin_unlock(&part_parser_lock);
+
+ return ret;
+}
+
+int register_mtd_parser(struct mtd_part_parser *p)
+{
+ spin_lock(&part_parser_lock);
+ list_add(&p->list, &part_parsers);
+ spin_unlock(&part_parser_lock);
+
+ return 0;
+}
+
+int deregister_mtd_parser(struct mtd_part_parser *p)
+{
+ spin_lock(&part_parser_lock);
+ list_del(&p->list);
+ spin_unlock(&part_parser_lock);
+ return 0;
+}
+
+int parse_mtd_partitions(struct mtd_info *master, const char **types,
+ struct mtd_partition **pparts, unsigned long origin)
+{
+ struct mtd_part_parser *parser;
+ int ret = 0;
+
+ for ( ; ret <= 0 && *types; types++) {
+ parser = get_partition_parser(*types);
+#ifdef CONFIG_KMOD
+ if (!parser && !request_module("%s", *types))
+ parser = get_partition_parser(*types);
+#endif
+ if (!parser) {
+ printk(KERN_NOTICE "%s partition parsing not available\n",
+ *types);
+ continue;
+ }
+ ret = (*parser->parse_fn)(master, pparts, origin);
+ if (ret > 0) {
+ printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
+ ret, parser->name, master->name);
+ }
+ put_partition_parser(parser);
+ }
+ return ret;
+}
+
+EXPORT_SYMBOL_GPL(parse_mtd_partitions);
+EXPORT_SYMBOL_GPL(register_mtd_parser);
+EXPORT_SYMBOL_GPL(deregister_mtd_parser);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
diff --git a/linux-2.4.x/drivers/mtd/nand/Config.in b/linux-2.4.x/drivers/mtd/nand/Config.in
index 29d7e58..fb3771e 100644
--- a/linux-2.4.x/drivers/mtd/nand/Config.in
+++ b/linux-2.4.x/drivers/mtd/nand/Config.in
@@ -1,6 +1,6 @@
# drivers/mtd/nand/Config.in
-# $Id: Config.in,v 1.4 2001/09/19 09:35:23 dwmw2 Exp $
+# $Id: Config.in,v 1.24 2005/01/17 18:25:19 dmarlin Exp $
mainmenu_option next_comment
@@ -8,11 +8,59 @@ comment 'NAND Flash Device Drivers'
dep_tristate ' NAND Device Support' CONFIG_MTD_NAND $CONFIG_MTD
if [ "$CONFIG_MTD_NAND" = "y" -o "$CONFIG_MTD_NAND" = "m" ]; then
- bool ' Enable ECC correction algorithm' CONFIG_MTD_NAND_ECC
bool ' Verify NAND page writes' CONFIG_MTD_NAND_VERIFY_WRITE
fi
-if [ "$CONFIG_ARM" = "y" -a "$CONFIG_ARCH_P720T" = "y" ]; then
- dep_tristate ' NAND Flash device on SPIA board' CONFIG_MTD_NAND_SPIA $CONFIG_MTD_NAND
+
+if [ "$CONFIG_ARM" = "y" ]; then
+ dep_tristate ' NAND Flash device on SPIA board' CONFIG_MTD_NAND_SPIA $CONFIG_MTD_NAND $CONFIG_ARCH_P720T
+ dep_tristate ' NAND Flash device on TOTO board' CONFIG_MTD_NAND_TOTO $CONFIG_MTD_NAND $CONFIG_ARCH_OMAP
+ dep_tristate ' SmartMedia Card on AUTCPU12 board' CONFIG_MTD_NAND_AUTCPU12 $CONFIG_MTD_NAND $CONFIG_ARCH_AUTCPU12
+ dep_tristate ' NAND Flash device on EDP7312 board' CONFIG_MTD_NAND_EDB7312 $CONFIG_MTD_NAND $CONFIG_ARCH_EDB7312
+fi
+
+if [ "$CONFIG_MTD_DOC2001PLUS" = "y" -o "$CONFIG_MTD_DOC2001" = "y" -o "$CONFIG_MTD_DOC2000" = "y" -o "$CONFIG_MTD_NAND" = "y" ]; then
+ define_bool CONFIG_MTD_NAND_IDS y
+else
+ if [ "$CONFIG_MTD_DOC2001PLUS" = "m" -o "$CONFIG_MTD_DOC2001" = "m" -o "$CONFIG_MTD_DOC2000" = "m" -o "$CONFIG_MTD_NAND" = "m" ]; then
+ define_bool CONFIG_MTD_NAND_IDS m
+ fi
+fi
+
+if [ "$CONFIG_TOSHIBA_RBTX4925" = "y" ]; then
+ dep_tristate ' SmartMedia Card on Toshiba RBTX4925 reference board' CONFIG_MTD_NAND_TX4925NDFMC $CONFIG_MTD_NAND $CONFIG_TOSHIBA_RBTX4925_MPLEX_NAND
+fi
+
+if [ "$CONFIG_TOSHIBA_RBTX4938" = "y" ]; then
+ dep_tristate ' NAND Flash device on Toshiba RBTX4938 reference board' CONFIG_MTD_NAND_TX4938NDFMC $CONFIG_MTD_NAND $CONFIG_TOSHIBA_RBTX4938_MPLEX_NAND
+fi
+
+if [ "$CONFIG_PPCHAMELEONEVB" = "y" ]; then
+ dep_tristate ' NAND Flash device on PPChameleonEVB board' CONFIG_MTD_NAND_PPCHAMELEONEVB $CONFIG_MTD_NAND
+fi
+
+if [ "$CONFIG_SOC_AU1550" = "y" ]; then
+ dep_tristate ' NAND Flash Driver for Au1550 controller' CONFIG_MTD_NAND_AU1550 $CONFIG_MTD_NAND
+fi
+
+if [ "$CONFIG_SH_SOLUTION_ENGINE" = "y" ]; then
+ dep_tristate ' Renesas Flash ROM 4-slot interface board (FROM_BOARD4)' CONFIG_MTD_NAND_RTC_FROM4 $CONFIG_MTD_NAND
+ if [ "$CONFIG_MTD_NAND_RTC_FROM4" = "y" ]; then
+ define_bool CONFIG_REED_SOLOMON y
+ define_bool CONFIG_REED_SOLOMON_DEC8 y
+ else
+ if [ "$CONFIG_MTD_NAND_RTC_FROM4" = "m" ]; then
+ define_bool CONFIG_REED_SOLOMON m
+ define_bool CONFIG_REED_SOLOMON_DEC8 m
+ fi
+ fi
+fi
+
+dep_tristate ' DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)' CONFIG_MTD_NAND_DISKONCHIP $CONFIG_MTD_NAND $CONFIG_EXPERIMENTAL
+if [ "$CONFIG_MTD_NAND_DISKONCHIP" = "y" -o "$CONFIG_MTD_NAND_DISKONCHIP" = "m" ]; then
+ bool ' Advanced detection options for DiskOnChip' CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ hex ' Physical address of DiskOnChip' CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+ bool ' Probe high addresses' CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH $CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+ bool ' Allow BBT write on DiskOnChip Millennium and 2000TSOP' CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
fi
endmenu
diff --git a/linux-2.4.x/drivers/mtd/nand/Makefile b/linux-2.4.x/drivers/mtd/nand/Makefile
index 87de013..b390d5f 100644
--- a/linux-2.4.x/drivers/mtd/nand/Makefile
+++ b/linux-2.4.x/drivers/mtd/nand/Makefile
@@ -1,16 +1,16 @@
#
-# linux/drivers/nand/Makefile
+# linux/drivers/nand/Makefile.24
+# Makefile for obsolete kernels.
#
-# $Id: Makefile,v 1.5 2001/09/19 22:39:59 dwmw2 Exp $
+# $Id: Makefile.24,v 1.1 2004/07/12 16:08:17 dwmw2 Exp $
O_TARGET := nandlink.o
+export-objs := nand_base.o nand_bbt.o nand_ecc.o nand_ids.o
+list-multi := nand.o
-export-objs := nand.o nand_ecc.o
-
-nandobjs-y := nand.o
-nandobjs-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o
-
-obj-$(CONFIG_MTD_NAND) += $(nandobjs-y)
-obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
+include Makefile.common
include $(TOPDIR)/Rules.make
+
+nand.o: $(nand-objs)
+ $(LD) -r -o $@ $(nand-objs)
diff --git a/linux-2.4.x/drivers/mtd/nand/Makefile.common b/linux-2.4.x/drivers/mtd/nand/Makefile.common
new file mode 100644
index 0000000..0375ac1
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/Makefile.common
@@ -0,0 +1,23 @@
+#
+# linux/drivers/nand/Makefile
+#
+# $Id: Makefile.common,v 1.17 2006/02/21 09:29:24 pavlov Exp $
+
+obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
+obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
+
+obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
+obj-$(CONFIG_MTD_NAND_TOTO) += toto.o
+obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
+obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
+obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o
+obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o
+obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o
+obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
+obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
+obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
+obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
+obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
+obj-$(CONFIG_MTD_NAND_AT91) += at91_nand.o
+
+nand-objs = nand_base.o nand_bbt.o
diff --git a/linux-2.4.x/drivers/mtd/nand/at91_nand.c b/linux-2.4.x/drivers/mtd/nand/at91_nand.c
new file mode 100644
index 0000000..b134ebd
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/at91_nand.c
@@ -0,0 +1,246 @@
+/*
+ * drivers/mtd/nand/at91_nand.c
+ *
+ * Copyright (C) 2003 Rick Bronson
+ *
+ * Derived from drivers/mtd/nand/autcpu12.c
+ * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * Derived from drivers/mtd/spia.c
+ * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/sizes.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/board.h>
+#include <asm/arch/gpio.h>
+
+struct at91_nand_host {
+ struct nand_chip nand_chip;
+ struct mtd_info mtd;
+ void __iomem *io_base;
+ struct at91_nand_data *board;
+};
+
+/*
+ * Hardware specific access to control-lines
+ */
+static void at91_nand_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct at91_nand_host *host = nand_chip->priv;
+
+ switch(cmd) {
+ case NAND_CTL_SETCLE:
+ nand_chip->IO_ADDR_W = host->io_base + (1 << host->board->cle);
+ break;
+ case NAND_CTL_CLRCLE:
+ nand_chip->IO_ADDR_W = host->io_base;
+ break;
+ case NAND_CTL_SETALE:
+ nand_chip->IO_ADDR_W = host->io_base + (1 << host->board->ale);
+ break;
+ case NAND_CTL_CLRALE:
+ nand_chip->IO_ADDR_W = host->io_base;
+ break;
+ case NAND_CTL_SETNCE:
+ break;
+ case NAND_CTL_CLRNCE:
+ break;
+ }
+}
+
+
+/*
+ * Read the Device Ready pin.
+ */
+static int at91_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct at91_nand_host *host = nand_chip->priv;
+
+ return at91_get_gpio_value(host->board->rdy_pin);
+}
+
+
+/*
+ * Enable NAND and detect card.
+ */
+static void at91_nand_enable(struct at91_nand_host *host)
+{
+ unsigned int csa;
+
+ /* Setup Smart Media, first enable the address range of CS3 */
+ csa = at91_sys_read(AT91_EBI_CSA);
+ at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS3A_SMC_SMARTMEDIA);
+
+ /* set the bus interface characteristics */
+ at91_sys_write(AT91_SMC_CSR(3), AT91_SMC_ACSS_STD | AT91_SMC_DBW_8 | AT91_SMC_WSEN
+ | AT91_SMC_NWS_(5)
+ | AT91_SMC_TDF_(1)
+ | AT91_SMC_RWSETUP_(0) /* tDS Data Set up Time 30 - ns */
+ | AT91_SMC_RWHOLD_(1) /* tDH Data Hold Time 20 - ns */
+ );
+
+ if (host->board->enable_pin)
+ at91_set_gpio_value(host->board->enable_pin, 0);
+}
+
+/*
+ * Disable NAND.
+ */
+static void at91_nand_disable(struct at91_nand_host *host)
+{
+ if (host->board->enable_pin)
+ at91_set_gpio_value(host->board->enable_pin, 1);
+}
+
+/*
+ * Probe for the NAND device.
+ */
+static int __init at91_nand_probe(struct platform_device *pdev)
+{
+ struct at91_nand_host *host;
+ struct mtd_info *mtd;
+ struct nand_chip *nand_chip;
+ int res;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *partitions = NULL;
+ int num_partitions = 0;
+#endif
+
+ /* Allocate memory for the device structure (and zero it) */
+ host = kzalloc(sizeof(struct at91_nand_host), GFP_KERNEL);
+ if (!host) {
+ printk(KERN_ERR "at91_nand: failed to allocate device structure.\n");
+ return -ENOMEM;
+ }
+
+ host->io_base = ioremap(pdev->resource[0].start,
+ pdev->resource[0].end - pdev->resource[0].start + 1);
+ if (host->io_base == NULL) {
+ printk(KERN_ERR "at91_nand: ioremap failed\n");
+ kfree(host);
+ return -EIO;
+ }
+
+ mtd = &host->mtd;
+ nand_chip = &host->nand_chip;
+ host->board = pdev->dev.platform_data;
+
+ nand_chip->priv = host; /* link the private data structures */
+ mtd->priv = nand_chip;
+
+ /* Set address of NAND IO lines */
+ nand_chip->IO_ADDR_R = host->io_base;
+ nand_chip->IO_ADDR_W = host->io_base;
+ nand_chip->hwcontrol = at91_nand_hwcontrol;
+ nand_chip->dev_ready = at91_nand_device_ready;
+ nand_chip->eccmode = NAND_ECC_SOFT; /* enable ECC */
+ nand_chip->chip_delay = 20; /* 20us command delay time */
+
+ platform_set_drvdata(pdev, host);
+ at91_nand_enable(host);
+
+ if (host->board->det_pin) {
+ if (at91_get_gpio_value(host->board->det_pin)) {
+ printk ("No SmartMedia card inserted.\n");
+ res = -ENXIO;
+ goto out;
+ }
+ }
+
+ /* Scan to find existence of the device */
+ if (nand_scan(mtd, 1)) {
+ res = -ENXIO;
+ goto out;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ if (host->board->partition_info)
+ partitions = host->board->partition_info(mtd->size, &num_partitions);
+
+ if ((!partitions) || (num_partitions == 0)) {
+ printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
+ res = ENXIO;
+ goto out;
+ }
+
+ res = add_mtd_partitions(mtd, partitions, num_partitions);
+#else
+ res = add_mtd_device(mtd);
+#endif
+
+out:
+ if (res) {
+ at91_nand_disable(host);
+ platform_set_drvdata(pdev, NULL);
+
+ iounmap(host->io_base);
+ kfree(host);
+ }
+
+ return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int __devexit at91_nand_remove(struct platform_device *pdev)
+{
+ struct at91_nand_host *host = platform_get_drvdata(pdev);
+ struct mtd_info *mtd = &host->mtd;
+
+ del_mtd_partitions(mtd);
+ del_mtd_device(mtd);
+
+ at91_nand_disable(host);
+
+ iounmap(host->io_base);
+ kfree(host);
+
+ return 0;
+}
+
+static struct platform_driver at91_nand_driver = {
+ .probe = at91_nand_probe,
+ .remove = at91_nand_remove,
+ .driver = {
+ .name = "at91_nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init at91_nand_init(void)
+{
+ return platform_driver_register(&at91_nand_driver);
+}
+
+
+static void __exit at91_nand_exit(void)
+{
+ platform_driver_unregister(&at91_nand_driver);
+}
+
+
+module_init(at91_nand_init);
+module_exit(at91_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rick Bronson");
+MODULE_DESCRIPTION("Glue layer for SmartMediaCard on ATMEL AT91RM9200");
diff --git a/linux-2.4.x/drivers/mtd/nand/au1550nd.c b/linux-2.4.x/drivers/mtd/nand/au1550nd.c
new file mode 100644
index 0000000..057f393
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/au1550nd.c
@@ -0,0 +1,634 @@
+/*
+ * drivers/mtd/nand/au1550nd.c
+ *
+ * Copyright (C) 2004 Embedded Edge, LLC
+ *
+ * $Id: au1550nd.c,v 1.15 2006/03/29 08:31:12 dwmw2 Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/version.h>
+#include <asm/io.h>
+
+/* fixme: this is ugly */
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 0)
+#include <asm/mach-au1x00/au1xxx.h>
+#else
+#include <asm/au1000.h>
+#ifdef CONFIG_MIPS_PB1550
+#include <asm/pb1550.h>
+#endif
+#ifdef CONFIG_MIPS_DB1550
+#include <asm/db1x00.h>
+#endif
+#endif
+
+/*
+ * MTD structure for NAND controller
+ */
+static struct mtd_info *au1550_mtd = NULL;
+static void __iomem *p_nand;
+static int nand_width = 1; /* default x8*/
+
+/*
+ * Define partitions for flash device
+ */
+const static struct mtd_partition partition_info[] = {
+ {
+ .name = "NAND FS 0",
+ .offset = 0,
+ .size = 8*1024*1024
+ },
+ {
+ .name = "NAND FS 1",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL
+ }
+};
+
+/**
+ * au_read_byte - read one byte from the chip
+ * @mtd: MTD device structure
+ *
+ * read function for 8bit buswidth
+ */
+static u_char au_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ u_char ret = readb(this->IO_ADDR_R);
+ au_sync();
+ return ret;
+}
+
+/**
+ * au_write_byte - write one byte to the chip
+ * @mtd: MTD device structure
+ * @byte: pointer to data byte to write
+ *
+ * write function for 8bit buswidth
+ */
+static void au_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ writeb(byte, this->IO_ADDR_W);
+ au_sync();
+}
+
+/**
+ * au_read_byte16 - read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * read function for 16bit buswidth with
+ * endianness conversion
+ */
+static u_char au_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
+ au_sync();
+ return ret;
+}
+
+/**
+ * au_write_byte16 - write one byte endianness aware to the chip
+ * @mtd: MTD device structure
+ * @byte: pointer to data byte to write
+ *
+ * write function for 16bit buswidth with
+ * endianness conversion
+ */
+static void au_write_byte16(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
+ au_sync();
+}
+
+/**
+ * au_read_word - read one word from the chip
+ * @mtd: MTD device structure
+ *
+ * read function for 16bit buswidth without
+ * endianness conversion
+ */
+static u16 au_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ u16 ret = readw(this->IO_ADDR_R);
+ au_sync();
+ return ret;
+}
+
+/**
+ * au_write_word - write one word to the chip
+ * @mtd: MTD device structure
+ * @word: data word to write
+ *
+ * write function for 16bit buswidth without
+ * endianness conversion
+ */
+static void au_write_word(struct mtd_info *mtd, u16 word)
+{
+ struct nand_chip *this = mtd->priv;
+ writew(word, this->IO_ADDR_W);
+ au_sync();
+}
+
+/**
+ * au_write_buf - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * write function for 8bit buswidth
+ */
+static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++) {
+ writeb(buf[i], this->IO_ADDR_W);
+ au_sync();
+ }
+}
+
+/**
+ * au_read_buf - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * read function for 8bit buswidth
+ */
+static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++) {
+ buf[i] = readb(this->IO_ADDR_R);
+ au_sync();
+ }
+}
+
+/**
+ * au_verify_buf - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * verify function for 8bit buswidth
+ */
+static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++) {
+ if (buf[i] != readb(this->IO_ADDR_R))
+ return -EFAULT;
+ au_sync();
+ }
+
+ return 0;
+}
+
+/**
+ * au_write_buf16 - write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * write function for 16bit buswidth
+ */
+static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i=0; i<len; i++) {
+ writew(p[i], this->IO_ADDR_W);
+ au_sync();
+ }
+
+}
+
+/**
+ * au_read_buf16 - read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * read function for 16bit buswidth
+ */
+static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i=0; i<len; i++) {
+ p[i] = readw(this->IO_ADDR_R);
+ au_sync();
+ }
+}
+
+/**
+ * au_verify_buf16 - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * verify function for 16bit buswidth
+ */
+static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i=0; i<len; i++) {
+ if (p[i] != readw(this->IO_ADDR_R))
+ return -EFAULT;
+ au_sync();
+ }
+ return 0;
+}
+
+
+static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ register struct nand_chip *this = mtd->priv;
+
+ switch(cmd){
+
+ case NAND_CTL_SETCLE: this->IO_ADDR_W = p_nand + MEM_STNAND_CMD; break;
+ case NAND_CTL_CLRCLE: this->IO_ADDR_W = p_nand + MEM_STNAND_DATA; break;
+
+ case NAND_CTL_SETALE: this->IO_ADDR_W = p_nand + MEM_STNAND_ADDR; break;
+ case NAND_CTL_CLRALE:
+ this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
+ /* FIXME: Nobody knows why this is necessary,
+ * but it works only that way */
+ udelay(1);
+ break;
+
+ case NAND_CTL_SETNCE:
+ /* assert (force assert) chip enable */
+ au_writel((1<<(4+NAND_CS)), MEM_STNDCTL);
+ break;
+
+ case NAND_CTL_CLRNCE:
+ /* deassert chip enable */
+ au_writel(0, MEM_STNDCTL);
+ break;
+ }
+
+ this->IO_ADDR_R = this->IO_ADDR_W;
+
+ /* Drain the writebuffer */
+ au_sync();
+}
+
+int au1550_device_ready(struct mtd_info *mtd)
+{
+ int ret = (au_readl(MEM_STSTAT) & 0x1) ? 1 : 0;
+ au_sync();
+ return ret;
+}
+
+/**
+ * au1550_select_chip - control -CE line
+ * Forbid driving -CE manually permitting the NAND controller to do this.
+ * Keeping -CE asserted during the whole sector reads interferes with the
+ * NOR flash and PCMCIA drivers as it causes contention on the static bus.
+ * We only have to hold -CE low for the NAND read commands since the flash
+ * chip needs it to be asserted during chip not ready time but the NAND
+ * controller keeps it released.
+ *
+ * @mtd: MTD device structure
+ * @chip: chipnumber to select, -1 for deselect
+ */
+static void au1550_select_chip(struct mtd_info *mtd, int chip)
+{
+}
+
+/**
+ * au1550_command - Send command to NAND device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ */
+static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ register struct nand_chip *this = mtd->priv;
+ int ce_override = 0, i;
+ ulong flags;
+
+ /* Begin command latch cycle */
+ this->hwcontrol(mtd, NAND_CTL_SETCLE);
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->oobblock) {
+ /* OOB area */
+ column -= mtd->oobblock;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ this->write_byte(mtd, readcmd);
+ }
+ this->write_byte(mtd, command);
+
+ /* Set ALE and clear CLE to start address cycle */
+ this->hwcontrol(mtd, NAND_CTL_CLRCLE);
+
+ if (column != -1 || page_addr != -1) {
+ this->hwcontrol(mtd, NAND_CTL_SETALE);
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (this->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ this->write_byte(mtd, column);
+ }
+ if (page_addr != -1) {
+ this->write_byte(mtd, (u8)(page_addr & 0xff));
+
+ if (command == NAND_CMD_READ0 ||
+ command == NAND_CMD_READ1 ||
+ command == NAND_CMD_READOOB) {
+ /*
+ * NAND controller will release -CE after
+ * the last address byte is written, so we'll
+ * have to forcibly assert it. No interrupts
+ * are allowed while we do this as we don't
+ * want the NOR flash or PCMCIA drivers to
+ * steal our precious bytes of data...
+ */
+ ce_override = 1;
+ local_irq_save(flags);
+ this->hwcontrol(mtd, NAND_CTL_SETNCE);
+ }
+
+ this->write_byte(mtd, (u8)(page_addr >> 8));
+
+ /* One more address cycle for devices > 32MiB */
+ if (this->chipsize > (32 << 20))
+ this->write_byte(mtd, (u8)((page_addr >> 16) & 0x0f));
+ }
+ /* Latch in address */
+ this->hwcontrol(mtd, NAND_CTL_CLRALE);
+ }
+
+ /*
+ * Program and erase have their own busy handlers.
+ * Status and sequential in need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ break;
+
+ case NAND_CMD_READ0:
+ case NAND_CMD_READ1:
+ case NAND_CMD_READOOB:
+ /* Check if we're really driving -CE low (just in case) */
+ if (unlikely(!ce_override))
+ break;
+
+ /* Apply a short delay always to ensure that we do wait tWB. */
+ ndelay(100);
+ /* Wait for a chip to become ready... */
+ for (i = this->chip_delay; !this->dev_ready(mtd) && i > 0; --i)
+ udelay(1);
+
+ /* Release -CE and re-enable interrupts. */
+ this->hwcontrol(mtd, NAND_CTL_CLRNCE);
+ local_irq_restore(flags);
+ return;
+ }
+ /* Apply this short delay always to ensure that we do wait tWB. */
+ ndelay(100);
+
+ while(!this->dev_ready(mtd));
+}
+
+
+/*
+ * Main initialization routine
+ */
+int __init au1xxx_nand_init (void)
+{
+ struct nand_chip *this;
+ u16 boot_swapboot = 0; /* default value */
+ int retval;
+ u32 mem_staddr;
+ u32 nand_phys;
+
+ /* Allocate memory for MTD device structure and private data */
+ au1550_mtd = kmalloc (sizeof(struct mtd_info) +
+ sizeof (struct nand_chip), GFP_KERNEL);
+ if (!au1550_mtd) {
+ printk ("Unable to allocate NAND MTD dev structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&au1550_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) au1550_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ au1550_mtd->priv = this;
+
+ /* MEM_STNDCTL: disable ints, disable nand boot */
+ au_writel(0, MEM_STNDCTL);
+
+#ifdef CONFIG_MIPS_PB1550
+ /* set gpio206 high */
+ au_writel(au_readl(GPIO2_DIR) & ~(1<<6), GPIO2_DIR);
+
+ boot_swapboot = (au_readl(MEM_STSTAT) & (0x7<<1)) |
+ ((bcsr->status >> 6) & 0x1);
+ switch (boot_swapboot) {
+ case 0:
+ case 2:
+ case 8:
+ case 0xC:
+ case 0xD:
+ /* x16 NAND Flash */
+ nand_width = 0;
+ break;
+ case 1:
+ case 9:
+ case 3:
+ case 0xE:
+ case 0xF:
+ /* x8 NAND Flash */
+ nand_width = 1;
+ break;
+ default:
+ printk("Pb1550 NAND: bad boot:swap\n");
+ retval = -EINVAL;
+ goto outmem;
+ }
+#endif
+
+ /* Configure chip-select; normally done by boot code, e.g. YAMON */
+#ifdef NAND_STCFG
+ if (NAND_CS == 0) {
+ au_writel(NAND_STCFG, MEM_STCFG0);
+ au_writel(NAND_STTIME, MEM_STTIME0);
+ au_writel(NAND_STADDR, MEM_STADDR0);
+ }
+ if (NAND_CS == 1) {
+ au_writel(NAND_STCFG, MEM_STCFG1);
+ au_writel(NAND_STTIME, MEM_STTIME1);
+ au_writel(NAND_STADDR, MEM_STADDR1);
+ }
+ if (NAND_CS == 2) {
+ au_writel(NAND_STCFG, MEM_STCFG2);
+ au_writel(NAND_STTIME, MEM_STTIME2);
+ au_writel(NAND_STADDR, MEM_STADDR2);
+ }
+ if (NAND_CS == 3) {
+ au_writel(NAND_STCFG, MEM_STCFG3);
+ au_writel(NAND_STTIME, MEM_STTIME3);
+ au_writel(NAND_STADDR, MEM_STADDR3);
+ }
+#endif
+
+ /* Locate NAND chip-select in order to determine NAND phys address */
+ mem_staddr = 0x00000000;
+ if (((au_readl(MEM_STCFG0) & 0x7) == 0x5) && (NAND_CS == 0))
+ mem_staddr = au_readl(MEM_STADDR0);
+ else if (((au_readl(MEM_STCFG1) & 0x7) == 0x5) && (NAND_CS == 1))
+ mem_staddr = au_readl(MEM_STADDR1);
+ else if (((au_readl(MEM_STCFG2) & 0x7) == 0x5) && (NAND_CS == 2))
+ mem_staddr = au_readl(MEM_STADDR2);
+ else if (((au_readl(MEM_STCFG3) & 0x7) == 0x5) && (NAND_CS == 3))
+ mem_staddr = au_readl(MEM_STADDR3);
+
+ if (mem_staddr == 0x00000000) {
+ printk("Au1xxx NAND: ERROR WITH NAND CHIP-SELECT\n");
+ kfree(au1550_mtd);
+ return 1;
+ }
+ nand_phys = (mem_staddr << 4) & 0xFFFC0000;
+
+ p_nand = (void __iomem *)ioremap(nand_phys, 0x1000);
+
+ /* make controller and MTD agree */
+ if (NAND_CS == 0)
+ nand_width = au_readl(MEM_STCFG0) & (1<<22);
+ if (NAND_CS == 1)
+ nand_width = au_readl(MEM_STCFG1) & (1<<22);
+ if (NAND_CS == 2)
+ nand_width = au_readl(MEM_STCFG2) & (1<<22);
+ if (NAND_CS == 3)
+ nand_width = au_readl(MEM_STCFG3) & (1<<22);
+
+
+ /* Set address of hardware control function */
+ this->hwcontrol = au1550_hwcontrol;
+ this->dev_ready = au1550_device_ready;
+ this->select_chip = au1550_select_chip;
+ this->cmdfunc = au1550_command;
+
+ /* 30 us command delay time */
+ this->chip_delay = 30;
+ this->eccmode = NAND_ECC_SOFT;
+
+ this->options = NAND_NO_AUTOINCR;
+
+ if (!nand_width)
+ this->options |= NAND_BUSWIDTH_16;
+
+ this->read_byte = (!nand_width) ? au_read_byte16 : au_read_byte;
+ this->write_byte = (!nand_width) ? au_write_byte16 : au_write_byte;
+ this->write_word = au_write_word;
+ this->read_word = au_read_word;
+ this->write_buf = (!nand_width) ? au_write_buf16 : au_write_buf;
+ this->read_buf = (!nand_width) ? au_read_buf16 : au_read_buf;
+ this->verify_buf = (!nand_width) ? au_verify_buf16 : au_verify_buf;
+
+ /* Scan to find existence of the device */
+ if (nand_scan (au1550_mtd, 1)) {
+ retval = -ENXIO;
+ goto outio;
+ }
+
+ /* Register the partitions */
+ add_mtd_partitions(au1550_mtd, partition_info, ARRAY_SIZE(partition_info));
+
+ return 0;
+
+ outio:
+ iounmap ((void *)p_nand);
+
+ outmem:
+ kfree (au1550_mtd);
+ return retval;
+}
+
+module_init(au1xxx_nand_init);
+
+/*
+ * Clean up routine
+ */
+#ifdef MODULE
+static void __exit au1550_cleanup (void)
+{
+ struct nand_chip *this = (struct nand_chip *) &au1550_mtd[1];
+
+ /* Release resources, unregister device */
+ nand_release (au1550_mtd);
+
+ /* Free the MTD device structure */
+ kfree (au1550_mtd);
+
+ /* Unmap */
+ iounmap ((void *)p_nand);
+}
+module_exit(au1550_cleanup);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Embedded Edge, LLC");
+MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
diff --git a/linux-2.4.x/drivers/mtd/nand/autcpu12.c b/linux-2.4.x/drivers/mtd/nand/autcpu12.c
new file mode 100644
index 0000000..f42c122
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/autcpu12.c
@@ -0,0 +1,224 @@
+/*
+ * drivers/mtd/autcpu12.c
+ *
+ * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
+ *
+ * Derived from drivers/mtd/spia.c
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ *
+ * $Id: autcpu12.c,v 1.24 2005/11/29 20:01:29 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * autronix autcpu12 board, which is a SmartMediaCard. It supports
+ * 16MiB, 32MiB and 64MiB cards.
+ *
+ *
+ * 02-12-2002 TG Cleanup of module params
+ *
+ * 02-20-2002 TG adjusted for different rd/wr address support
+ * added support for read device ready/busy line
+ * added page_cache
+ *
+ * 10-06-2002 TG 128K card support added
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/arch/hardware.h>
+#include <asm/sizes.h>
+#include <asm/arch/autcpu12.h>
+
+/*
+ * MTD structure for AUTCPU12 board
+ */
+static struct mtd_info *autcpu12_mtd = NULL;
+
+static int autcpu12_io_base = CS89712_VIRT_BASE;
+static int autcpu12_fio_pbase = AUTCPU12_PHYS_SMC;
+static int autcpu12_fio_ctrl = AUTCPU12_SMC_SELECT_OFFSET;
+static int autcpu12_pedr = AUTCPU12_SMC_PORT_OFFSET;
+static void __iomem * autcpu12_fio_base;
+
+/*
+ * Define partitions for flash devices
+ */
+static struct mtd_partition partition_info16k[] = {
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 8 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 8 * SZ_1M,
+ .size = 8 * SZ_1M },
+};
+
+static struct mtd_partition partition_info32k[] = {
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 8 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 8 * SZ_1M,
+ .size = 24 * SZ_1M },
+};
+
+static struct mtd_partition partition_info64k[] = {
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 16 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 16 * SZ_1M,
+ .size = 48 * SZ_1M },
+};
+
+static struct mtd_partition partition_info128k[] = {
+ { .name = "AUTCPU12 flash partition 1",
+ .offset = 0,
+ .size = 16 * SZ_1M },
+ { .name = "AUTCPU12 flash partition 2",
+ .offset = 16 * SZ_1M,
+ .size = 112 * SZ_1M },
+};
+
+#define NUM_PARTITIONS16K 2
+#define NUM_PARTITIONS32K 2
+#define NUM_PARTITIONS64K 2
+#define NUM_PARTITIONS128K 2
+/*
+ * hardware specific access to control-lines
+*/
+static void autcpu12_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+
+ switch(cmd){
+
+ case NAND_CTL_SETCLE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) |= AUTCPU12_SMC_CLE; break;
+ case NAND_CTL_CLRCLE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) &= ~AUTCPU12_SMC_CLE; break;
+
+ case NAND_CTL_SETALE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) |= AUTCPU12_SMC_ALE; break;
+ case NAND_CTL_CLRALE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) &= ~AUTCPU12_SMC_ALE; break;
+
+ case NAND_CTL_SETNCE: (*(volatile unsigned char *) (autcpu12_fio_base + autcpu12_fio_ctrl)) = 0x01; break;
+ case NAND_CTL_CLRNCE: (*(volatile unsigned char *) (autcpu12_fio_base + autcpu12_fio_ctrl)) = 0x00; break;
+ }
+}
+
+/*
+* read device ready pin
+*/
+int autcpu12_device_ready(struct mtd_info *mtd)
+{
+
+ return ( (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) & AUTCPU12_SMC_RDY) ? 1 : 0;
+
+}
+
+/*
+ * Main initialization routine
+ */
+int __init autcpu12_init (void)
+{
+ struct nand_chip *this;
+ int err = 0;
+
+ /* Allocate memory for MTD device structure and private data */
+ autcpu12_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
+ GFP_KERNEL);
+ if (!autcpu12_mtd) {
+ printk ("Unable to allocate AUTCPU12 NAND MTD device structure.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* map physical address */
+ autcpu12_fio_base = ioremap(autcpu12_fio_pbase,SZ_1K);
+ if(!autcpu12_fio_base){
+ printk("Ioremap autcpu12 SmartMedia Card failed\n");
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&autcpu12_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) autcpu12_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ autcpu12_mtd->priv = this;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = autcpu12_fio_base;
+ this->IO_ADDR_W = autcpu12_fio_base;
+ this->hwcontrol = autcpu12_hwcontrol;
+ this->dev_ready = autcpu12_device_ready;
+ /* 20 us command delay time */
+ this->chip_delay = 20;
+ this->eccmode = NAND_ECC_SOFT;
+
+ /* Use a flash based bad block table */
+ this->options = NAND_USE_FLASH_BBT;
+
+ /* Scan to find existence of the device */
+ if (nand_scan (autcpu12_mtd, 1)) {
+ err = -ENXIO;
+ goto out_ior;
+ }
+
+ /* Register the partitions */
+ switch(autcpu12_mtd->size){
+ case SZ_16M: add_mtd_partitions(autcpu12_mtd, partition_info16k, NUM_PARTITIONS16K); break;
+ case SZ_32M: add_mtd_partitions(autcpu12_mtd, partition_info32k, NUM_PARTITIONS32K); break;
+ case SZ_64M: add_mtd_partitions(autcpu12_mtd, partition_info64k, NUM_PARTITIONS64K); break;
+ case SZ_128M: add_mtd_partitions(autcpu12_mtd, partition_info128k, NUM_PARTITIONS128K); break;
+ default: {
+ printk ("Unsupported SmartMedia device\n");
+ err = -ENXIO;
+ goto out_ior;
+ }
+ }
+ goto out;
+
+out_ior:
+ iounmap((void *)autcpu12_fio_base);
+out_mtd:
+ kfree (autcpu12_mtd);
+out:
+ return err;
+}
+
+module_init(autcpu12_init);
+
+/*
+ * Clean up routine
+ */
+#ifdef MODULE
+static void __exit autcpu12_cleanup (void)
+{
+ /* Release resources, unregister device */
+ nand_release (autcpu12_mtd);
+
+ /* unmap physical address */
+ iounmap((void *)autcpu12_fio_base);
+
+ /* Free the MTD device structure */
+ kfree (autcpu12_mtd);
+}
+module_exit(autcpu12_cleanup);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Glue layer for SmartMediaCard on autronix autcpu12");
diff --git a/linux-2.4.x/drivers/mtd/nand/diskonchip.c b/linux-2.4.x/drivers/mtd/nand/diskonchip.c
new file mode 100644
index 0000000..21d4e8f
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/diskonchip.c
@@ -0,0 +1,1794 @@
+/*
+ * drivers/mtd/nand/diskonchip.c
+ *
+ * (C) 2003 Red Hat, Inc.
+ * (C) 2004 Dan Brown <dan_brown@ieee.org>
+ * (C) 2004 Kalev Lember <kalev@smartlink.ee>
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
+ * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
+ *
+ * Error correction code lifted from the old docecc code
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright (C) 2000 Netgem S.A.
+ * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Interface to generic NAND code for M-Systems DiskOnChip devices
+ *
+ * $Id: diskonchip.c,v 1.55 2005/11/07 11:14:30 gleixner Exp $
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/rslib.h>
+#include <linux/moduleparam.h>
+#include <asm/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/doc2000.h>
+#include <linux/mtd/compatmac.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/inftl.h>
+
+/* Where to look for the devices? */
+#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
+#endif
+
+static unsigned long __initdata doc_locations[] = {
+#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
+ 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
+ 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
+ 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
+ 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
+ 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
+#else /* CONFIG_MTD_DOCPROBE_HIGH */
+ 0xc8000, 0xca000, 0xcc000, 0xce000,
+ 0xd0000, 0xd2000, 0xd4000, 0xd6000,
+ 0xd8000, 0xda000, 0xdc000, 0xde000,
+ 0xe0000, 0xe2000, 0xe4000, 0xe6000,
+ 0xe8000, 0xea000, 0xec000, 0xee000,
+#endif /* CONFIG_MTD_DOCPROBE_HIGH */
+#elif defined(__PPC__)
+ 0xe4000000,
+#elif defined(CONFIG_MOMENCO_OCELOT)
+ 0x2f000000,
+ 0xff000000,
+#elif defined(CONFIG_MOMENCO_OCELOT_G) || defined (CONFIG_MOMENCO_OCELOT_C)
+ 0xff000000,
+#else
+#warning Unknown architecture for DiskOnChip. No default probe locations defined
+#endif
+ 0xffffffff };
+
+static struct mtd_info *doclist = NULL;
+
+struct doc_priv {
+ void __iomem *virtadr;
+ unsigned long physadr;
+ u_char ChipID;
+ u_char CDSNControl;
+ int chips_per_floor; /* The number of chips detected on each floor */
+ int curfloor;
+ int curchip;
+ int mh0_page;
+ int mh1_page;
+ struct mtd_info *nextdoc;
+};
+
+/* This is the syndrome computed by the HW ecc generator upon reading an empty
+ page, one with all 0xff for data and stored ecc code. */
+static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a };
+/* This is the ecc value computed by the HW ecc generator upon writing an empty
+ page, one with all 0xff for data. */
+static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
+
+#define INFTL_BBT_RESERVED_BLOCKS 4
+
+#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
+#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
+#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
+
+static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd);
+static void doc200x_select_chip(struct mtd_info *mtd, int chip);
+
+static int debug=0;
+module_param(debug, int, 0);
+
+static int try_dword=1;
+module_param(try_dword, int, 0);
+
+static int no_ecc_failures=0;
+module_param(no_ecc_failures, int, 0);
+
+static int no_autopart=0;
+module_param(no_autopart, int, 0);
+
+static int show_firmware_partition=0;
+module_param(show_firmware_partition, int, 0);
+
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
+static int inftl_bbt_write=1;
+#else
+static int inftl_bbt_write=0;
+#endif
+module_param(inftl_bbt_write, int, 0);
+
+static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS;
+module_param(doc_config_location, ulong, 0);
+MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
+
+
+/* Sector size for HW ECC */
+#define SECTOR_SIZE 512
+/* The sector bytes are packed into NB_DATA 10 bit words */
+#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
+/* Number of roots */
+#define NROOTS 4
+/* First consective root */
+#define FCR 510
+/* Number of symbols */
+#define NN 1023
+
+/* the Reed Solomon control structure */
+static struct rs_control *rs_decoder;
+
+/*
+ * The HW decoder in the DoC ASIC's provides us an error syndrome,
+ * which we must convert to a standard syndrome usable by the generic
+ * Reed-Solomon library code.
+ *
+ * Fabrice Bellard figured this out in the old docecc code. I added
+ * some comments, improved a minor bit and converted it to make use
+ * of the generic Reed-Solomon library. tglx
+ */
+static int doc_ecc_decode (struct rs_control *rs, uint8_t *data, uint8_t *ecc)
+{
+ int i, j, nerr, errpos[8];
+ uint8_t parity;
+ uint16_t ds[4], s[5], tmp, errval[8], syn[4];
+
+ /* Convert the ecc bytes into words */
+ ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
+ ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
+ ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
+ ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
+ parity = ecc[1];
+
+ /* Initialize the syndrome buffer */
+ for (i = 0; i < NROOTS; i++)
+ s[i] = ds[0];
+ /*
+ * Evaluate
+ * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
+ * where x = alpha^(FCR + i)
+ */
+ for(j = 1; j < NROOTS; j++) {
+ if(ds[j] == 0)
+ continue;
+ tmp = rs->index_of[ds[j]];
+ for(i = 0; i < NROOTS; i++)
+ s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)];
+ }
+
+ /* Calc syn[i] = s[i] / alpha^(v + i) */
+ for (i = 0; i < NROOTS; i++) {
+ if (s[i])
+ syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i));
+ else
+ syn[i] = 0;
+ }
+ /* Call the decoder library */
+ nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
+
+ /* Uncorrectable errors? */
+ if (nerr < 0)
+ return nerr;
+
+ /*
+ * Correct the errors. The bitpositions are a bit of magic,
+ * but they are given by the design of the de/encoder circuit
+ * in the DoC ASIC's.
+ */
+ for(i = 0;i < nerr; i++) {
+ int index, bitpos, pos = 1015 - errpos[i];
+ uint8_t val;
+ if (pos >= NB_DATA && pos < 1019)
+ continue;
+ if (pos < NB_DATA) {
+ /* extract bit position (MSB first) */
+ pos = 10 * (NB_DATA - 1 - pos) - 6;
+ /* now correct the following 10 bits. At most two bytes
+ can be modified since pos is even */
+ index = (pos >> 3) ^ 1;
+ bitpos = pos & 7;
+ if ((index >= 0 && index < SECTOR_SIZE) ||
+ index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t) (errval[i] >> (2 + bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ index = ((pos >> 3) + 1) ^ 1;
+ bitpos = (bitpos + 10) & 7;
+ if (bitpos == 0)
+ bitpos = 8;
+ if ((index >= 0 && index < SECTOR_SIZE) ||
+ index == (SECTOR_SIZE + 1)) {
+ val = (uint8_t)(errval[i] << (8 - bitpos));
+ parity ^= val;
+ if (index < SECTOR_SIZE)
+ data[index] ^= val;
+ }
+ }
+ }
+ /* If the parity is wrong, no rescue possible */
+ return parity ? -1 : nerr;
+}
+
+static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
+{
+ volatile char dummy;
+ int i;
+
+ for (i = 0; i < cycles; i++) {
+ if (DoC_is_Millennium(doc))
+ dummy = ReadDOC(doc->virtadr, NOP);
+ else if (DoC_is_MillenniumPlus(doc))
+ dummy = ReadDOC(doc->virtadr, Mplus_NOP);
+ else
+ dummy = ReadDOC(doc->virtadr, DOCStatus);
+ }
+
+}
+
+#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
+
+/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
+static int _DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ unsigned long timeo = jiffies + (HZ * 10);
+
+ if(debug) printk("_DoC_WaitReady...\n");
+ /* Out-of-line routine to wait for chip response */
+ if (DoC_is_MillenniumPlus(doc)) {
+ while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ } else {
+ while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (time_after(jiffies, timeo)) {
+ printk("_DoC_WaitReady timed out.\n");
+ return -EIO;
+ }
+ udelay(1);
+ cond_resched();
+ }
+ }
+
+ return 0;
+}
+
+static inline int DoC_WaitReady(struct doc_priv *doc)
+{
+ void __iomem *docptr = doc->virtadr;
+ int ret = 0;
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ DoC_Delay(doc, 4);
+
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ } else {
+ DoC_Delay(doc, 4);
+
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
+ /* Call the out-of-line routine to wait */
+ ret = _DoC_WaitReady(doc);
+ DoC_Delay(doc, 2);
+ }
+
+ if(debug) printk("DoC_WaitReady OK\n");
+ return ret;
+}
+
+static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if(debug)printk("write_byte %02x\n", datum);
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, 2k_CDSN_IO);
+}
+
+static u_char doc2000_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ u_char ret;
+
+ ReadDOC(docptr, CDSNSlowIO);
+ DoC_Delay(doc, 2);
+ ret = ReadDOC(docptr, 2k_CDSN_IO);
+ if (debug) printk("read_byte returns %02x\n", ret);
+ return ret;
+}
+
+static void doc2000_writebuf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ if (debug)printk("writebuf of %d bytes: ", len);
+ for (i=0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug) printk("\n");
+}
+
+static void doc2000_readbuf(struct mtd_info *mtd,
+ u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)printk("readbuf of %d bytes: ", len);
+
+ for (i=0; i < len; i++) {
+ buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+ }
+}
+
+static void doc2000_readbuf_dword(struct mtd_info *mtd,
+ u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug) printk("readbuf_dword of %d bytes: ", len);
+
+ if (unlikely((((unsigned long)buf)|len) & 3)) {
+ for (i=0; i < len; i++) {
+ *(uint8_t *)(&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
+ }
+ } else {
+ for (i=0; i < len; i+=4) {
+ *(uint32_t*)(&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
+ }
+ }
+}
+
+static int doc2000_verifybuf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ for (i=0; i < len; i++)
+ if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
+ return -EFAULT;
+ return 0;
+}
+
+static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ uint16_t ret;
+
+ doc200x_select_chip(mtd, nr);
+ doc200x_hwcontrol(mtd, NAND_CTL_SETCLE);
+ this->write_byte(mtd, NAND_CMD_READID);
+ doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE);
+ doc200x_hwcontrol(mtd, NAND_CTL_SETALE);
+ this->write_byte(mtd, 0);
+ doc200x_hwcontrol(mtd, NAND_CTL_CLRALE);
+
+ /* We can't use dev_ready here, but at least we wait for the
+ * command to complete
+ */
+ udelay(50);
+
+ ret = this->read_byte(mtd) << 8;
+ ret |= this->read_byte(mtd);
+
+ if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
+ /* First chip probe. See if we get same results by 32-bit access */
+ union {
+ uint32_t dword;
+ uint8_t byte[4];
+ } ident;
+ void __iomem *docptr = doc->virtadr;
+
+ doc200x_hwcontrol(mtd, NAND_CTL_SETCLE);
+ doc2000_write_byte(mtd, NAND_CMD_READID);
+ doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE);
+ doc200x_hwcontrol(mtd, NAND_CTL_SETALE);
+ doc2000_write_byte(mtd, 0);
+ doc200x_hwcontrol(mtd, NAND_CTL_CLRALE);
+
+ udelay(50);
+
+ ident.dword = readl(docptr + DoC_2k_CDSN_IO);
+ if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
+ printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n");
+ this->read_buf = &doc2000_readbuf_dword;
+ }
+ }
+
+ return ret;
+}
+
+static void __init doc2000_count_chips(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ uint16_t mfrid;
+ int i;
+
+ /* Max 4 chips per floor on DiskOnChip 2000 */
+ doc->chips_per_floor = 4;
+
+ /* Find out what the first chip is */
+ mfrid = doc200x_ident_chip(mtd, 0);
+
+ /* Find how many chips in each floor. */
+ for (i = 1; i < 4; i++) {
+ if (doc200x_ident_chip(mtd, i) != mfrid)
+ break;
+ }
+ doc->chips_per_floor = i;
+ printk(KERN_DEBUG "Detected %d chips per floor.\n", i);
+}
+
+static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
+{
+ struct doc_priv *doc = this->priv;
+
+ int status;
+
+ DoC_WaitReady(doc);
+ this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+ DoC_WaitReady(doc);
+ status = (int)this->read_byte(mtd);
+
+ return status;
+}
+
+static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ WriteDOC(datum, docptr, CDSNSlowIO);
+ WriteDOC(datum, docptr, Mil_CDSN_IO);
+ WriteDOC(datum, docptr, WritePipeTerm);
+}
+
+static u_char doc2001_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ //ReadDOC(docptr, CDSNSlowIO);
+ /* 11.4.5 -- delay twice to allow extended length cycle */
+ DoC_Delay(doc, 2);
+ ReadDOC(docptr, ReadPipeInit);
+ //return ReadDOC(docptr, Mil_CDSN_IO);
+ return ReadDOC(docptr, LastDataRead);
+}
+
+static void doc2001_writebuf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ for (i=0; i < len; i++)
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ /* Terminate write pipeline */
+ WriteDOC(0x00, docptr, WritePipeTerm);
+}
+
+static void doc2001_readbuf(struct mtd_info *mtd,
+ u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ /* Start read pipeline */
+ ReadDOC(docptr, ReadPipeInit);
+
+ for (i=0; i < len-1; i++)
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
+
+ /* Terminate read pipeline */
+ buf[i] = ReadDOC(docptr, LastDataRead);
+}
+
+static int doc2001_verifybuf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ /* Start read pipeline */
+ ReadDOC(docptr, ReadPipeInit);
+
+ for (i=0; i < len-1; i++)
+ if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
+ ReadDOC(docptr, LastDataRead);
+ return i;
+ }
+ if (buf[i] != ReadDOC(docptr, LastDataRead))
+ return i;
+ return 0;
+}
+
+static u_char doc2001plus_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ u_char ret;
+
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ret = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug) printk("read_byte returns %02x\n", ret);
+ return ret;
+}
+
+static void doc2001plus_writebuf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)printk("writebuf of %d bytes: ", len);
+ for (i=0; i < len; i++) {
+ WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+ if (debug) printk("\n");
+}
+
+static void doc2001plus_readbuf(struct mtd_info *mtd,
+ u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)printk("readbuf of %d bytes: ", len);
+
+ /* Start read pipeline */
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ for (i=0; i < len-2; i++) {
+ buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
+ if (debug && i < 16)
+ printk("%02x ", buf[i]);
+ }
+
+ /* Terminate read pipeline */
+ buf[len-2] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len-2]);
+ buf[len-1] = ReadDOC(docptr, Mplus_LastDataRead);
+ if (debug && i < 16)
+ printk("%02x ", buf[len-1]);
+ if (debug) printk("\n");
+}
+
+static int doc2001plus_verifybuf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+
+ if (debug)printk("verifybuf of %d bytes: ", len);
+
+ /* Start read pipeline */
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+ ReadDOC(docptr, Mplus_ReadPipeInit);
+
+ for (i=0; i < len-2; i++)
+ if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
+ ReadDOC(docptr, Mplus_LastDataRead);
+ ReadDOC(docptr, Mplus_LastDataRead);
+ return i;
+ }
+ if (buf[len-2] != ReadDOC(docptr, Mplus_LastDataRead))
+ return len-2;
+ if (buf[len-1] != ReadDOC(docptr, Mplus_LastDataRead))
+ return len-1;
+ return 0;
+}
+
+static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int floor = 0;
+
+ if(debug)printk("select chip (%d)\n", chip);
+
+ if (chip == -1) {
+ /* Disable flash internally */
+ WriteDOC(0, docptr, Mplus_FlashSelect);
+ return;
+ }
+
+ floor = chip / doc->chips_per_floor;
+ chip -= (floor * doc->chips_per_floor);
+
+ /* Assert ChipEnable and deassert WriteProtect */
+ WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
+ this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ doc->curchip = chip;
+ doc->curfloor = floor;
+}
+
+static void doc200x_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int floor = 0;
+
+ if(debug)printk("select chip (%d)\n", chip);
+
+ if (chip == -1)
+ return;
+
+ floor = chip / doc->chips_per_floor;
+ chip -= (floor * doc->chips_per_floor);
+
+ /* 11.4.4 -- deassert CE before changing chip */
+ doc200x_hwcontrol(mtd, NAND_CTL_CLRNCE);
+
+ WriteDOC(floor, docptr, FloorSelect);
+ WriteDOC(chip, docptr, CDSNDeviceSelect);
+
+ doc200x_hwcontrol(mtd, NAND_CTL_SETNCE);
+
+ doc->curchip = chip;
+ doc->curfloor = floor;
+}
+
+static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ switch(cmd) {
+ case NAND_CTL_SETNCE:
+ doc->CDSNControl |= CDSN_CTRL_CE;
+ break;
+ case NAND_CTL_CLRNCE:
+ doc->CDSNControl &= ~CDSN_CTRL_CE;
+ break;
+ case NAND_CTL_SETCLE:
+ doc->CDSNControl |= CDSN_CTRL_CLE;
+ break;
+ case NAND_CTL_CLRCLE:
+ doc->CDSNControl &= ~CDSN_CTRL_CLE;
+ break;
+ case NAND_CTL_SETALE:
+ doc->CDSNControl |= CDSN_CTRL_ALE;
+ break;
+ case NAND_CTL_CLRALE:
+ doc->CDSNControl &= ~CDSN_CTRL_ALE;
+ break;
+ case NAND_CTL_SETWP:
+ doc->CDSNControl |= CDSN_CTRL_WP;
+ break;
+ case NAND_CTL_CLRWP:
+ doc->CDSNControl &= ~CDSN_CTRL_WP;
+ break;
+ }
+ if (debug)printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
+ WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+ /* 11.4.3 -- 4 NOPs after CDSNControl write */
+ DoC_Delay(doc, 4);
+}
+
+static void doc2001plus_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /*
+ * Must terminate write pipeline before sending any commands
+ * to the device.
+ */
+ if (command == NAND_CMD_PAGEPROG) {
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+ }
+
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->oobblock) {
+ /* OOB area */
+ column -= mtd->oobblock;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ WriteDOC(readcmd, docptr, Mplus_FlashCmd);
+ }
+ WriteDOC(command, docptr, Mplus_FlashCmd);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+
+ if (column != -1 || page_addr != -1) {
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (this->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ WriteDOC(column, docptr, Mplus_FlashAddress);
+ }
+ if (page_addr != -1) {
+ WriteDOC((unsigned char) (page_addr & 0xff), docptr, Mplus_FlashAddress);
+ WriteDOC((unsigned char) ((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
+ /* One more address cycle for higher density devices */
+ if (this->chipsize & 0x0c000000) {
+ WriteDOC((unsigned char) ((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
+ printk("high density\n");
+ }
+ }
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ /* deassert ALE */
+ if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 || command == NAND_CMD_READOOB || command == NAND_CMD_READID)
+ WriteDOC(0, docptr, Mplus_FlashControl);
+ }
+
+ /*
+ * program and erase have their own busy handlers
+ * status and sequential in need no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (this->dev_ready)
+ break;
+ udelay(this->chip_delay);
+ WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ WriteDOC(0, docptr, Mplus_WritePipeTerm);
+ while ( !(this->read_byte(mtd) & 0x40));
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!this->dev_ready) {
+ udelay (this->chip_delay);
+ return;
+ }
+ }
+
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay (100);
+ /* wait until command is processed */
+ while (!this->dev_ready(mtd));
+}
+
+static int doc200x_dev_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ /* 11.4.2 -- must NOP four times before checking FR/B# */
+ DoC_Delay(doc, 4);
+ if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+ if (debug)
+ printk("not ready\n");
+ return 0;
+ }
+ if (debug)printk("was ready\n");
+ return 1;
+ } else {
+ /* 11.4.2 -- must NOP four times before checking FR/B# */
+ DoC_Delay(doc, 4);
+ if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+ if (debug)
+ printk("not ready\n");
+ return 0;
+ }
+ /* 11.4.2 -- Must NOP twice if it's ready */
+ DoC_Delay(doc, 2);
+ if (debug) printk("was ready\n");
+ return 1;
+ }
+}
+
+static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ /* This is our last resort if we couldn't find or create a BBT. Just
+ pretend all blocks are good. */
+ return 0;
+}
+
+static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch(mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
+ break;
+ }
+}
+
+static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+
+ /* Prime the ECC engine */
+ switch(mode) {
+ case NAND_ECC_READ:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
+ break;
+ case NAND_ECC_WRITE:
+ WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+ WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
+ break;
+ }
+}
+
+/* This code is only called on write */
+static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ unsigned char *ecc_code)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ int i;
+ int emptymatch = 1;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(0, docptr, 2k_CDSN_IO);
+ WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ WriteDOC(0, docptr, Mplus_NOP);
+ } else {
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ WriteDOC(0, docptr, NOP);
+ }
+
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ if (ecc_code[i] != empty_write_ecc[i])
+ emptymatch = 0;
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+#if 0
+ /* If emptymatch=1, we might have an all-0xff data buffer. Check. */
+ if (emptymatch) {
+ /* Note: this somewhat expensive test should not be triggered
+ often. It could be optimized away by examining the data in
+ the writebuf routine, and remembering the result. */
+ for (i = 0; i < 512; i++) {
+ if (dat[i] == 0xff) continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, we do have an all-0xff data buffer.
+ Return all-0xff ecc value instead of the computed one, so
+ it'll look just like a freshly-erased page. */
+ if (emptymatch) memset(ecc_code, 0xff, 6);
+#endif
+ return 0;
+}
+
+static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
+{
+ int i, ret = 0;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ void __iomem *docptr = doc->virtadr;
+ volatile u_char dummy;
+ int emptymatch = 1;
+
+ /* flush the pipeline */
+ if (DoC_is_2000(doc)) {
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ dummy = ReadDOC(docptr, 2k_ECCStatus);
+ } else if (DoC_is_MillenniumPlus(doc)) {
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ dummy = ReadDOC(docptr, Mplus_ECCConf);
+ } else {
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ dummy = ReadDOC(docptr, ECCConf);
+ }
+
+ /* Error occurred? */
+ if (dummy & 0x80) {
+ for (i = 0; i < 6; i++) {
+ if (DoC_is_MillenniumPlus(doc))
+ calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+ else
+ calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+ if (calc_ecc[i] != empty_read_syndrome[i])
+ emptymatch = 0;
+ }
+ /* If emptymatch=1, the read syndrome is consistent with an
+ all-0xff data and stored ecc block. Check the stored ecc. */
+ if (emptymatch) {
+ for (i = 0; i < 6; i++) {
+ if (read_ecc[i] == 0xff) continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, check the data block. */
+ if (emptymatch) {
+ /* Note: this somewhat expensive test should not be triggered
+ often. It could be optimized away by examining the data in
+ the readbuf routine, and remembering the result. */
+ for (i = 0; i < 512; i++) {
+ if (dat[i] == 0xff) continue;
+ emptymatch = 0;
+ break;
+ }
+ }
+ /* If emptymatch still =1, this is almost certainly a freshly-
+ erased block, in which case the ECC will not come out right.
+ We'll suppress the error and tell the caller everything's
+ OK. Because it is. */
+ if (!emptymatch) ret = doc_ecc_decode (rs_decoder, dat, calc_ecc);
+ if (ret > 0)
+ printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
+ }
+ if (DoC_is_MillenniumPlus(doc))
+ WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+ else
+ WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+ if (no_ecc_failures && (ret == -1)) {
+ printk(KERN_ERR "suppressing ECC failure\n");
+ ret = 0;
+ }
+ return ret;
+}
+
+//u_char mydatabuf[528];
+
+/* The strange out-of-order .oobfree list below is a (possibly unneeded)
+ * attempt to retain compatibility. It used to read:
+ * .oobfree = { {8, 8} }
+ * Since that leaves two bytes unusable, it was changed. But the following
+ * scheme might affect existing jffs2 installs by moving the cleanmarker:
+ * .oobfree = { {6, 10} }
+ * jffs2 seems to handle the above gracefully, but the current scheme seems
+ * safer. The only problem with it is that any code that parses oobfree must
+ * be able to handle out-of-order segments.
+ */
+static struct nand_oobinfo doc200x_oobinfo = {
+ .useecc = MTD_NANDECC_AUTOPLACE,
+ .eccbytes = 6,
+ .eccpos = {0, 1, 2, 3, 4, 5},
+ .oobfree = { {8, 8}, {6, 2} }
+};
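+
+/*
+ * Illustrative sketch, not part of this driver: any code that consumes the
+ * autoplacement info above must cope with out-of-order .oobfree segments,
+ * e.g. by relying only on the {offset, length} pairs and on a zero length
+ * terminating the list.  The helper name below is hypothetical.
+ */
+#if 0
+static int doc200x_count_free_oob(const struct nand_oobinfo *oob)
+{
+ int i, total = 0;
+
+ /* 8 == number of oobfree slots assumed for struct nand_oobinfo */
+ for (i = 0; i < 8 && oob->oobfree[i][1]; i++)
+ total += oob->oobfree[i][1]; /* segment order is irrelevant here */
+ return total;
+}
+#endif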
+
+/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
+ On successful return, buf will contain a copy of the media header for
+ further processing. id is the string to scan for, and will presumably be
+ either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media
+ header. The page #s of the found media headers are placed in mh0_page and
+ mh1_page in the DOC private structure. */
+static int __init find_media_headers(struct mtd_info *mtd, u_char *buf,
+ const char *id, int findmirror)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ unsigned offs;
+ int ret;
+ size_t retlen;
+
+ for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
+ ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf);
+ if (retlen != mtd->oobblock) continue;
+ if (ret) {
+ printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n",
+ offs);
+ }
+ if (memcmp(buf, id, 6)) continue;
+ printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
+ if (doc->mh0_page == -1) {
+ doc->mh0_page = offs >> this->page_shift;
+ if (!findmirror) return 1;
+ continue;
+ }
+ doc->mh1_page = offs >> this->page_shift;
+ return 2;
+ }
+ if (doc->mh0_page == -1) {
+ printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
+ return 0;
+ }
+ /* Only one mediaheader was found. We want buf to contain a
+ mediaheader on return, so we'll have to re-read the one we found. */
+ offs = doc->mh0_page << this->page_shift;
+ ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf);
+ if (retlen != mtd->oobblock) {
+ /* Insanity. Give up. */
+ printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
+ return 0;
+ }
+ return 1;
+}
+
+static inline int __init nftl_partscan(struct mtd_info *mtd,
+ struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ int ret = 0;
+ u_char *buf;
+ struct NFTLMediaHeader *mh;
+ const unsigned psize = 1 << this->page_shift;
+ int numparts = 0;
+ unsigned blocks, maxblocks;
+ int offs, numheaders;
+
+ buf = kmalloc(mtd->oobblock, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
+ return 0;
+ }
+ if (!(numheaders=find_media_headers(mtd, buf, "ANAND", 1))) goto out;
+ mh = (struct NFTLMediaHeader *) buf;
+
+ mh->NumEraseUnits = le16_to_cpu(mh->NumEraseUnits);
+ mh->FirstPhysicalEUN = le16_to_cpu(mh->FirstPhysicalEUN);
+ mh->FormattedSize = le32_to_cpu(mh->FormattedSize);
+
+ printk(KERN_INFO " DataOrgID = %s\n"
+ " NumEraseUnits = %d\n"
+ " FirstPhysicalEUN = %d\n"
+ " FormattedSize = %d\n"
+ " UnitSizeFactor = %d\n",
+ mh->DataOrgID, mh->NumEraseUnits,
+ mh->FirstPhysicalEUN, mh->FormattedSize,
+ mh->UnitSizeFactor);
+
+ blocks = mtd->size >> this->phys_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+
+ if (mh->UnitSizeFactor == 0x00) {
+ /* Auto-determine UnitSizeFactor. The constraints are:
+ - There can be at most 32768 virtual blocks.
+ - There can be at most (virtual block size - page size)
+ virtual blocks (because MediaHeader+BBT must fit in 1).
+ */
+ mh->UnitSizeFactor = 0xff;
+ while (blocks > maxblocks) {
+ blocks >>= 1;
+ maxblocks = min(32768U, (maxblocks << 1) + psize);
+ mh->UnitSizeFactor--;
+ }
+ printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
+ }
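+ /*
+ * Worked example (illustrative, assumed geometry): with 512-byte pages,
+ * 16KiB physical erase blocks and a 256MiB device, blocks starts at 16384
+ * while maxblocks is 16384 - 512 = 15872.  One pass of the loop halves
+ * blocks to 8192 and raises maxblocks to 32256, so the loop exits with
+ * UnitSizeFactor = 0xfe, i.e. two physical erase blocks per virtual block.
+ */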
+
+ /* NOTE: The lines below modify internal variables of the NAND and MTD
+ layers; variables which have already been configured by nand_scan.
+ Unfortunately, we didn't know before this point what these values
+ should be. Thus, this code is somewhat dependent on the exact
+ implementation of the NAND layer. */
+ if (mh->UnitSizeFactor != 0xff) {
+ this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
+ mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
+ printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
+ blocks = mtd->size >> this->bbt_erase_shift;
+ maxblocks = min(32768U, mtd->erasesize - psize);
+ }
+
+ if (blocks > maxblocks) {
+ printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size. Aborting.\n", mh->UnitSizeFactor);
+ goto out;
+ }
+
+ /* Skip past the media headers. */
+ offs = max(doc->mh0_page, doc->mh1_page);
+ offs <<= this->page_shift;
+ offs += mtd->erasesize;
+
+ if (show_firmware_partition == 1) {
+ parts[0].name = " DiskOnChip Firmware / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = offs;
+ numparts = 1;
+ }
+
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
+
+ offs += parts[numparts].size;
+ numparts++;
+
+ if (offs < mtd->size) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = offs;
+ parts[numparts].size = mtd->size - offs;
+ numparts++;
+ }
+
+ ret = numparts;
+out:
+ kfree(buf);
+ return ret;
+}
+
+/* This is a stripped-down copy of the code in inftlmount.c */
+static inline int __init inftl_partscan(struct mtd_info *mtd,
+ struct mtd_partition *parts)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ int ret = 0;
+ u_char *buf;
+ struct INFTLMediaHeader *mh;
+ struct INFTLPartition *ip;
+ int numparts = 0;
+ int blocks;
+ int vshift, lastvunit = 0;
+ int i;
+ int end = mtd->size;
+
+ if (inftl_bbt_write)
+ end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
+
+ buf = kmalloc(mtd->oobblock, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
+ return 0;
+ }
+
+ if (!find_media_headers(mtd, buf, "BNAND", 0)) goto out;
+ doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
+ mh = (struct INFTLMediaHeader *) buf;
+
+ mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
+ mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
+ mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions);
+ mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits);
+ mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
+ mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
+
+ printk(KERN_INFO " bootRecordID = %s\n"
+ " NoOfBootImageBlocks = %d\n"
+ " NoOfBinaryPartitions = %d\n"
+ " NoOfBDTLPartitions = %d\n"
+ " BlockMultiplerBits = %d\n"
+ " FormatFlgs = %d\n"
+ " OsakVersion = %d.%d.%d.%d\n"
+ " PercentUsed = %d\n",
+ mh->bootRecordID, mh->NoOfBootImageBlocks,
+ mh->NoOfBinaryPartitions,
+ mh->NoOfBDTLPartitions,
+ mh->BlockMultiplierBits, mh->FormatFlags,
+ ((unsigned char *) &mh->OsakVersion)[0] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[1] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[2] & 0xf,
+ ((unsigned char *) &mh->OsakVersion)[3] & 0xf,
+ mh->PercentUsed);
+
+ vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
+
+ blocks = mtd->size >> vshift;
+ if (blocks > 32768) {
+ printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits);
+ goto out;
+ }
+
+ blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
+ if (inftl_bbt_write && (blocks > mtd->erasesize)) {
+ printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported. FIX ME!\n");
+ goto out;
+ }
+
+ /* Scan the partitions */
+ for (i = 0; (i < 4); i++) {
+ ip = &(mh->Partitions[i]);
+ ip->virtualUnits = le32_to_cpu(ip->virtualUnits);
+ ip->firstUnit = le32_to_cpu(ip->firstUnit);
+ ip->lastUnit = le32_to_cpu(ip->lastUnit);
+ ip->flags = le32_to_cpu(ip->flags);
+ ip->spareUnits = le32_to_cpu(ip->spareUnits);
+ ip->Reserved0 = le32_to_cpu(ip->Reserved0);
+
+ printk(KERN_INFO " PARTITION[%d] ->\n"
+ " virtualUnits = %d\n"
+ " firstUnit = %d\n"
+ " lastUnit = %d\n"
+ " flags = 0x%x\n"
+ " spareUnits = %d\n",
+ i, ip->virtualUnits, ip->firstUnit,
+ ip->lastUnit, ip->flags,
+ ip->spareUnits);
+
+ if ((show_firmware_partition == 1) &&
+ (i == 0) && (ip->firstUnit > 0)) {
+ parts[0].name = " DiskOnChip IPL / Media Header partition";
+ parts[0].offset = 0;
+ parts[0].size = mtd->erasesize * ip->firstUnit;
+ numparts = 1;
+ }
+
+ if (ip->flags & INFTL_BINARY)
+ parts[numparts].name = " DiskOnChip BDK partition";
+ else
+ parts[numparts].name = " DiskOnChip BDTL partition";
+ parts[numparts].offset = ip->firstUnit << vshift;
+ parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
+ numparts++;
+ if (ip->lastUnit > lastvunit) lastvunit = ip->lastUnit;
+ if (ip->flags & INFTL_LAST) break;
+ }
+ lastvunit++;
+ if ((lastvunit << vshift) < end) {
+ parts[numparts].name = " DiskOnChip Remainder partition";
+ parts[numparts].offset = lastvunit << vshift;
+ parts[numparts].size = end - parts[numparts].offset;
+ numparts++;
+ }
+ ret = numparts;
+out:
+ kfree(buf);
+ return ret;
+}
+
+static int __init nftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ struct mtd_partition parts[2];
+
+ memset((char *) parts, 0, sizeof(parts));
+ /* On NFTL, we have to find the media headers before we can read the
+ BBTs, since they're stored in the media header eraseblocks. */
+ numparts = nftl_partscan(mtd, parts);
+ if (!numparts) return -EIO;
+ this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->pages[0] = doc->mh0_page + 1;
+ if (doc->mh1_page != -1) {
+ this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+ NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+ NAND_BBT_VERSION;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->pages[0] = doc->mh1_page + 1;
+ } else {
+ this->bbt_md = NULL;
+ }
+
+ /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
+ At least as nand_bbt.c is currently written. */
+ if ((ret = nand_scan_bbt(mtd, NULL)))
+ return ret;
+ add_mtd_device(mtd);
+#ifdef CONFIG_MTD_PARTITIONS
+ if (!no_autopart)
+ add_mtd_partitions(mtd, parts, numparts);
+#endif
+ return 0;
+}
+
+static int __init inftl_scan_bbt(struct mtd_info *mtd)
+{
+ int ret, numparts;
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+ struct mtd_partition parts[5];
+
+ if (this->numchips > doc->chips_per_floor) {
+ printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
+ return -EIO;
+ }
+
+ if (DoC_is_MillenniumPlus(doc)) {
+ this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->pages[0] = 2;
+ this->bbt_md = NULL;
+ } else {
+ this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
+ NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_td->options |= NAND_BBT_WRITE;
+ this->bbt_td->offs = 8;
+ this->bbt_td->len = 8;
+ this->bbt_td->veroffs = 7;
+ this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_td->reserved_block_code = 0x01;
+ this->bbt_td->pattern = "MSYS_BBT";
+
+ this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT |
+ NAND_BBT_VERSION;
+ if (inftl_bbt_write)
+ this->bbt_md->options |= NAND_BBT_WRITE;
+ this->bbt_md->offs = 8;
+ this->bbt_md->len = 8;
+ this->bbt_md->veroffs = 7;
+ this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+ this->bbt_md->reserved_block_code = 0x01;
+ this->bbt_md->pattern = "TBB_SYSM";
+ }
+
+ /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set.
+ At least as nand_bbt.c is currently written. */
+ if ((ret = nand_scan_bbt(mtd, NULL)))
+ return ret;
+ memset((char *) parts, 0, sizeof(parts));
+ numparts = inftl_partscan(mtd, parts);
+ /* At least for now, require the INFTL Media Header. We could probably
+ do without it for non-INFTL use, since all it gives us is
+ autopartitioning, but I want to give it more thought. */
+ if (!numparts) return -EIO;
+ add_mtd_device(mtd);
+#ifdef CONFIG_MTD_PARTITIONS
+ if (!no_autopart)
+ add_mtd_partitions(mtd, parts, numparts);
+#endif
+ return 0;
+}
+
+static inline int __init doc2000_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->write_byte = doc2000_write_byte;
+ this->read_byte = doc2000_read_byte;
+ this->write_buf = doc2000_writebuf;
+ this->read_buf = doc2000_readbuf;
+ this->verify_buf = doc2000_verifybuf;
+ this->scan_bbt = nftl_scan_bbt;
+
+ doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (NFTL Model)";
+ return (4 * doc->chips_per_floor);
+}
+
+static inline int __init doc2001_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->write_byte = doc2001_write_byte;
+ this->read_byte = doc2001_read_byte;
+ this->write_buf = doc2001_writebuf;
+ this->read_buf = doc2001_readbuf;
+ this->verify_buf = doc2001_verifybuf;
+
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ ReadDOC(doc->virtadr, ChipID);
+ if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
+ /* It's not a Millennium; it's one of the newer
+ DiskOnChip 2000 units with a similar ASIC.
+ Treat it like a Millennium, except that it
+ can have multiple chips. */
+ doc2000_count_chips(mtd);
+ mtd->name = "DiskOnChip 2000 (INFTL Model)";
+ this->scan_bbt = inftl_scan_bbt;
+ return (4 * doc->chips_per_floor);
+ } else {
+ /* Bog-standard Millennium */
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium";
+ this->scan_bbt = nftl_scan_bbt;
+ return 1;
+ }
+}
+
+static inline int __init doc2001plus_init(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct doc_priv *doc = this->priv;
+
+ this->write_byte = NULL;
+ this->read_byte = doc2001plus_read_byte;
+ this->write_buf = doc2001plus_writebuf;
+ this->read_buf = doc2001plus_readbuf;
+ this->verify_buf = doc2001plus_verifybuf;
+ this->scan_bbt = inftl_scan_bbt;
+ this->hwcontrol = NULL;
+ this->select_chip = doc2001plus_select_chip;
+ this->cmdfunc = doc2001plus_command;
+ this->enable_hwecc = doc2001plus_enable_hwecc;
+
+ doc->chips_per_floor = 1;
+ mtd->name = "DiskOnChip Millennium Plus";
+
+ return 1;
+}
+
+static inline int __init doc_probe(unsigned long physadr)
+{
+ unsigned char ChipID;
+ struct mtd_info *mtd;
+ struct nand_chip *nand;
+ struct doc_priv *doc;
+ void __iomem *virtadr;
+ unsigned char save_control;
+ unsigned char tmp, tmpb, tmpc;
+ int reg, len, numchips;
+ int ret = 0;
+
+ virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
+ if (!virtadr) {
+ printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
+ return -EIO;
+ }
+
+ /* It's not possible to cleanly detect the DiskOnChip - the
+ * bootup procedure will put the device into reset mode, and
+ * it's not possible to talk to it without actually writing
+ * to the DOCControl register. So we store the current contents
+ * of the DOCControl register's location, in case we later decide
+ * that it's not a DiskOnChip, and want to put it back how we
+ * found it.
+ */
+ save_control = ReadDOC(virtadr, DOCControl);
+
+ /* Reset the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
+ virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
+ virtadr, DOCControl);
+
+ /* Enable the DiskOnChip ASIC */
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
+ virtadr, DOCControl);
+ WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
+ virtadr, DOCControl);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch(ChipID) {
+ case DOC_ChipID_Doc2k:
+ reg = DoC_2k_ECCStatus;
+ break;
+ case DOC_ChipID_DocMil:
+ reg = DoC_ECCConf;
+ break;
+ case DOC_ChipID_DocMilPlus16:
+ case DOC_ChipID_DocMilPlus32:
+ case 0:
+ /* Possible Millennium Plus, need to do more checks */
+ /* Possibly release from power down mode */
+ for (tmp = 0; (tmp < 4); tmp++)
+ ReadDOC(virtadr, Mplus_Power);
+
+ /* Reset the Millennium Plus ASIC */
+ tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
+ DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+
+ mdelay(1);
+ /* Enable the Millennium Plus ASIC */
+ tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT |
+ DOC_MODE_BDECT;
+ WriteDOC(tmp, virtadr, Mplus_DOCControl);
+ WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+ mdelay(1);
+
+ ChipID = ReadDOC(virtadr, ChipID);
+
+ switch (ChipID) {
+ case DOC_ChipID_DocMilPlus16:
+ reg = DoC_Mplus_Toggle;
+ break;
+ case DOC_ChipID_DocMilPlus32:
+ printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ break;
+
+ default:
+ ret = -ENODEV;
+ goto notfound;
+ }
+ /* Check the TOGGLE bit in the ECC register */
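+ /* On a genuine DiskOnChip this bit flips on every read, so consecutive
+ samples must differ while the first and third must match. */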
+ tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+ if ((tmp == tmpb) || (tmp != tmpc)) {
+ printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
+ ret = -ENODEV;
+ goto notfound;
+ }
+
+ for (mtd = doclist; mtd; mtd = doc->nextdoc) {
+ unsigned char oldval;
+ unsigned char newval;
+ nand = mtd->priv;
+ doc = nand->priv;
+ /* Use the alias resolution register to determine if this is
+ in fact the same DOC aliased to a new address. If writes
+ to one chip's alias resolution register change the value on
+ the other chip, they're the same chip. */
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ newval = ReadDOC(virtadr, Mplus_AliasResolution);
+ } else {
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ newval = ReadDOC(virtadr, AliasResolution);
+ }
+ if (oldval != newval)
+ continue;
+ if (ChipID == DOC_ChipID_DocMilPlus16) {
+ WriteDOC(~newval, virtadr, Mplus_AliasResolution);
+ oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+ WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it
+ } else {
+ WriteDOC(~newval, virtadr, AliasResolution);
+ oldval = ReadDOC(doc->virtadr, AliasResolution);
+ WriteDOC(newval, virtadr, AliasResolution); // restore it
+ }
+ newval = ~newval;
+ if (oldval == newval) {
+ printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
+ goto notfound;
+ }
+ }
+
+ printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);
+
+ len = sizeof(struct mtd_info) +
+ sizeof(struct nand_chip) +
+ sizeof(struct doc_priv) +
+ (2 * sizeof(struct nand_bbt_descr));
+ mtd = kmalloc(len, GFP_KERNEL);
+ if (!mtd) {
+ printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
+ ret = -ENOMEM;
+ goto fail;
+ }
+ memset(mtd, 0, len);
+
+ nand = (struct nand_chip *) (mtd + 1);
+ doc = (struct doc_priv *) (nand + 1);
+ nand->bbt_td = (struct nand_bbt_descr *) (doc + 1);
+ nand->bbt_md = nand->bbt_td + 1;
+
+ mtd->priv = nand;
+ mtd->owner = THIS_MODULE;
+
+ nand->priv = doc;
+ nand->select_chip = doc200x_select_chip;
+ nand->hwcontrol = doc200x_hwcontrol;
+ nand->dev_ready = doc200x_dev_ready;
+ nand->waitfunc = doc200x_wait;
+ nand->block_bad = doc200x_block_bad;
+ nand->enable_hwecc = doc200x_enable_hwecc;
+ nand->calculate_ecc = doc200x_calculate_ecc;
+ nand->correct_data = doc200x_correct_data;
+
+ nand->autooob = &doc200x_oobinfo;
+ nand->eccmode = NAND_ECC_HW6_512;
+ nand->options = NAND_USE_FLASH_BBT | NAND_HWECC_SYNDROME;
+
+ doc->physadr = physadr;
+ doc->virtadr = virtadr;
+ doc->ChipID = ChipID;
+ doc->curfloor = -1;
+ doc->curchip = -1;
+ doc->mh0_page = -1;
+ doc->mh1_page = -1;
+ doc->nextdoc = doclist;
+
+ if (ChipID == DOC_ChipID_Doc2k)
+ numchips = doc2000_init(mtd);
+ else if (ChipID == DOC_ChipID_DocMilPlus16)
+ numchips = doc2001plus_init(mtd);
+ else
+ numchips = doc2001_init(mtd);
+
+ if ((ret = nand_scan(mtd, numchips))) {
+ /* DBB note: I believe nand_release is necessary here, as
+ buffers may have been allocated in nand_base. Check with
+ Thomas. FIX ME! */
+ /* nand_release will call del_mtd_device, but we haven't yet
+ added it. This is handled without incident by
+ del_mtd_device, as far as I can tell. */
+ nand_release(mtd);
+ kfree(mtd);
+ goto fail;
+ }
+
+ /* Success! */
+ doclist = mtd;
+ return 0;
+
+notfound:
+ /* Put back the contents of the DOCControl register, in case it's not
+ actually a DiskOnChip. */
+ WriteDOC(save_control, virtadr, DOCControl);
+fail:
+ iounmap(virtadr);
+ return ret;
+}
+
+static void release_nanddoc(void)
+{
+ struct mtd_info *mtd, *nextmtd;
+ struct nand_chip *nand;
+ struct doc_priv *doc;
+
+ for (mtd = doclist; mtd; mtd = nextmtd) {
+ nand = mtd->priv;
+ doc = nand->priv;
+
+ nextmtd = doc->nextdoc;
+ nand_release(mtd);
+ iounmap(doc->virtadr);
+ kfree(mtd);
+ }
+}
+
+static int __init init_nanddoc(void)
+{
+ int i, ret = 0;
+
+ /* We could create the decoder on demand, if memory is a concern.
+ * This way we have it handy, if an error happens
+ *
+ * Symbolsize is 10 (bits)
+ * Primitive polynomial is x^10+x^3+1
+ * first consecutive root is 510
+ * primitive element to generate roots = 1
+ * generator polynomial degree = 4
+ */
+ rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
+ if (!rs_decoder) {
+ printk (KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
+ return -ENOMEM;
+ }
+
+ if (doc_config_location) {
+ printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
+ ret = doc_probe(doc_config_location);
+ if (ret < 0)
+ goto outerr;
+ } else {
+ for (i=0; (doc_locations[i] != 0xffffffff); i++) {
+ doc_probe(doc_locations[i]);
+ }
+ }
+ /* No banner message any more. Print a message if no DiskOnChip
+ found, so the user knows we at least tried. */
+ if (!doclist) {
+ printk(KERN_INFO "No valid DiskOnChip devices found\n");
+ ret = -ENODEV;
+ goto outerr;
+ }
+ return 0;
+outerr:
+ free_rs(rs_decoder);
+ return ret;
+}
+
+static void __exit cleanup_nanddoc(void)
+{
+ /* Cleanup the nand/DoC resources */
+ release_nanddoc();
+
+ /* Free the reed solomon resources */
+ if (rs_decoder) {
+ free_rs(rs_decoder);
+ }
+}
+
+module_init(init_nanddoc);
+module_exit(cleanup_nanddoc);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver\n");
diff --git a/linux-2.4.x/drivers/mtd/nand/edb7312.c b/linux-2.4.x/drivers/mtd/nand/edb7312.c
new file mode 100644
index 0000000..9b1fd2f
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/edb7312.c
@@ -0,0 +1,218 @@
+/*
+ * drivers/mtd/nand/edb7312.c
+ *
+ * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
+ *
+ * Derived from drivers/mtd/nand/autcpu12.c
+ * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * $Id: edb7312.c,v 1.12 2005/11/07 11:14:30 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * CLEP7312 board which utilizes the Toshiba TC58V64AFT part. This is
+ * a 64Mibit (8MiB x 8 bits) NAND flash device.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/arch/hardware.h> /* for CLPS7111_VIRT_BASE */
+#include <asm/sizes.h>
+#include <asm/hardware/clps7111.h>
+
+/*
+ * MTD structure for EDB7312 board
+ */
+static struct mtd_info *ep7312_mtd = NULL;
+
+/*
+ * Values specific to the EDB7312 board (used with EP7312 processor)
+ */
+#define EP7312_FIO_PBASE 0x10000000 /* Phys address of flash */
+#define EP7312_PXDR 0x0001 /*
+ * IO offset to Port B data register
+ * where the CLE, ALE and NCE pins
+ * are wired to.
+ */
+#define EP7312_PXDDR 0x0041 /*
+ * IO offset to Port B data direction
+ * register so we can control the IO
+ * lines.
+ */
+
+/*
+ * Module stuff
+ */
+
+static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
+static void __iomem * ep7312_pxdr = (void __iomem *) EP7312_PXDR;
+static void __iomem * ep7312_pxddr = (void __iomem *) EP7312_PXDDR;
+
+#ifdef CONFIG_MTD_PARTITIONS
+/*
+ * Define static partitions for flash device
+ */
+static struct mtd_partition partition_info[] = {
+ { .name = "EP7312 Nand Flash",
+ .offset = 0,
+ .size = 8*1024*1024 }
+};
+#define NUM_PARTITIONS 1
+
+#endif
+
+
+/*
+ * hardware specific access to control-lines
+ */
+static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ switch(cmd) {
+
+ case NAND_CTL_SETCLE:
+ clps_writeb(clps_readb(ep7312_pxdr) | 0x10, ep7312_pxdr);
+ break;
+ case NAND_CTL_CLRCLE:
+ clps_writeb(clps_readb(ep7312_pxdr) & ~0x10, ep7312_pxdr);
+ break;
+
+ case NAND_CTL_SETALE:
+ clps_writeb(clps_readb(ep7312_pxdr) | 0x20, ep7312_pxdr);
+ break;
+ case NAND_CTL_CLRALE:
+ clps_writeb(clps_readb(ep7312_pxdr) & ~0x20, ep7312_pxdr);
+ break;
+
+ case NAND_CTL_SETNCE:
+ clps_writeb((clps_readb(ep7312_pxdr) | 0x80) & ~0x40, ep7312_pxdr);
+ break;
+ case NAND_CTL_CLRNCE:
+ clps_writeb((clps_readb(ep7312_pxdr) | 0x80) | 0x40, ep7312_pxdr);
+ break;
+ }
+}
+
+/*
+ * read device ready pin
+ */
+static int ep7312_device_ready(struct mtd_info *mtd)
+{
+ return 1;
+}
+#ifdef CONFIG_MTD_PARTITIONS
+const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+/*
+ * Main initialization routine
+ */
+static int __init ep7312_init (void)
+{
+ struct nand_chip *this;
+ const char *part_type = 0;
+ int mtd_parts_nb = 0;
+ struct mtd_partition *mtd_parts = 0;
+ void __iomem * ep7312_fio_base;
+
+ /* Allocate memory for MTD device structure and private data */
+ ep7312_mtd = kmalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!ep7312_mtd) {
+ printk("Unable to allocate EDB7312 NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* map physical address */
+ ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K);
+ if(!ep7312_fio_base) {
+ printk("ioremap EDB7312 NAND flash failed\n");
+ kfree(ep7312_mtd);
+ return -EIO;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&ep7312_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) ep7312_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ ep7312_mtd->priv = this;
+
+ /*
+ * Set GPIO Port B control register so that the pins are configured
+ * to be outputs for controlling the NAND flash.
+ */
+ clps_writeb(0xf0, ep7312_pxddr);
+
+ /* insert callbacks */
+ this->IO_ADDR_R = ep7312_fio_base;
+ this->IO_ADDR_W = ep7312_fio_base;
+ this->hwcontrol = ep7312_hwcontrol;
+ this->dev_ready = ep7312_device_ready;
+ /* 15 us command delay time */
+ this->chip_delay = 15;
+
+ /* Scan to find existence of the device */
+ if (nand_scan (ep7312_mtd, 1)) {
+ iounmap((void *)ep7312_fio_base);
+ kfree (ep7312_mtd);
+ return -ENXIO;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ ep7312_mtd->name = "edb7312-nand";
+ mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes,
+ &mtd_parts, 0);
+ if (mtd_parts_nb > 0)
+ part_type = "command line";
+ else
+ mtd_parts_nb = 0;
+#endif
+ if (mtd_parts_nb == 0) {
+ mtd_parts = partition_info;
+ mtd_parts_nb = NUM_PARTITIONS;
+ part_type = "static";
+ }
+
+ /* Register the partitions */
+ printk(KERN_NOTICE "Using %s partition definition\n", part_type);
+ add_mtd_partitions(ep7312_mtd, mtd_parts, mtd_parts_nb);
+
+ /* Return happy */
+ return 0;
+}
+module_init(ep7312_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit ep7312_cleanup (void)
+{
+ struct nand_chip *this = (struct nand_chip *) &ep7312_mtd[1];
+
+ /* Release resources, unregister device */
+ nand_release (ep7312_mtd);
+
+ /* Free internal data buffer */
+ kfree (this->data_buf);
+
+ /* Free the MTD device structure */
+ kfree (ep7312_mtd);
+}
+module_exit(ep7312_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
+MODULE_DESCRIPTION("MTD map driver for Cogent EDB7312 board");
diff --git a/linux-2.4.x/drivers/mtd/nand/h1910.c b/linux-2.4.x/drivers/mtd/nand/h1910.c
new file mode 100644
index 0000000..7f2f0e1
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/h1910.c
@@ -0,0 +1,208 @@
+/*
+ * drivers/mtd/nand/h1910.c
+ *
+ * Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com)
+ *
+ * Derived from drivers/mtd/nand/edb7312.c
+ * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
+ * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * $Id: h1910.c,v 1.7 2005/11/29 14:33:32 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is
+ * a 128Mibit (16MiB x 8 bits) NAND flash device.
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/arch/hardware.h> /* for CLPS7111_VIRT_BASE */
+#include <asm/sizes.h>
+#include <asm/arch/h1900-gpio.h>
+#include <asm/arch/ipaq.h>
+
+/*
+ * MTD structure for EDB7312 board
+ */
+static struct mtd_info *h1910_nand_mtd = NULL;
+
+/*
+ * Module stuff
+ */
+
+#ifdef CONFIG_MTD_PARTITIONS
+/*
+ * Define static partitions for flash device
+ */
+static struct mtd_partition partition_info[] = {
+ { name: "h1910 NAND Flash",
+ offset: 0,
+ size: 16*1024*1024 }
+};
+#define NUM_PARTITIONS 1
+
+#endif
+
+
+/*
+ * hardware specific access to control-lines
+ */
+static void h1910_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ struct nand_chip* this = (struct nand_chip *) (mtd->priv);
+
+ switch(cmd) {
+
+ case NAND_CTL_SETCLE:
+ this->IO_ADDR_R |= (1 << 2);
+ this->IO_ADDR_W |= (1 << 2);
+ break;
+ case NAND_CTL_CLRCLE:
+ this->IO_ADDR_R &= ~(1 << 2);
+ this->IO_ADDR_W &= ~(1 << 2);
+ break;
+
+ case NAND_CTL_SETALE:
+ this->IO_ADDR_R |= (1 << 3);
+ this->IO_ADDR_W |= (1 << 3);
+ break;
+ case NAND_CTL_CLRALE:
+ this->IO_ADDR_R &= ~(1 << 3);
+ this->IO_ADDR_W &= ~(1 << 3);
+ break;
+
+ case NAND_CTL_SETNCE:
+ break;
+ case NAND_CTL_CLRNCE:
+ break;
+ }
+}
+
+/*
+ * read device ready pin
+ */
+#if 0
+static int h1910_device_ready(struct mtd_info *mtd)
+{
+ return (GPLR(55) & GPIO_bit(55));
+}
+#endif
+
+/*
+ * Main initialization routine
+ */
+static int __init h1910_init (void)
+{
+ struct nand_chip *this;
+ const char *part_type = 0;
+ int mtd_parts_nb = 0;
+ struct mtd_partition *mtd_parts = 0;
+ void __iomem *nandaddr;
+
+ if (!machine_is_h1900())
+ return -ENODEV;
+
+ nandaddr = ioremap(0x08000000, 0x1000);
+ if (!nandaddr) {
+ printk("Failed to ioremap nand flash.\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for MTD device structure and private data */
+ h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!h1910_nand_mtd) {
+ printk("Unable to allocate h1910 NAND MTD device structure.\n");
+ iounmap ((void *) nandaddr);
+ return -ENOMEM;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&h1910_nand_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) h1910_nand_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ h1910_nand_mtd->priv = this;
+
+ /*
+ * Enable VPEN
+ */
+ GPSR(37) = GPIO_bit(37);
+
+ /* insert callbacks */
+ this->IO_ADDR_R = nandaddr;
+ this->IO_ADDR_W = nandaddr;
+ this->hwcontrol = h1910_hwcontrol;
+ this->dev_ready = NULL; /* unknown whether this is correct, so rely on chip_delay instead */
+ /* 50 us command delay time */
+ this->chip_delay = 50;
+ this->eccmode = NAND_ECC_SOFT;
+ this->options = NAND_NO_AUTOINCR;
+
+ /* Scan to find existence of the device */
+ if (nand_scan (h1910_nand_mtd, 1)) {
+ printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
+ kfree (h1910_nand_mtd);
+ iounmap ((void *) nandaddr);
+ return -ENXIO;
+ }
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts,
+ "h1910-nand");
+ if (mtd_parts_nb > 0)
+ part_type = "command line";
+ else
+ mtd_parts_nb = 0;
+#endif
+ if (mtd_parts_nb == 0)
+ {
+ mtd_parts = partition_info;
+ mtd_parts_nb = NUM_PARTITIONS;
+ part_type = "static";
+ }
+
+ /* Register the partitions */
+ printk(KERN_NOTICE "Using %s partition definition\n", part_type);
+ add_mtd_partitions(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
+
+ /* Return happy */
+ return 0;
+}
+module_init(h1910_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit h1910_cleanup (void)
+{
+ struct nand_chip *this = (struct nand_chip *) &h1910_nand_mtd[1];
+
+ /* Release resources, unregister device */
+ nand_release (h1910_nand_mtd);
+
+ /* Release io resource */
+ iounmap ((void *) this->IO_ADDR_W);
+
+ /* Free the MTD device structure */
+ kfree (h1910_nand_mtd);
+}
+module_exit(h1910_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joshua Wise <joshua at joshuawise dot com>");
+MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910");
diff --git a/linux-2.4.x/drivers/mtd/nand/nand_base.c b/linux-2.4.x/drivers/mtd/nand/nand_base.c
new file mode 100644
index 0000000..a7500b6
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/nand_base.c
@@ -0,0 +1,3039 @@
+/*
+ * drivers/mtd/nand.c
+ *
+ * Overview:
+ * This is the generic MTD driver for NAND flash devices. It should be
+ * capable of working with almost all NAND chips currently available.
+ * Basic support for AG-AND chips is provided.
+ *
+ * Additional technical information is available on
+ * http://www.linux-mtd.infradead.org/tech/mtdnand/
+ *
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ * 2002 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * 02-08-2004 tglx: support for strange chips, which cannot auto increment
+ * pages on read / read_oob
+ *
+ * 03-17-2004 tglx: Check ready before auto increment check. Simon Bayes
+ * pointed this out, as he marked an auto increment capable chip
+ * as NOAUTOINCR in the board driver.
+ * Make reads over block boundaries work too
+ *
+ * 04-14-2004 tglx: first working version for 2k page size chips
+ *
+ * 05-19-2004 tglx: Basic support for Renesas AG-AND chips
+ *
+ * 09-24-2004 tglx: add support for hardware controllers (e.g. ECC) shared
+ * among multiple independent devices. Suggestions and initial patch
+ * from Ben Dooks <ben-mtd@fluff.org>
+ *
+ * 12-05-2004 dmarlin: add workaround for Renesas AG-AND chips "disturb" issue.
+ * Basically, any block not rewritten may lose data when surrounding blocks
+ * are rewritten many times. JFFS2 ensures this doesn't happen for blocks
+ * it uses, but the Bad Block Table(s) may not be rewritten. To ensure they
+ * do not lose data, force them to be rewritten when some of the surrounding
+ * blocks are erased. Rather than tracking a specific nearby block (which
+ * could itself go bad), use a page address 'mask' to select several blocks
+ * in the same area, and rewrite the BBT when any of them are erased.
+ *
+ * 01-03-2005 dmarlin: added support for the device recovery command sequence for Renesas
+ * AG-AND chips. If there was a sudden loss of power during an erase operation,
+ * a "device recovery" operation must be performed when power is restored
+ * to ensure correct operation.
+ *
+ * 01-20-2005 dmarlin: added support for optional hardware specific callback routine to
+ * perform extra error status checks on erase and write failures. This required
+ * adding a wrapper function for nand_read_ecc.
+ *
+ * 08-20-2005 vwool: suspend/resume added
+ *
+ * 11-01-2005: vwool: NAND page layouts introduces for HW ECC handling
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * TODO:
+ * Enable cached programming for 2k page size chips
+ * Check, if mtd->ecctype should be set to MTD_ECC_HW
+ * if we have HW ecc support.
+ * The AG-AND chips have nice features for speed improvement,
+ * which are not supported yet. Read / program 4 pages in one go.
+ *
+ * $Id: nand_base.c,v 1.166 2006/03/13 06:09:08 vwool Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/compatmac.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
+/* Define default oob placement schemes for large and small page devices */
+static struct nand_oobinfo nand_oob_8 = {
+ .useecc = MTD_NANDECC_AUTOPLACE,
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2},
+ .oobfree = { {3, 2}, {6, 2} }
+};
+
+static struct nand_oobinfo nand_oob_16 = {
+ .useecc = MTD_NANDECC_AUTOPLACE,
+ .eccbytes = 6,
+ .eccpos = {0, 1, 2, 3, 6, 7},
+ .oobfree = { {8, 8} }
+};
+
+static struct nand_oobinfo nand_oob_64 = {
+ .useecc = MTD_NANDECC_AUTOPLACE,
+ .eccbytes = 24,
+ .eccpos = {
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63},
+ .oobfree = { {2, 38} }
+};
+
+/* This is used for padding purposes in nand_write_oob */
+#define FFCHARS_SIZE 2048
+static u_char ffchars[FFCHARS_SIZE];
+
+#define HW_AUTOOOB_LAYOUT_SIZE 32 /* should be enough */
+
+/*
+ * NAND low-level MTD interface functions
+ */
+static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len);
+static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len);
+static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len);
+
+static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf);
+static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel);
+static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf);
+static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf);
+static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel);
+static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char *buf);
+static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t * retlen);
+static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel);
+static int nand_erase (struct mtd_info *mtd, struct erase_info *instr);
+static void nand_sync (struct mtd_info *mtd);
+
+/* Some internal functions */
+static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, u_char *oob_buf,
+ struct nand_oobinfo *oobsel, int mode);
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages,
+ u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode);
+#else
+#define nand_verify_pages(...) (0)
+#endif
+
+static int nand_get_device (struct nand_chip *this, struct mtd_info *mtd, int new_state);
+
+/**
+ * nand_release_device - [GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device
+ */
+static void nand_release_device (struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ /* De-select the NAND device */
+ this->select_chip(mtd, -1);
+
+ if (this->controller) {
+ /* Release the controller and the chip */
+ spin_lock(&this->controller->lock);
+ this->controller->active = NULL;
+ this->state = FL_READY;
+ wake_up(&this->controller->wq);
+ spin_unlock(&this->controller->lock);
+ } else {
+ /* Release the chip */
+ spin_lock(&this->chip_lock);
+ this->state = FL_READY;
+ wake_up(&this->wq);
+ spin_unlock(&this->chip_lock);
+ }
+}
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 8bit buswidth
+ */
+static u_char nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ return readb(this->IO_ADDR_R);
+}
+
+/**
+ * nand_write_byte - [DEFAULT] write one byte to the chip
+ * @mtd: MTD device structure
+ * @byte: pointer to data byte to write
+ *
+ * Default write function for 8bit buswidth
+ */
+static void nand_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ writeb(byte, this->IO_ADDR_W);
+}
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth with
+ * endianness conversion
+ */
+static u_char nand_read_byte16(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ return (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write one byte endianness aware to the chip
+ * @mtd: MTD device structure
+ * @byte: pointer to data byte to write
+ *
+ * Default write function for 16bit buswidth with
+ * endianness conversion
+ */
+static void nand_write_byte16(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
+}
+
+/**
+ * nand_read_word - [DEFAULT] read one word from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth without
+ * endianness conversion
+ */
+static u16 nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ return readw(this->IO_ADDR_R);
+}
+
+/**
+ * nand_write_word - [DEFAULT] write one word to the chip
+ * @mtd: MTD device structure
+ * @word: data word to write
+ *
+ * Default write function for 16bit buswidth without
+ * endianness conversion
+ */
+static void nand_write_word(struct mtd_info *mtd, u16 word)
+{
+ struct nand_chip *this = mtd->priv;
+ writew(word, this->IO_ADDR_W);
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @mtd: MTD device structure
+ * @chip: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ switch(chip) {
+ case -1:
+ this->hwcontrol(mtd, NAND_CTL_CLRNCE);
+ break;
+ case 0:
+ this->hwcontrol(mtd, NAND_CTL_SETNCE);
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8bit buswidth
+ */
+static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ writeb(buf[i], this->IO_ADDR_W);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8bit buswidth
+ */
+static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ buf[i] = readb(this->IO_ADDR_R);
+}
+
+/**
+ * nand_verify_buf - [DEFAULT] Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * Default verify function for 8bit buswidth
+ */
+static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ if (buf[i] != readb(this->IO_ADDR_R))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth
+ */
+static void nand_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i=0; i<len; i++)
+ writew(p[i], this->IO_ADDR_W);
+
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth
+ */
+static void nand_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i=0; i<len; i++)
+ p[i] = readw(this->IO_ADDR_R);
+}
+
+/**
+ * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * Default verify function for 16bit buswith
+ */
+static int nand_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i=0; i<len; i++)
+ if (p[i] != readw(this->IO_ADDR_R))
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * nand_block_bad - [DEFAULT] Read bad block marker from the chip
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ *
+ * Check, if the block is bad.
+ */
+static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
+{
+ int page, chipnr, res = 0;
+ struct nand_chip *this = mtd->priv;
+ u16 bad;
+
+ if (getchip) {
+ page = (int)(ofs >> this->page_shift);
+ chipnr = (int)(ofs >> this->chip_shift);
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device (this, mtd, FL_READING);
+
+ /* Select the NAND device */
+ this->select_chip(mtd, chipnr);
+ } else
+ page = (int) ofs;
+
+ if (this->options & NAND_BUSWIDTH_16) {
+ this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos & 0xFE, page & this->pagemask);
+ bad = cpu_to_le16(this->read_word(mtd));
+ if (this->badblockpos & 0x1)
+ bad >>= 8;
+ if ((bad & 0xFF) != 0xff)
+ res = 1;
+ } else {
+ this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos, page & this->pagemask);
+ if (this->read_byte(mtd) != 0xff)
+ res = 1;
+ }
+
+ if (getchip) {
+ /* Deselect and wake up anyone waiting on the device */
+ nand_release_device(mtd);
+ }
+
+ return res;
+}
+
+/**
+ * nand_default_block_markbad - [DEFAULT] mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This is the default implementation, which can be overridden by
+ * a hardware specific driver.
+*/
+static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *this = mtd->priv;
+ u_char buf[2] = {0, 0};
+ size_t retlen;
+ int block;
+
+ /* Get block number */
+ block = ((int) ofs) >> this->bbt_erase_shift;
+ if (this->bbt)
+ this->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+ /* Do we have a flash based bad block table ? */
+ if (this->options & NAND_USE_FLASH_BBT)
+ return nand_update_bbt (mtd, ofs);
+
+ /* We write two bytes, so we don't have to mess with 16 bit access */
+ ofs += mtd->oobsize + (this->badblockpos & ~0x01);
+ return nand_write_oob (mtd, ofs , 2, &retlen, buf);
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @mtd: MTD device structure
+ * Check, if the device is write protected
+ *
+ * The function expects, that the device is already selected
+ */
+static int nand_check_wp (struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ /* Check the WP bit */
+ this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
+ return (this->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+}
+
+/**
+ * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ * @allowbbt: 1, if its allowed to access the bbt area
+ *
+ * Check, if the block is bad. Either by reading the bad block table or
+ * calling of the scan function.
+ */
+static int nand_block_checkbad (struct mtd_info *mtd, loff_t ofs, int getchip, int allowbbt)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (!this->bbt)
+ return this->block_bad(mtd, ofs, getchip);
+
+ /* Return info from the table */
+ return nand_isbad_bbt (mtd, ofs, allowbbt);
+}
+
+/*
+ * Wait for the ready pin, after a command
+ * The timeout is caught later.
+ */
+static void nand_wait_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long timeo = jiffies + 2;
+
+ /* wait until command is processed or timeout occurs */
+ do {
+ if (this->dev_ready(mtd))
+ return;
+ touch_softlockup_watchdog();
+ } while (time_before(jiffies, timeo));
+}
+
+/**
+ * nand_command - [DEFAULT] Send command to NAND device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This function is used for small page
+ * devices (256/512 Bytes per page)
+ */
+static void nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ register struct nand_chip *this = mtd->priv;
+
+ /* Begin command latch cycle */
+ this->hwcontrol(mtd, NAND_CTL_SETCLE);
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->oobblock) {
+ /* OOB area */
+ column -= mtd->oobblock;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ this->write_byte(mtd, readcmd);
+ }
+ this->write_byte(mtd, command);
+
+ /* Set ALE and clear CLE to start address cycle */
+ this->hwcontrol(mtd, NAND_CTL_CLRCLE);
+
+ if (column != -1 || page_addr != -1) {
+ this->hwcontrol(mtd, NAND_CTL_SETALE);
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (this->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ this->write_byte(mtd, column);
+ }
+ if (page_addr != -1) {
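+ /*
+ * Small page devices take one column byte plus two row bytes;
+ * chips above 32MiB need a third row byte, e.g. 32MiB of
+ * 512 byte pages is exactly 65536 pages
+ */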
+ this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
+ this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
+ /* One more address cycle for devices > 32MiB */
+ if (this->chipsize > (32 << 20))
+ this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f));
+ }
+ /* Latch in address */
+ this->hwcontrol(mtd, NAND_CTL_CLRALE);
+ }
+
+ /*
+ * program and erase have their own busy handlers
+ * status and sequential in need no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (this->dev_ready)
+ break;
+ udelay(this->chip_delay);
+ this->hwcontrol(mtd, NAND_CTL_SETCLE);
+ this->write_byte(mtd, NAND_CMD_STATUS);
+ this->hwcontrol(mtd, NAND_CTL_CLRCLE);
+ while ( !(this->read_byte(mtd) & NAND_STATUS_READY));
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!this->dev_ready) {
+ udelay (this->chip_delay);
+ return;
+ }
+ }
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay (100);
+
+ nand_wait_ready(mtd);
+}
+
+/**
+ * nand_command_lp - [DEFAULT] Send command to NAND large page device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page devices
+ * We don't have the separate regions that we have in the small page devices.
+ * We must emulate NAND_CMD_READOOB to keep the code compatible.
+ *
+ */
+static void nand_command_lp (struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ register struct nand_chip *this = mtd->priv;
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->oobblock;
+ command = NAND_CMD_READ0;
+ }
+
+
+ /* Begin command latch cycle */
+ this->hwcontrol(mtd, NAND_CTL_SETCLE);
+ /* Write out the command to the device. */
+ this->write_byte(mtd, (command & 0xff));
+ /* End command latch cycle */
+ this->hwcontrol(mtd, NAND_CTL_CLRCLE);
+
+ if (column != -1 || page_addr != -1) {
+ this->hwcontrol(mtd, NAND_CTL_SETALE);
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (this->options & NAND_BUSWIDTH_16)
+ column >>= 1;
+ this->write_byte(mtd, column & 0xff);
+ this->write_byte(mtd, column >> 8);
+ }
+ if (page_addr != -1) {
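+ /*
+ * Two row bytes follow the two column bytes sent above; chips
+ * above 128MiB need a third row byte, e.g. 128MiB of 2KiB pages
+ * is exactly 65536 pages
+ */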
+ this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
+ this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
+ /* One more address cycle for devices > 128MiB */
+ if (this->chipsize > (128 << 20))
+ this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0xff));
+ }
+ /* Latch in address */
+ this->hwcontrol(mtd, NAND_CTL_CLRALE);
+ }
+
+ /*
+ * program and erase have their own busy handlers
+ * status, sequential in, and deplete1 need no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_DEPLETE1:
+ return;
+
+ /*
+ * read error status commands require only a short delay
+ */
+ case NAND_CMD_STATUS_ERROR:
+ case NAND_CMD_STATUS_ERROR0:
+ case NAND_CMD_STATUS_ERROR1:
+ case NAND_CMD_STATUS_ERROR2:
+ case NAND_CMD_STATUS_ERROR3:
+ udelay(this->chip_delay);
+ return;
+
+ case NAND_CMD_RESET:
+ if (this->dev_ready)
+ break;
+ udelay(this->chip_delay);
+ this->hwcontrol(mtd, NAND_CTL_SETCLE);
+ this->write_byte(mtd, NAND_CMD_STATUS);
+ this->hwcontrol(mtd, NAND_CTL_CLRCLE);
+ while ( !(this->read_byte(mtd) & NAND_STATUS_READY));
+ return;
+
+ case NAND_CMD_READ0:
+ /* Begin command latch cycle */
+ this->hwcontrol(mtd, NAND_CTL_SETCLE);
+ /* Write out the start read command */
+ this->write_byte(mtd, NAND_CMD_READSTART);
+ /* End command latch cycle */
+ this->hwcontrol(mtd, NAND_CTL_CLRCLE);
+ /* Fall through into ready check */
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!this->dev_ready) {
+ udelay (this->chip_delay);
+ return;
+ }
+ }
+
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay (100);
+
+ nand_wait_ready(mtd);
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @this: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int nand_get_device (struct nand_chip *this, struct mtd_info *mtd, int new_state)
+{
+ struct nand_chip *active;
+ spinlock_t *lock;
+ wait_queue_head_t *wq;
+ DECLARE_WAITQUEUE (wait, current);
+
+ lock = (this->controller) ? &this->controller->lock : &this->chip_lock;
+ wq = (this->controller) ? &this->controller->wq : &this->wq;
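+ /*
+ * Chips sharing a hardware controller serialize on the controller's
+ * lock and waitqueue; stand-alone chips use their own
+ */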
+retry:
+ active = this;
+ spin_lock(lock);
+
+ /* Hardware controller shared among independent devices */
+ if (this->controller) {
+ if (this->controller->active)
+ active = this->controller->active;
+ else
+ this->controller->active = this;
+ }
+ if (active == this && this->state == FL_READY) {
+ this->state = new_state;
+ spin_unlock(lock);
+ return 0;
+ }
+ if (new_state == FL_PM_SUSPENDED) {
+ spin_unlock(lock);
+ return (this->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN;
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(wq, &wait);
+ spin_unlock(lock);
+ schedule();
+ remove_wait_queue(wq, &wait);
+ goto retry;
+}
+
+/**
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @this: NAND chip structure
+ * @state: state to select the max. timeout value
+ *
+ * Wait for command done. This applies to erase and program only.
+ * Erase can take up to 400ms and program up to 20ms, according to
+ * general NAND and SmartMedia specs.
+ *
+*/
+static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state)
+{
+
+ unsigned long timeo = jiffies;
+ int status;
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
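+ /* e.g. with HZ=100 this allows 40 jiffies for erase and 2 for program */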
+
+ /* Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine. */
+ ndelay (100);
+
+ if ((state == FL_ERASING) && (this->options & NAND_IS_AND))
+ this->cmdfunc (mtd, NAND_CMD_STATUS_MULTI, -1, -1);
+ else
+ this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1);
+
+ while (time_before(jiffies, timeo)) {
+ /* Check, if we were interrupted */
+ if (this->state != state)
+ return 0;
+
+ if (this->dev_ready) {
+ if (this->dev_ready(mtd))
+ break;
+ } else {
+ if (this->read_byte(mtd) & NAND_STATUS_READY)
+ break;
+ }
+ cond_resched();
+ }
+ status = (int) this->read_byte(mtd);
+ return status;
+}
+
+/**
+ * nand_write_page - [GENERIC] write one page
+ * @mtd: MTD device structure
+ * @this: NAND chip structure
+ * @page: startpage inside the chip, must be called with (page & this->pagemask)
+ * @oob_buf: out of band data buffer
+ * @oobsel: out of band selection structure
+ * @cached: 1 = enable cached programming if supported by chip
+ *
+ * nand_write_page is used for both write and writev!
+ * This function will always program a full page of data.
+ * If you call it with a non page aligned buffer, you're lost :)
+ *
+ * Cached programming is not supported yet.
+ */
+static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page,
+ u_char *oob_buf, struct nand_oobinfo *oobsel, int cached)
+{
+ int i = 0, j = 0, oobidx = 0, status;
+ u_char ecc_code[40];
+ int eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE;
+ int *oob_config = oobsel->eccpos;
+ int datidx = 0, last_datidx = 0, last_oobidx = 0, eccidx = 0;
+ int eccsteps = this->eccsteps;
+ int eccbytes = 0;
+
+ /* FIXME: Enable cached programming */
+ cached = 0;
+
+ /* Send command to begin auto page programming */
+ this->cmdfunc (mtd, NAND_CMD_SEQIN, 0x00, page);
+
+ /* Write out complete page of data, take care of eccmode */
+ switch (eccmode) {
+ /* No ecc, write all */
+ case NAND_ECC_NONE:
+ printk (KERN_WARNING "Writing data without ECC to NAND-FLASH is not recommended\n");
+ this->write_buf(mtd, this->data_poi, mtd->oobblock);
+ break;
+
+ /* Software ecc 3/256, write all */
+ case NAND_ECC_SOFT:
+ for (; eccsteps; eccsteps--) {
+ this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code);
+ for (i = 0; i < 3; i++, eccidx++)
+ oob_buf[oob_config[eccidx]] = ecc_code[i];
+ datidx += this->eccsize;
+ }
+ this->write_buf(mtd, this->data_poi, mtd->oobblock);
+ this->write_buf(mtd, oob_buf, mtd->oobsize);
+ break;
+ default:
+ eccbytes = this->eccbytes;
+ for (; this->layout[j].length; j++) {
+ int len = this->layout[j].length;
+ int oidx = oobidx;
+ switch (this->layout[j].type) {
+ case ITEM_TYPE_DATA:
+ this->enable_hwecc(mtd, NAND_ECC_WRITE);
+ this->write_buf(mtd, &this->data_poi[datidx], len);
+ datidx += len;
+ break;
+ case ITEM_TYPE_ECC:
+ this->enable_hwecc(mtd, NAND_ECC_WRITESYN);
+ this->calculate_ecc(mtd, &this->data_poi[last_datidx], &ecc_code[eccidx]);
+ for (last_oobidx = oobidx; oobidx < last_oobidx + len; oobidx++, eccidx++)
+ oob_buf[oobidx] = ecc_code[eccidx];
+ if (this->options & NAND_BUSWIDTH_16) {
+ if (oidx & 1) {
+ oidx--;
+ len++;
+ }
+ if (len & 1)
+ len--;
+ }
+ this->write_buf(mtd, &oob_buf[oidx], len);
+ break;
+ case ITEM_TYPE_OOB:
+ this->enable_hwecc(mtd, NAND_ECC_WRITEOOB);
+ if (this->options & NAND_BUSWIDTH_16) {
+ if (oidx & 1) {
+ oidx--;
+ len++;
+ }
+ if (len & 1)
+ len--;
+ }
+ this->write_buf(mtd, &oob_buf[oidx], len);
+ oobidx += len;
+ break;
+ }
+ }
+ break;
+ }
+
+ /* Send command to actually program the data */
+ this->cmdfunc (mtd, cached ? NAND_CMD_CACHEDPROG : NAND_CMD_PAGEPROG, -1, -1);
+
+ if (!cached) {
+ /* call wait ready function */
+ status = this->waitfunc (mtd, this, FL_WRITING);
+
+ /* See if operation failed and additional status checks are available */
+ if ((status & NAND_STATUS_FAIL) && (this->errstat)) {
+ status = this->errstat(mtd, this, FL_WRITING, status, page);
+ }
+
+ /* See if device thinks it succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write, page 0x%08x, ", __FUNCTION__, page);
+ return -EIO;
+ }
+ } else {
+ /* FIXME: Implement cached programming ! */
+ /* wait until cache is ready*/
+ // status = this->waitfunc (mtd, this, FL_CACHEDRPG);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+/**
+ * nand_verify_pages - [GENERIC] verify the chip contents after a write
+ * @mtd: MTD device structure
+ * @this: NAND chip structure
+ * @page: startpage inside the chip, must be called with (page & this->pagemask)
+ * @numpages: number of pages to verify
+ * @oob_buf: out of band data buffer
+ * @oobsel: out of band selection structure
+ * @chipnr: number of the current chip
+ * @oobmode: 1 = full buffer verify, 0 = ecc only
+ *
+ * The NAND device assumes that it is always writing to a cleanly erased page.
+ * Hence, it performs its internal write verification only on bits that
+ * transitioned from 1 to 0. The device does NOT verify the whole page on a
+ * byte by byte basis. It is possible that the page was not completely erased
+ * or the page is becoming unusable due to wear. The read with ECC would catch
+ * the error later when the ECC page check fails, but we would rather catch
+ * it early in the page write stage. Better to write no data than invalid data.
+ */
+static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages,
+ u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode)
+{
+ int i, j, datidx = 0, oobofs = 0, res = -EIO;
+ int eccsteps = this->eccsteps;
+ int hweccbytes;
+ u_char oobdata[64];
+
+ hweccbytes = (this->options & NAND_HWECC_SYNDROME) ? (oobsel->eccbytes / eccsteps) : 0;
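+ /*
+ * Syndrome based hw ECC interleaves its ecc bytes with the data,
+ * so account for them per ecc step while verifying
+ */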
+
+ /* Send command to read back the first page */
+ this->cmdfunc (mtd, NAND_CMD_READ0, 0, page);
+
+ for(;;) {
+ for (j = 0; j < eccsteps; j++) {
+ /* Loop through and verify the data */
+ if (this->verify_buf(mtd, &this->data_poi[datidx], mtd->eccsize)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
+ goto out;
+ }
+ datidx += mtd->eccsize;
+ /* Have we a hw generator layout ? */
+ if (!hweccbytes)
+ continue;
+ if (this->verify_buf(mtd, &this->oob_buf[oobofs], hweccbytes)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
+ goto out;
+ }
+ oobofs += hweccbytes;
+ }
+
+ /* check, if we must compare all data or if we just have to
+ * compare the ecc bytes
+ */
+ if (oobmode) {
+ if (this->verify_buf(mtd, &oob_buf[oobofs], mtd->oobsize - hweccbytes * eccsteps)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
+ goto out;
+ }
+ } else {
+ /* Read always, else autoincrement fails */
+ this->read_buf(mtd, oobdata, mtd->oobsize - hweccbytes * eccsteps);
+
+ if (oobsel->useecc != MTD_NANDECC_OFF && !hweccbytes) {
+ int ecccnt = oobsel->eccbytes;
+
+ for (i = 0; i < ecccnt; i++) {
+ int idx = oobsel->eccpos[i];
+ if (oobdata[idx] != oob_buf[oobofs + idx] ) {
+ DEBUG (MTD_DEBUG_LEVEL0,
+ "%s: Failed ECC write "
+ "verify, page 0x%08x, " "%6i bytes were succesful\n", __FUNCTION__, page, i);
+ goto out;
+ }
+ }
+ }
+ }
+ oobofs += mtd->oobsize - hweccbytes * eccsteps;
+ page++;
+ numpages--;
+
+ /* Apply delay or wait for ready/busy pin
+ * Do this before the AUTOINCR check, so no problems
+ * arise if a chip which does auto increment
+ * is marked as NOAUTOINCR by the board driver.
+ * Do this also before returning, so the chip is
+ * ready for the next command.
+ */
+ if (!this->dev_ready)
+ udelay (this->chip_delay);
+ else
+ nand_wait_ready(mtd);
+
+ /* All done, return happy */
+ if (!numpages)
+ return 0;
+
+
+ /* Check, if the chip supports auto page increment */
+ if (!NAND_CANAUTOINCR(this))
+ this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page);
+ }
+ /*
+ * Terminate the read command. We come here in case of an error
+ * So we must issue a reset command.
+ */
+out:
+ this->cmdfunc (mtd, NAND_CMD_RESET, -1, -1);
+ return res;
+}
+#endif
+
+/**
+ * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
+ *
+ * This function simply calls nand_do_read_ecc with oob buffer = NULL,
+ * oobsel = &mtd->oobinfo and flags = 0xff
+ */
+static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf)
+{
+ return nand_do_read_ecc (mtd, from, len, retlen, buf, NULL, &mtd->oobinfo, 0xff);
+}
+
+
+/**
+ * nand_read_ecc - [MTD Interface] MTD compatibility function for nand_do_read_ecc
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
+ * @oob_buf: filesystem supplied oob data buffer
+ * @oobsel: oob selection structure
+ *
+ * This function simply calls nand_do_read_ecc with flags = 0xff
+ */
+static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf, u_char * oob_buf, struct nand_oobinfo *oobsel)
+{
+ /* if no oobsel is supplied, fall back to the chip default oobinfo */
+ if (oobsel == NULL)
+ oobsel = &mtd->oobinfo;
+ return nand_do_read_ecc(mtd, from, len, retlen, buf, oob_buf, oobsel, 0xff);
+}
+
+
+/**
+ * nand_do_read_ecc - [MTD Interface] Read data with ECC
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
+ * @oob_buf: filesystem supplied oob data buffer (can be NULL)
+ * @oobsel: oob selection structure
+ * @flags: flag to indicate if nand_get_device/nand_release_device should be performed
+ * and how many corrected error bits are acceptable:
+ * bits 0..7 - number of tolerable errors
+ * bit 8 - 0 == do not get/release chip, 1 == get/release chip
+ *
+ * NAND read with ECC
+ */
+int nand_do_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf, u_char * oob_buf,
+ struct nand_oobinfo *oobsel, int flags)
+{
+
+ int i, j, col, realpage, page, end, ecc, chipnr, sndcmd = 1;
+ int read = 0, oob = 0, oobidx, ecc_status = 0, ecc_failed = 0, eccidx;
+ int last_datidx, last_oobidx;
+ struct nand_chip *this = mtd->priv;
+ u_char *data_poi, *oob_data = oob_buf;
+ u_char ecc_calc[32];
+ u_char ecc_code[32];
+ int eccmode, eccsteps;
+ int *oob_config, datidx;
+ int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
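+ /* blockcheck masks the page index within an erase block, e.g. 63 for 64 pages per block */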
+ int eccbytes;
+ int compareecc = 1;
+ int oobreadlen;
+
+
+ DEBUG (MTD_DEBUG_LEVEL3, "nand_read_ecc: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+
+ /* Do not allow reads past end of device */
+ if ((from + len) > mtd->size) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: Attempt read beyond end of device\n");
+ *retlen = 0;
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ if (flags & NAND_GET_DEVICE)
+ nand_get_device (this, mtd, FL_READING);
+
+ /* Autoplace of oob data ? Use the default placement scheme */
+ if (oobsel->useecc == MTD_NANDECC_AUTOPLACE)
+ oobsel = this->autooob;
+
+ eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE;
+ oob_config = oobsel->eccpos;
+
+ /* Select the NAND device */
+ chipnr = (int)(from >> this->chip_shift);
+ this->select_chip(mtd, chipnr);
+
+ /* First we calculate the starting page */
+ realpage = (int) (from >> this->page_shift);
+ page = realpage & this->pagemask;
+
+ /* Get raw starting column */
+ col = from & (mtd->oobblock - 1);
+
+ end = mtd->oobblock;
+ ecc = this->eccsize;
+ eccbytes = this->eccbytes;
+
+ if ((eccmode == NAND_ECC_NONE) || (this->options & NAND_HWECC_SYNDROME))
+ compareecc = 0;
+
+ oobreadlen = mtd->oobsize;
+ if (this->options & NAND_HWECC_SYNDROME)
+ oobreadlen -= oobsel->eccbytes;
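+ /*
+ * Syndrome based hw ECC keeps its ecc bytes interleaved with the data
+ * rather than in the plain oob area, hence the shorter oob read
+ */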
+
+ /* Loop until all data read */
+ while (read < len) {
+
+ int aligned = (!col && (len - read) >= end);
+ /*
+ * If the read is not page aligned, we have to read into the data buffer
+ * due to ecc, else we read directly into the return buffer
+ */
+ if (aligned)
+ data_poi = &buf[read];
+ else
+ data_poi = this->data_buf;
+
+ /* Check, if we have this page in the buffer
+ *
+ * FIXME: Make it work when we must provide oob data too,
+ * check the usage of data_buf oob field
+ */
+ if (realpage == this->pagebuf && !oob_buf) {
+ /* aligned read ? */
+ if (aligned)
+ memcpy (data_poi, this->data_buf, end);
+ goto readdata;
+ }
+
+ /* Check, if we must send the read command */
+ if (sndcmd) {
+ this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page);
+ sndcmd = 0;
+ }
+
+ /* get oob area, if we have no oob buffer from fs-driver */
+ if (!oob_buf || oobsel->useecc == MTD_NANDECC_AUTOPLACE ||
+ oobsel->useecc == MTD_NANDECC_AUTOPL_USR)
+ oob_data = &this->data_buf[end];
+
+ eccsteps = this->eccsteps;
+
+ switch (eccmode) {
+ case NAND_ECC_NONE: { /* No ECC, Read in a page */
+ static unsigned long lastwhinge = 0;
+ if ((lastwhinge / HZ) != (jiffies / HZ)) {
+ printk (KERN_WARNING "Reading data from NAND FLASH without ECC is not recommended\n");
+ lastwhinge = jiffies;
+ }
+ this->read_buf(mtd, data_poi, end);
+ break;
+ }
+
+ case NAND_ECC_SOFT: /* Software ECC 3/256: Read in a page + oob data */
+ this->read_buf(mtd, data_poi, end);
+ for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=3, datidx += ecc)
+ this->calculate_ecc(mtd, &data_poi[datidx], &ecc_calc[i]);
+ this->read_buf(mtd, &oob_data[mtd->oobsize - oobreadlen], oobreadlen);
+ break;
+
+ default:
+ oobidx = 0;
+ datidx = 0;
+ eccidx = 0;
+ last_datidx = datidx;
+ last_oobidx = oobidx;
+ for (j = 0; this->layout[j].length; j++) {
+ int len = this->layout[j].length;
+ int oidx = oobidx;
+ switch (this->layout[j].type) {
+ case ITEM_TYPE_DATA:
+ DEBUG (MTD_DEBUG_LEVEL3, "%s: reading %d bytes of data\n", __FUNCTION__, this->layout[j].length);
+ this->enable_hwecc(mtd, NAND_ECC_READ);
+ this->read_buf(mtd, &data_poi[datidx], len);
+ datidx += this->layout[j].length;
+ break;
+
+ case ITEM_TYPE_ECC:
+ DEBUG (MTD_DEBUG_LEVEL3, "%s: reading %d ecc bytes\n", __FUNCTION__, this->layout[j].length);
+ /* let the particular driver decide whether to read ECC */
+ this->enable_hwecc(mtd, NAND_ECC_READSYN);
+ if (this->options & NAND_BUSWIDTH_16) {
+ if (oidx & 1) {
+ oidx--;
+ len++;
+ }
+ if (len & 1)
+ len--;
+ }
+
+ this->read_buf(mtd, &oob_data[oidx], len);
+ if (!compareecc) {
+ /* We calc error correction directly, it checks the hw
+ * generator for an error, reads back the syndrome and
+ * does the error correction on the fly */
+ ecc_status = this->correct_data(mtd, &data_poi[last_datidx], &oob_data[last_oobidx], &ecc_code[eccidx]);
+ if ((ecc_status == -1) || (ecc_status > (flags & 0xff))) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: "
+ "Failed ECC read, page 0x%08x on chip %d\n", page, chipnr);
+ ecc_failed++;
+ }
+ } else
+ this->calculate_ecc(mtd, &data_poi[last_datidx], &ecc_calc[eccidx]);
+ oobidx += this->layout[j].length;
+ eccidx += this->layout[j].length;
+ break;
+ case ITEM_TYPE_OOB:
+ DEBUG (MTD_DEBUG_LEVEL3, "%s: reading %d free oob bytes\n", __FUNCTION__, this->layout[j].length);
+ this->enable_hwecc(mtd, NAND_ECC_READOOB);
+ if (this->options & NAND_BUSWIDTH_16) {
+ if (oidx & 1) {
+ oidx--;
+ len++;
+ }
+ if (len & 1)
+ len--;
+ }
+
+ this->read_buf(mtd, &oob_data[oidx], len);
+ oobidx += this->layout[j].length;
+ break;
+ }
+ }
+ break;
+ }
+
+ /* Skip ECC check, if not requested (ECC_NONE or HW_ECC with syndromes) */
+ if (!compareecc)
+ goto readoob;
+
+ /* Pick the ECC bytes out of the oob data */
+ for (j = 0; j < oobsel->eccbytes; j++)
+ ecc_code[j] = oob_data[oob_config[j]];
+
+ /* correct data, if necessary */
+ for (i = 0, j = 0, datidx = 0; i < this->eccsteps; i++, datidx += ecc) {
+ ecc_status = this->correct_data(mtd, &data_poi[datidx], &ecc_code[j], &ecc_calc[j]);
+
+ /* Get next chunk of ecc bytes */
+ j += eccbytes;
+
+ /* Check, if we have a fs supplied oob-buffer,
+ * This is the legacy mode. Used by YAFFS1
+ * Should go away some day
+ */
+ if (oob_buf && oobsel->useecc == MTD_NANDECC_PLACE) {
+ int *p = (int *)(&oob_data[mtd->oobsize]);
+ p[i] = ecc_status;
+ }
+
+ if ((ecc_status == -1) || (ecc_status > (flags & 0xff))) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: " "Failed ECC read, page 0x%08x\n", page);
+ ecc_failed++;
+ }
+ }
+
+ readoob:
+ /* check, if we have a fs supplied oob-buffer */
+ if (oob_buf) {
+ /* without autoplace. Legacy mode used by YAFFS1 */
+ switch(oobsel->useecc) {
+ case MTD_NANDECC_AUTOPLACE:
+ case MTD_NANDECC_AUTOPL_USR:
+ /* Walk through the autoplace chunks */
+ for (i = 0; oobsel->oobfree[i][1]; i++) {
+ int from = oobsel->oobfree[i][0];
+ int num = oobsel->oobfree[i][1];
+ memcpy(&oob_buf[oob], &oob_data[from], num);
+ oob += num;
+ }
+ break;
+ case MTD_NANDECC_PLACE:
+ /* YAFFS1 legacy mode */
+ oob_data += this->eccsteps * sizeof (int);
+ default:
+ oob_data += mtd->oobsize;
+ }
+ }
+ readdata:
+ /* Partial page read, transfer data into fs buffer */
+ if (!aligned) {
+ for (j = col; j < end && read < len; j++)
+ buf[read++] = data_poi[j];
+ this->pagebuf = realpage;
+ } else
+ read += mtd->oobblock;
+
+ /* Apply delay or wait for ready/busy pin
+ * Do this before the AUTOINCR check, so no problems
+ * arise if a chip which does auto increment
+ * is marked as NOAUTOINCR by the board driver.
+ */
+ if (!this->dev_ready)
+ udelay (this->chip_delay);
+ else
+ nand_wait_ready(mtd);
+
+ if (read == len)
+ break;
+
+ /* For subsequent reads align to page boundary. */
+ col = 0;
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & this->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ this->select_chip(mtd, -1);
+ this->select_chip(mtd, chipnr);
+ }
+ /* Check, if the chip supports auto page increment
+ * or if we have hit a block boundary.
+ */
+ if (!NAND_CANAUTOINCR(this) || !(page & blockcheck))
+ sndcmd = 1;
+ }
+
+ /* Deselect and wake up anyone waiting on the device */
+ if (flags & NAND_GET_DEVICE)
+ nand_release_device(mtd);
+
+ /*
+ * Return success, if no ECC failures, else -EBADMSG
+ * fs driver will take care of that, because
+ * retlen == desired len and result == -EBADMSG
+ */
+ *retlen = read;
+ return ecc_failed ? -EBADMSG : 0;
+}
+
+/**
+ * nand_read_oob - [MTD Interface] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @oob_buf: the databuffer to put the oob data
+ *
+ * NAND read out-of-band data from the spare area
+ */
+static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * oob_buf)
+{
+
+ int i, j, col, realpage, page, end, ecc, chipnr, sndcmd = 1, reallen = 0;
+ int read = 0;
+ struct nand_chip *this = mtd->priv;
+ u_char *oob_data = oob_buf;
+ int eccsteps;
+ int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
+
+
+ DEBUG (MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n", __FUNCTION__, (unsigned int) from, (int) len);
+
+ /* Do not allow reads past end of device */
+ if ((from + len) > mtd->size) {
+ *retlen = 0;
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device (this, mtd, FL_READING);
+
+ /* Select the NAND device */
+ chipnr = (int)(from >> this->chip_shift);
+ this->select_chip(mtd, chipnr);
+
+ /* First we calculate the starting page */
+ realpage = (int) (from >> this->page_shift);
+ page = realpage & this->pagemask;
+
+ /* Get raw starting column */
+ col = from & (mtd->oobblock - 1);
+
+ end = mtd->oobblock;
+ ecc = this->eccsize;
+
+ /* Loop until all data read */
+ while (read < len) {
+ if (this->eccmode == NAND_ECC_SOFT || this->eccmode == NAND_ECC_NONE) {
+ int thislen = mtd->oobsize - col;
+ if (sndcmd) {
+ this->cmdfunc (mtd, NAND_CMD_READOOB, col, page);
+ col = 0;
+ sndcmd = 0;
+ }
+ thislen = min_t(int, thislen, len);
+ this->read_buf(mtd, &oob_buf[read], thislen);
+ read += thislen;
+ } else {
+ /* Check, if we must send the read command */
+ if (sndcmd) {
+ this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page);
+ sndcmd = 0;
+ }
+
+ eccsteps = this->eccsteps;
+ for (j = 0; this->layout[j].length; j++) {
+ i = 0;
+ switch (this->layout[j].type) {
+ case ITEM_TYPE_DATA:
+ DEBUG (MTD_DEBUG_LEVEL3, "%s: dummy data read\n", __FUNCTION__);
+ reallen += this->layout[j].length;
+ if (this->options & NAND_BUSWIDTH_16)
+ this->cmdfunc (mtd, NAND_CMD_READ0, reallen & ~1, page);
+ else
+ this->cmdfunc (mtd, NAND_CMD_READ0, reallen, page);
+ break;
+
+ case ITEM_TYPE_ECC:
+ case ITEM_TYPE_OOB:
+ DEBUG (MTD_DEBUG_LEVEL3, "%s: %s bytes read\n", __FUNCTION__, this->layout[j].type == ITEM_TYPE_ECC ? "ecc" : "oob");
+ i = min_t(int, col, this->layout[j].length);
+ if (i) {
+ reallen += i;
+ if (this->options & NAND_BUSWIDTH_16)
+ this->cmdfunc (mtd, NAND_CMD_READ0, reallen & ~1, page);
+ else
+ this->cmdfunc (mtd, NAND_CMD_READ0, reallen, page);
+ }
+ col -= i;
+
+ if (this->layout[j].type == ITEM_TYPE_ECC)
+ this->enable_hwecc(mtd, NAND_ECC_READSYN);
+ else
+ this->enable_hwecc(mtd, NAND_ECC_READOOB);
+ i = min_t(int, len - read, this->layout[j].length - i);
+ if (i) {
+ if (this->options & NAND_BUSWIDTH_16) {
+ if (reallen & 1) {
+ oob_data[0] = cpu_to_le16(this->read_word(mtd)) >> 8;
+ oob_data++; i--; reallen++;
+ }
+ if (i & 1)
+ this->read_buf(mtd, oob_data, i - 1);
+ else
+ this->read_buf(mtd, oob_data, i);
+ }
+ else
+ this->read_buf(mtd, oob_data, i);
+ reallen += i;
+ }
+ if (oob_buf + len == oob_data + i) {
+ read += i;
+ goto out;
+ }
+ break;
+ }
+ read += i;
+ oob_data += i;
+ }
+ }
+out:
+
+ /* Apply delay or wait for ready/busy pin
+ * Do this before the AUTOINCR check, so no problems
+ * arise if a chip which does auto increment
+ * is marked as NOAUTOINCR by the board driver.
+ */
+ if (!this->dev_ready)
+ udelay (this->chip_delay);
+ else
+ nand_wait_ready(mtd);
+
+ if (read == len)
+ break;
+
+ /* For subsequent reads align to page boundary. */
+ reallen = col = 0;
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & this->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ this->select_chip(mtd, -1);
+ this->select_chip(mtd, chipnr);
+ }
+ /* Check, if the chip supports auto page increment
+ * or if we have hit a block boundary.
+ */
+ if (!NAND_CANAUTOINCR(this) || !(page & blockcheck))
+ sndcmd = 1;
+ }
+
+ /* Deselect and wake up anyone waiting on the device */
+ nand_release_device(mtd);
+
+ *retlen = read;
+ /*
+ * Return success
+ */
+ return 0;
+
+}
+
+
+/**
+ * nand_read_raw - [GENERIC] Read raw data including oob into buffer
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @ooblen: number of oob data bytes to read
+ *
+ * Read raw data including oob into buffer
+ */
+int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_t len, size_t ooblen)
+{
+ struct nand_chip *this = mtd->priv;
+ int page = (int) (from >> this->page_shift);
+ int chip = (int) (from >> this->chip_shift);
+ int sndcmd = 1;
+ int cnt = 0;
+ int pagesize = mtd->oobblock + mtd->oobsize;
+ int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
+
+ /* Do not allow reads past end of device */
+ if ((from + len) > mtd->size) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_read_raw: Attempt read beyond end of device\n");
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device (this, mtd , FL_READING);
+
+ this->select_chip (mtd, chip);
+
+ /* Add requested oob length */
+ len += ooblen;
+
+ while (len) {
+ if (sndcmd)
+ this->cmdfunc (mtd, NAND_CMD_READ0, 0, page & this->pagemask);
+ sndcmd = 0;
+
+ this->read_buf (mtd, &buf[cnt], pagesize);
+
+ len -= pagesize;
+ cnt += pagesize;
+ page++;
+
+ if (!this->dev_ready)
+ udelay (this->chip_delay);
+ else
+ nand_wait_ready(mtd);
+
+ /* Check, if the chip supports auto page increment */
+ if (!NAND_CANAUTOINCR(this) || !(page & blockcheck))
+ sndcmd = 1;
+ }
+
+ /* Deselect and wake up anyone waiting on the device */
+ nand_release_device(mtd);
+ return 0;
+}
+
+
+/**
+ * nand_prepare_oobbuf - [GENERIC] Prepare the out of band buffer
+ * @mtd: MTD device structure
+ * @fsbuf: buffer given by fs driver
+ * @oobsel: out of band selection structure
+ * @autoplace: 1 = place given buffer into the oob bytes
+ * @numpages: number of pages to prepare
+ *
+ * Return:
+ * 1. Filesystem buffer available and autoplacement is off,
+ * return filesystem buffer
+ * 2. No filesystem buffer or autoplace is off, return internal
+ * buffer
+ * 3. Filesystem buffer is given and autoplace selected
+ * put data from fs buffer into internal buffer and
+ * return internal buffer
+ *
+ * Note: The internal buffer is filled with 0xff. This must
+ * be done only once, when no autoplacement happens
+ * Autoplacement sets the buffer dirty flag, which
+ * forces the 0xff fill before using the buffer again.
+ *
+*/
+static u_char * nand_prepare_oobbuf (struct mtd_info *mtd, u_char *fsbuf, struct nand_oobinfo *oobsel,
+ int autoplace, int numpages)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, len, ofs;
+
+ /* Zero copy fs supplied buffer */
+ if (fsbuf && !autoplace)
+ return fsbuf;
+
+ /* Check, if the buffer must be filled with ff again */
+ if (this->oobdirty) {
+ memset (this->oob_buf, 0xff,
+ mtd->oobsize << (this->phys_erase_shift - this->page_shift));
+ this->oobdirty = 0;
+ }
+
+ /* If we have no autoplacement or no fs buffer use the internal one */
+ if (!autoplace || !fsbuf)
+ return this->oob_buf;
+
+ /* Walk through the pages and place the data */
+ this->oobdirty = 1;
+ ofs = 0;
+ while (numpages--) {
+ for (i = 0, len = 0; len < mtd->oobavail; i++) {
+ int to = ofs + oobsel->oobfree[i][0];
+ int num = oobsel->oobfree[i][1];
+ memcpy (&this->oob_buf[to], fsbuf, num);
+ len += num;
+ fsbuf += num;
+ }
+ ofs += mtd->oobavail;
+ }
+ return this->oob_buf;
+}
+
+#define NOTALIGNED(x) (x & (mtd->oobblock-1)) != 0
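+/* NOTALIGNED(x) is true when x is not a multiple of the page size;
+ * oobblock is the number of data bytes per page */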
+
+/**
+ * nand_write - [MTD Interface] compatibility function for nand_write_ecc
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * This function simply calls nand_write_ecc with oob buffer and oobsel = NULL
+ *
+*/
+static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf)
+{
+ return (nand_write_ecc (mtd, to, len, retlen, buf, NULL, NULL));
+}
+
+/**
+ * nand_write_ecc - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ * @eccbuf: filesystem supplied oob data buffer
+ * @oobsel: oob selection structure
+ *
+ * NAND write with ECC
+ */
+static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel)
+{
+ int startpage, page, ret = -EIO, oob = 0, written = 0, chipnr;
+ int autoplace = 0, numpages, totalpages;
+ struct nand_chip *this = mtd->priv;
+ u_char *oobbuf, *bufstart;
+ int ppblock = (1 << (this->phys_erase_shift - this->page_shift));
+
+ DEBUG (MTD_DEBUG_LEVEL3, "nand_write_ecc: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
+
+ /* Initialize retlen, in case of early exit */
+ *retlen = 0;
+
+ /* Do not allow write past end of device */
+ if ((to + len) > mtd->size) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: Attempt to write past end of device\n");
+ return -EINVAL;
+ }
+
+ /* reject writes which are not page aligned */
+ if (NOTALIGNED (to) || NOTALIGNED(len)) {
+ printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n");
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device (this, mtd, FL_WRITING);
+
+ /* Calculate chipnr */
+ chipnr = (int)(to >> this->chip_shift);
+ /* Select the NAND device */
+ this->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd))
+ goto out;
+
+ /* if oobsel is NULL, use chip defaults */
+ if (oobsel == NULL)
+ oobsel = &mtd->oobinfo;
+
+ /* Autoplace of oob data ? Use the default placement scheme */
+ if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) {
+ oobsel = this->autooob;
+ autoplace = 1;
+ }
+ if (oobsel->useecc == MTD_NANDECC_AUTOPL_USR)
+ autoplace = 1;
+
+ /* Setup variables and oob buffer */
+ totalpages = len >> this->page_shift;
+ page = (int) (to >> this->page_shift);
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page <= this->pagebuf && this->pagebuf < (page + totalpages))
+ this->pagebuf = -1;
+
+ /* Set it relative to chip */
+ page &= this->pagemask;
+ startpage = page;
+ /* Calc number of pages we can write in one go */
+ numpages = min (ppblock - (startpage & (ppblock - 1)), totalpages);
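+ /*
+ * e.g. with 64 pages per block and a start page in the middle of a
+ * block, the first chunk is clamped at the block boundary so later
+ * iterations start block aligned
+ */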
+ oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel, autoplace, numpages);
+ bufstart = (u_char *)buf;
+
+ /* Loop until all data is written */
+ while (written < len) {
+
+ this->data_poi = (u_char*) &buf[written];
+ /* Write one page. If this is the last page to write
+ * or the last page in this block, then use the
+ * real pageprogram command, else select cached programming
+ * if supported by the chip.
+ */
+ ret = nand_write_page (mtd, this, page, &oobbuf[oob], oobsel, (--numpages > 0));
+ if (ret) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: write_page failed %d\n", ret);
+ goto out;
+ }
+ /* Next oob page */
+ oob += mtd->oobsize;
+ /* Update written bytes count */
+ written += mtd->oobblock;
+ if (written == len)
+ goto cmp;
+
+ /* Increment page address */
+ page++;
+
+ /* Have we hit a block boundary ? Then we have to verify and
+ * if verify is ok, we have to setup the oob buffer for
+ * the next pages.
+ */
+ if (!(page & (ppblock - 1))){
+ int ofs;
+ this->data_poi = bufstart;
+ ret = nand_verify_pages (mtd, this, startpage,
+ page - startpage,
+ oobbuf, oobsel, chipnr, (eccbuf != NULL));
+ if (ret) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret);
+ goto out;
+ }
+ *retlen = written;
+
+ ofs = autoplace ? mtd->oobavail : mtd->oobsize;
+ if (eccbuf)
+ eccbuf += (page - startpage) * ofs;
+ totalpages -= page - startpage;
+ numpages = min (totalpages, ppblock);
+ page &= this->pagemask;
+ startpage = page;
+ oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel,
+ autoplace, numpages);
+ oob = 0;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ this->select_chip(mtd, -1);
+ this->select_chip(mtd, chipnr);
+ }
+ }
+ }
+ /* Verify the remaining pages */
+cmp:
+ this->data_poi = bufstart;
+ ret = nand_verify_pages (mtd, this, startpage, totalpages,
+ oobbuf, oobsel, chipnr, (eccbuf != NULL));
+ if (!ret)
+ *retlen = written;
+ else
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret);
+
+out:
+ /* Deselect and wake up anyone waiting on the device */
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @oob_buf: the oob data to write
+ *
+ * NAND write out-of-band
+ */
+static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * oob_buf)
+{
+ int column, page, status, ret = -EIO, chipnr, eccsteps;
+ struct nand_chip *this = mtd->priv;
+
+ DEBUG (MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", __FUNCTION__, (unsigned int) to, (int) len);
+
+ /* Shift to get page */
+ page = (int) (to >> this->page_shift);
+ chipnr = (int) (to >> this->chip_shift);
+
+ /* Mask to get column */
+ column = to & (mtd->oobsize - 1);
+
+ /* Initialize return length value */
+ *retlen = 0;
+
+ /* Do not allow write past end of page */
+ if ((column + len) > mtd->oobsize) {
+ DEBUG (MTD_DEBUG_LEVEL0, "%s: Attempt to write past end of page\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device (this, mtd, FL_WRITING);
+
+ /* Select the NAND device */
+ this->select_chip(mtd, chipnr);
+
+ /* Reset the chip. Some chips (like the Toshiba TC5832DC found
+ in one of my DiskOnChip 2000 test units) will clear the whole
+ data page too if we don't do this. I have no clue why, but
+ I seem to have 'fixed' it in the doc2000 driver in
+ August 1999. dwmw2. */
+ this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd))
+ goto out;
+
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page == this->pagebuf)
+ this->pagebuf = -1;
+
+ if (this->eccmode == NAND_ECC_SOFT || this->eccmode == NAND_ECC_NONE) {
+ if (NAND_MUST_PAD(this)) {
+ /* Write out desired data */
+ this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock, page & this->pagemask);
+ /* prepad 0xff for partial programming */
+ this->write_buf(mtd, ffchars, column);
+ /* write data */
+ this->write_buf(mtd, oob_buf, len);
+ /* postpad 0xff for partial programming */
+ this->write_buf(mtd, ffchars, mtd->oobsize - (len+column));
+ } else {
+ /* Write out desired data */
+ this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock + column, page & this->pagemask);
+ /* write data */
+ this->write_buf(mtd, oob_buf, len);
+ }
+ } else {
+ int i = 0, j = 0;
+ int fflen = 0, old_fflen = 0, ooblen = 0;
+
+ /* Write out desired data */
+ this->cmdfunc (mtd, NAND_CMD_SEQIN, 0, page & this->pagemask);
+
+ eccsteps = this->eccsteps;
+ for (j = 0; this->layout[j].length; j++) {
+ switch (this->layout[j].type) {
+ case ITEM_TYPE_DATA:
+ if (this->options & NAND_COMPLEX_OOB_WRITE) {
+ this->enable_hwecc(mtd, NAND_ECC_WRITE);
+ this->write_buf(mtd, ffchars, this->layout[j].length);
+ fflen += this->layout[j].length;
+ } else {
+ if (old_fflen < fflen) {
+ this->cmdfunc (mtd, NAND_CMD_PAGEPROG, -1, -1);
+ status = this->waitfunc (mtd, this, FL_WRITING);
+ if (status & NAND_STATUS_FAIL) {
+ DEBUG (MTD_DEBUG_LEVEL0, "%s: Failed write, page 0x%08x\n", __FUNCTION__, page);
+ ret = -EIO;
+ goto out;
+ }
+ }
+ fflen += this->layout[j].length;
+ if (this->options & NAND_BUSWIDTH_16 && (fflen + ooblen) & 1)
+ this->cmdfunc (mtd, NAND_CMD_SEQIN, fflen + ooblen - 1, page & this->pagemask);
+ else
+ this->cmdfunc (mtd, NAND_CMD_SEQIN, fflen + ooblen, page & this->pagemask);
+ old_fflen = fflen;
+ }
+ break;
+
+ case ITEM_TYPE_ECC:
+ case ITEM_TYPE_OOB:
+ if (this->layout[j].type == ITEM_TYPE_ECC)
+ this->enable_hwecc(mtd, NAND_ECC_WRITESYN);
+ else
+ this->enable_hwecc(mtd, NAND_ECC_WRITEOOB);
+ i = min_t(int, column, this->layout[j].length);
+ if (i) {
+ if (this->options & NAND_BUSWIDTH_16 && i & 1)
+ i--;
+ if (i == 0) {
+ this->write_word(mtd, cpu_to_le16((oob_buf[0] << 8) | 0xff));
+ i++;
+ ooblen++;
+ } else
+ this->write_buf(mtd, ffchars, i);
+ }
+ column -= i;
+ fflen += i;
+ i = min_t(int, len + column - ooblen, this->layout[j].length - i);
+ if (i) {
+ if (column) {
+ this->write_word(mtd, cpu_to_le16((oob_buf[0] << 8) | 0xff));
+ i--;
+ ooblen++;
+ }
+ if (i & 1)
+ i--;
+ this->write_buf(mtd, &oob_buf[ooblen], i);
+ }
+ ooblen += i;
+ if (ooblen == len - 1) {
+ this->write_word(mtd, cpu_to_le16(oob_buf[ooblen]) | 0xff00);
+ ooblen += 2;
+ }
+ if (ooblen >= len) {
+ if (NAND_MUST_PAD(this))
+ this->write_buf(mtd, ffchars, mtd->oobsize + mtd->oobblock - fflen - ooblen);
+ goto finish;
+ }
+ break;
+ }
+ }
+ }
+finish:
+ /* Send command to program the OOB data */
+ this->cmdfunc (mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = this->waitfunc (mtd, this, FL_WRITING);
+
+ /* See if device thinks it succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ DEBUG (MTD_DEBUG_LEVEL0, "%s: Failed write, page 0x%08x\n", __FUNCTION__, page);
+ ret = -EIO;
+ goto out;
+ }
+ /* Return happy */
+ *retlen = len;
+
+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
+ if (this->eccmode == NAND_ECC_SOFT || this->eccmode == NAND_ECC_NONE) {
+ /* Send command to read back the data */
+ this->cmdfunc (mtd, NAND_CMD_READOOB, column, page & this->pagemask);
+
+ if (this->verify_buf(mtd, oob_buf, len)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write verify, page 0x%08x\n", page);
+ ret = -EIO;
+ goto out;
+ }
+ }
+#warning "Verify for OOB data in HW ECC case is NOT YET implemented"
+#endif
+ ret = 0;
+out:
+ /* Deselect and wake up anyone waiting on the device */
+ nand_release_device(mtd);
+
+ return ret;
+}
+
+/**
+ * nand_writev - [MTD Interface] compatibility function for nand_writev_ecc
+ * @mtd: MTD device structure
+ * @vecs: the iovectors to write
+ * @count: number of vectors
+ * @to: offset to write to
+ * @retlen: pointer to variable to store the number of written bytes
+ *
+ * NAND write with kvec. This just calls the ecc function
+ */
+static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count,
+ loff_t to, size_t * retlen)
+{
+ return (nand_writev_ecc (mtd, vecs, count, to, retlen, NULL, NULL));
+}
+
+/**
+ * nand_writev_ecc - [MTD Interface] write with iovec with ecc
+ * @mtd: MTD device structure
+ * @vecs: the iovectors to write
+ * @count: number of vectors
+ * @to: offset to write to
+ * @retlen: pointer to variable to store the number of written bytes
+ * @eccbuf: filesystem supplied oob data buffer
+ * @oobsel: oob selection structure
+ *
+ * NAND write with iovec with ecc
+ */
+static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count,
+ loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel)
+{
+ int i, page, len, total_len, ret = -EIO, written = 0, chipnr;
+ int oob, numpages, autoplace = 0, startpage;
+ struct nand_chip *this = mtd->priv;
+ int ppblock = (1 << (this->phys_erase_shift - this->page_shift));
+ u_char *oobbuf, *bufstart;
+
+ /* Preset written len for early exit */
+ *retlen = 0;
+
+ /* Calculate total length of data */
+ total_len = 0;
+ for (i = 0; i < count; i++)
+ total_len += (int) vecs[i].iov_len;
+
+ DEBUG (MTD_DEBUG_LEVEL3,
+ "nand_writev: to = 0x%08x, len = %i, count = %ld\n", (unsigned int) to, (unsigned int) total_len, count);
+
+ /* Do not allow write past end of device */
+ if ((to + total_len) > mtd->size) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_writev: Attempted write past end of device\n");
+ return -EINVAL;
+ }
+
+ /* reject writes which are not page aligned */
+ if (NOTALIGNED (to) || NOTALIGNED(total_len)) {
+ printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n");
+ return -EINVAL;
+ }
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device (this, mtd, FL_WRITING);
+
+ /* Get the current chip-nr */
+ chipnr = (int) (to >> this->chip_shift);
+ /* Select the NAND device */
+ this->select_chip(mtd, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd))
+ goto out;
+
+ /* if oobsel is NULL, use chip defaults */
+ if (oobsel == NULL)
+ oobsel = &mtd->oobinfo;
+
+ /* Autoplace of oob data ? Use the default placement scheme */
+ if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) {
+ oobsel = this->autooob;
+ autoplace = 1;
+ }
+ if (oobsel->useecc == MTD_NANDECC_AUTOPL_USR)
+ autoplace = 1;
+
+ /* Setup start page */
+ page = (int) (to >> this->page_shift);
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page <= this->pagebuf && this->pagebuf < ((to + total_len) >> this->page_shift))
+ this->pagebuf = -1;
+
+ startpage = page & this->pagemask;
+
+ /* Loop until all kvec' data has been written */
+ len = 0;
+ while (count) {
+ /* If the given tuple is >= pagesize then
+ * write it out from the iov
+ */
+ if ((vecs->iov_len - len) >= mtd->oobblock) {
+ /* Calc number of pages we can write
+ * out of this iov in one go */
+ numpages = (vecs->iov_len - len) >> this->page_shift;
+ /* Do not cross block boundaries */
+ numpages = min (ppblock - (startpage & (ppblock - 1)), numpages);
+ oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages);
+ bufstart = (u_char *)vecs->iov_base;
+ bufstart += len;
+ this->data_poi = bufstart;
+ oob = 0;
+ for (i = 1; i <= numpages; i++) {
+ /* Write one page. If this is the last page to write
+ * then use the real pageprogram command, else select
+ * cached programming if supported by the chip.
+ */
+ ret = nand_write_page (mtd, this, page & this->pagemask,
+ &oobbuf[oob], oobsel, i != numpages);
+ if (ret)
+ goto out;
+ this->data_poi += mtd->oobblock;
+ len += mtd->oobblock;
+ oob += mtd->oobsize;
+ page++;
+ }
+ /* Check, if we have to switch to the next tuple */
+ if (len >= (int) vecs->iov_len) {
+ vecs++;
+ len = 0;
+ count--;
+ }
+ } else {
+ /* We must use the internal buffer, read data out of each
+ * tuple until we have a full page to write
+ */
+ int cnt = 0;
+ while (cnt < mtd->oobblock) {
+ if (vecs->iov_base != NULL && vecs->iov_len)
+ this->data_buf[cnt++] = ((u_char *) vecs->iov_base)[len++];
+ /* Check, if we have to switch to the next tuple */
+ if (len >= (int) vecs->iov_len) {
+ vecs++;
+ len = 0;
+ count--;
+ }
+ }
+ this->pagebuf = page;
+ this->data_poi = this->data_buf;
+ bufstart = this->data_poi;
+ numpages = 1;
+ oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages);
+ ret = nand_write_page (mtd, this, page & this->pagemask,
+ oobbuf, oobsel, 0);
+ if (ret)
+ goto out;
+ page++;
+ }
+
+ this->data_poi = bufstart;
+ ret = nand_verify_pages (mtd, this, startpage, numpages, oobbuf, oobsel, chipnr, 0);
+ if (ret)
+ goto out;
+
+ written += mtd->oobblock * numpages;
+ /* All done ? */
+ if (!count)
+ break;
+
+ startpage = page & this->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!startpage) {
+ chipnr++;
+ this->select_chip(mtd, -1);
+ this->select_chip(mtd, chipnr);
+ }
+ }
+ ret = 0;
+out:
+ /* Deselect and wake up anyone waiting on the device */
+ nand_release_device(mtd);
+
+ *retlen = written;
+ return ret;
+}
+
+/**
+ * single_erase_cmd - [GENERIC] NAND standard block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
+ *
+ * Standard erase command for NAND chips
+ */
+static void single_erase_cmd (struct mtd_info *mtd, int page)
+{
+ struct nand_chip *this = mtd->priv;
+ /* Send commands to erase a block */
+ this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page);
+ this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1);
+}
+
+/**
+ * multi_erase_cmd - [GENERIC] AND specific block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
+ *
+ * AND multi block erase command function
+ * Erase 4 consecutive blocks
+ */
+static void multi_erase_cmd (struct mtd_info *mtd, int page)
+{
+ struct nand_chip *this = mtd->priv;
+ /* Send commands to erase a block */
+ this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++);
+ this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++);
+ this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++);
+ this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page);
+ this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1);
+}
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks
+ */
+static int nand_erase (struct mtd_info *mtd, struct erase_info *instr)
+{
+ return nand_erase_nand (mtd, instr, 0);
+}
+
+#define BBT_PAGE_MASK 0xffffff3f
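+/* BBT_PAGE_MASK clears bits 6 and 7 of a page number, so two pages compare
+ * equal below when they differ only in those bits */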
+/**
+ * nand_erase_nand - [NAND Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one or more blocks
+ */
+int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbbt)
+{
+ int page, len, status, pages_per_block, ret, chipnr;
+ struct nand_chip *this = mtd->priv;
+ int rewrite_bbt[NAND_MAX_CHIPS]={0}; /* flags to indicate the page, if bbt needs to be rewritten. */
+ unsigned int bbt_masked_page; /* bbt mask to compare to page being erased. */
+ /* It is used to see if the current page is in the same */
+ /* 256 block group and the same bank as the bbt. */
+
+ DEBUG (MTD_DEBUG_LEVEL3,
+ "nand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len);
+
+ /* Start address must align on block boundary */
+ if (instr->addr & ((1 << this->phys_erase_shift) - 1)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
+
+ /* Length must align on block boundary */
+ if (instr->len & ((1 << this->phys_erase_shift) - 1)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Length not block aligned\n");
+ return -EINVAL;
+ }
+
+ /* Do not allow erase past end of device */
+ if ((instr->len + instr->addr) > mtd->size) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Erase past end of device\n");
+ return -EINVAL;
+ }
+
+ instr->fail_addr = 0xffffffff;
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device (this, mtd, FL_ERASING);
+
+ /* Shift to get first page */
+ page = (int) (instr->addr >> this->page_shift);
+ chipnr = (int) (instr->addr >> this->chip_shift);
+
+ /* Calculate pages in each block */
+ pages_per_block = 1 << (this->phys_erase_shift - this->page_shift);
+
+ /* Select the NAND device */
+ this->select_chip(mtd, chipnr);
+
+ /* Check the WP bit */
+ /* Check, if it is write protected */
+ if (nand_check_wp(mtd)) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Device is write protected!!!\n");
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /* if BBT requires refresh, set the BBT page mask to see if the BBT should be rewritten */
+ if (this->options & BBT_AUTO_REFRESH) {
+ bbt_masked_page = this->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
+ } else {
+ bbt_masked_page = 0xffffffff; /* should not match anything */
+ }
+
+ /* Loop through the pages */
+ len = instr->len;
+
+ instr->state = MTD_ERASING;
+
+ while (len) {
+ /* Check if we have a bad block; we do not erase bad blocks! */
+ if (nand_block_checkbad(mtd, ((loff_t) page) << this->page_shift, 0, allowbbt)) {
+ printk (KERN_WARNING "nand_erase: attempt to erase a bad block at page 0x%08x\n", page);
+ instr->state = MTD_ERASE_FAILED;
+ goto erase_exit;
+ }
+
+ /* Invalidate the page cache, if we erase the block which contains
+ the current cached page */
+ if (page <= this->pagebuf && this->pagebuf < (page + pages_per_block))
+ this->pagebuf = -1;
+
+ this->erase_cmd (mtd, page & this->pagemask);
+
+ status = this->waitfunc (mtd, this, FL_ERASING);
+
+ /* See if operation failed and additional status checks are available */
+ if ((status & NAND_STATUS_FAIL) && (this->errstat)) {
+ status = this->errstat(mtd, this, FL_ERASING, status, page);
+ }
+
+ /* See if block erase succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: " "Failed erase, page 0x%08x\n", page);
+ instr->state = MTD_ERASE_FAILED;
+ instr->fail_addr = (page << this->page_shift);
+ goto erase_exit;
+ }
+
+ /* if BBT requires refresh, set the BBT rewrite flag to the page being erased */
+ if (this->options & BBT_AUTO_REFRESH) {
+ if (((page & BBT_PAGE_MASK) == bbt_masked_page) &&
+ (page != this->bbt_td->pages[chipnr])) {
+ rewrite_bbt[chipnr] = (page << this->page_shift);
+ }
+ }
+
+ /* Increment page address and decrement length */
+ len -= (1 << this->phys_erase_shift);
+ page += pages_per_block;
+
+ /* Check, if we cross a chip boundary */
+ if (len && !(page & this->pagemask)) {
+ chipnr++;
+ this->select_chip(mtd, -1);
+ this->select_chip(mtd, chipnr);
+
+ /* if BBT requires refresh and BBT-PERCHIP,
+ * set the BBT page mask to see if this BBT should be rewritten */
+ if ((this->options & BBT_AUTO_REFRESH) && (this->bbt_td->options & NAND_BBT_PERCHIP)) {
+ bbt_masked_page = this->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
+ }
+
+ }
+ }
+ instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+ ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+ /* Do call back function */
+ if (!ret)
+ mtd_erase_callback(instr);
+
+ /* Deselect and wake up anyone waiting on the device */
+ nand_release_device(mtd);
+
+ /* if BBT requires refresh and erase was successful, rewrite any selected bad block tables */
+ if ((this->options & BBT_AUTO_REFRESH) && (!ret)) {
+ for (chipnr = 0; chipnr < this->numchips; chipnr++) {
+ if (rewrite_bbt[chipnr]) {
+ /* update the BBT for chip */
+ DEBUG (MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt (%d:0x%0x 0x%0x)\n",
+ chipnr, rewrite_bbt[chipnr], this->bbt_td->pages[chipnr]);
+ nand_update_bbt (mtd, rewrite_bbt[chipnr]);
+ }
+ }
+ }
+
+ /* Return more or less happy */
+ return ret;
+}
+
+/**
+ * nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait for chip ready function
+ */
+static void nand_sync (struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ DEBUG (MTD_DEBUG_LEVEL3, "nand_sync: called\n");
+
+ /* Grab the lock and see if the device is available */
+ nand_get_device (this, mtd, FL_SYNCING);
+ /* Release it and go back */
+ nand_release_device (mtd);
+}
+
+
+/**
+ * nand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_isbad (struct mtd_info *mtd, loff_t ofs)
+{
+ /* Check for invalid offset */
+ if (ofs > mtd->size)
+ return -EINVAL;
+
+ return nand_block_checkbad (mtd, ofs, 1, 0);
+}
+
+/**
+ * nand_block_markbad - [MTD Interface] Mark the block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_markbad (struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *this = mtd->priv;
+ int ret;
+
+ if ((ret = nand_block_isbad(mtd, ofs))) {
+ /* If it was bad already, return success and do nothing. */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return this->block_markbad(mtd, ofs);
+}
+
+/**
+ * nand_suspend - [MTD Interface] Suspend the NAND flash
+ * @mtd: MTD device structure
+ */
+static int nand_suspend(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ return nand_get_device (this, mtd, FL_PM_SUSPENDED);
+}
+
+/**
+ * nand_resume - [MTD Interface] Resume the NAND flash
+ * @mtd: MTD device structure
+ */
+static void nand_resume(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (this->state == FL_PM_SUSPENDED)
+ nand_release_device(mtd);
+ else
+ printk(KERN_ERR "resume() called for the chip which is not "
+ "in suspended state\n");
+
+}
+
+/**
+ * fill_autooob_layout - [NAND Interface] build the layout for hardware ECC case
+ * @mtd: MTD device structure
+ * @eccbytes: Number of ECC bytes per page
+ *
+ * Build the page_layout array for NAND page handling for hardware ECC
+ * handling basing on the nand_oobinfo structure supplied for the chip
+ */
+static int fill_autooob_layout(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ struct nand_oobinfo *oob = this->autooob;
+ int oobfreesize = 0;
+ int i = 1, res = 0;
+ int eccpos = 0, eccbytes = 0, cur = 0, oobcur = 0;
+
+ this->layout = kmalloc(HW_AUTOOOB_LAYOUT_SIZE * sizeof (struct page_layout_item), GFP_KERNEL);
+
+ if (this->layout == NULL)
+ return -ENOMEM;
+ else
+ this->layout_allocated = 1;
+
+ memset(this->layout, 0, HW_AUTOOOB_LAYOUT_SIZE * sizeof (struct page_layout_item));
+
+
+ this->layout[0].type = ITEM_TYPE_DATA;
+ this->layout[0].length = mtd->oobblock;
+ DEBUG (MTD_DEBUG_LEVEL3, "fill_autooob_layout: data type, length %d\n", this->layout[0].length);
+
+ while (i < HW_AUTOOOB_LAYOUT_SIZE / this->eccsteps - 1 && cur < mtd->oobsize / this->eccsteps) {
+ if (oob->oobfree[oobcur][0] == cur) {
+ int len = oob->oobfree[oobcur][1];
+ oobfreesize += this->layout[i].length;
+ oobcur++;
+ if (i > 0 && this->layout[i-1].type == ITEM_TYPE_OOB) {
+ i--;
+ cur -= this->layout[i].length;
+ this->layout[i].length += len;
+ DEBUG (MTD_DEBUG_LEVEL3, "fill_autooob_layout: oob concatenated, aggregate length %d\n", this->layout[i].length);
+ } else {
+ this->layout[i].type = ITEM_TYPE_OOB;
+ this->layout[i].length = len;
+ DEBUG (MTD_DEBUG_LEVEL3, "fill_autooob_layout: oob type, length %d\n", this->layout[i].length);
+ }
+ } else if (oob->eccpos[eccpos] == cur) {
+ int eccpos_cur = eccpos;
+ do {
+ eccpos++;
+ eccbytes++;
+ } while (eccbytes < oob->eccbytes && oob->eccpos[eccpos] == oob->eccpos[eccpos+1] - 1);
+ eccpos++;
+ eccbytes++;
+ this->layout[i].type = ITEM_TYPE_ECC;
+ this->layout[i].length = eccpos - eccpos_cur;
+ DEBUG (MTD_DEBUG_LEVEL3, "fill_autooob_layout: ecc type, length %d\n", this->layout[i].length);
+ } else {
+ int len = min_t(int, oob->eccpos[eccpos], oob->oobfree[oobcur][0]);
+ if (i > 0 && this->layout[i-1].type == ITEM_TYPE_OOB) {
+ i--;
+ cur -= this->layout[i].length;
+ this->layout[i].length += len;
+ DEBUG (MTD_DEBUG_LEVEL3, "fill_autooob_layout: oob concatenated, aggregate length %d\n", this->layout[i].length);
+ } else {
+ this->layout[i].type = ITEM_TYPE_OOB;
+ this->layout[i].length = len;
+ DEBUG (MTD_DEBUG_LEVEL3, "fill_autooob_layout: oob type, length %d\n", this->layout[i].length);
+ }
+ }
+ cur += this->layout[i].length;
+ i++;
+ }
+ if (cur < mtd->oobsize / this->eccsteps - 1) {
+ kfree(this->layout);
+ res = -1;
+ } else if (this->eccsteps > 1) {
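+ /* Replicate the per-step oob/ecc pattern built above for the
+ * remaining eccsteps - 1 steps of the page. */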
+ int j = i;
+ i--;
+ while (j < this->eccsteps * i + 1) {
+ this->layout[j].length = this->layout[j-i].length;
+ this->layout[j].type = this->layout[j-i].type;
+ j++;
+ }
+ }
+ return res;
+}
+
+/**
+ * nand_scan - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: Number of chips to scan for
+ *
+ * This fills out all the uninitialized function pointers
+ * with the defaults.
+ * The flash ID is read and the mtd/chip structures are
+ * filled with the appropriate values. Buffers are allocated if
+ * they are not provided by the board driver.
+ *
+ */
+int nand_scan (struct mtd_info *mtd, int maxchips)
+{
+ int i, nand_maf_id, nand_dev_id, busw, maf_id;
+ struct nand_chip *this = mtd->priv;
+
+ /* Get buswidth to select the correct functions*/
+ busw = this->options & NAND_BUSWIDTH_16;
+
+ /* check for proper chip_delay setup, set 20us if not */
+ if (!this->chip_delay)
+ this->chip_delay = 20;
+
+ /* check if a user supplied command function is given */
+ if (this->cmdfunc == NULL)
+ this->cmdfunc = nand_command;
+
+ /* check if a user supplied wait function is given */
+ if (this->waitfunc == NULL)
+ this->waitfunc = nand_wait;
+
+ if (!this->select_chip)
+ this->select_chip = nand_select_chip;
+ if (!this->write_byte)
+ this->write_byte = busw ? nand_write_byte16 : nand_write_byte;
+ if (!this->read_byte)
+ this->read_byte = busw ? nand_read_byte16 : nand_read_byte;
+ if (!this->write_word)
+ this->write_word = nand_write_word;
+ if (!this->read_word)
+ this->read_word = nand_read_word;
+ if (!this->block_bad)
+ this->block_bad = nand_block_bad;
+ if (!this->block_markbad)
+ this->block_markbad = nand_default_block_markbad;
+ if (!this->write_buf)
+ this->write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!this->read_buf)
+ this->read_buf = busw ? nand_read_buf16 : nand_read_buf;
+ if (!this->verify_buf)
+ this->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
+ if (!this->scan_bbt)
+ this->scan_bbt = nand_default_bbt;
+
+ /* 'ff' the ffchars */
+ memset(ffchars, 0xff, FFCHARS_SIZE);
+
+ /* Select the device */
+ this->select_chip(mtd, 0);
+
+ /* Send the command for reading device ID */
+ this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+ nand_maf_id = this->read_byte(mtd);
+ nand_dev_id = this->read_byte(mtd);
+
+ /* Print and store flash device information */
+ for (i = 0; nand_flash_ids[i].name != NULL; i++) {
+
+ if (nand_dev_id != nand_flash_ids[i].id)
+ continue;
+
+ if (!mtd->name) mtd->name = nand_flash_ids[i].name;
+ this->chipsize = nand_flash_ids[i].chipsize << 20;
+
+ /* New devices have all the information in additional id bytes */
+ if (!nand_flash_ids[i].pagesize) {
+ int extid;
+ /* The 3rd id byte contains non relevant data ATM */
+ extid = this->read_byte(mtd);
+ /* The 4th id byte is the important one */
+ extid = this->read_byte(mtd);
+ /* Calc pagesize */
+ mtd->oobblock = 1024 << (extid & 0x3);
+ extid >>= 2;
+ /* Calc oobsize */
+ mtd->oobsize = (8 << (extid & 0x01)) * (mtd->oobblock >> 9);
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
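+ /* For instance, a 4th id byte of 0x15 decodes to a 2048 byte
+ * page, 64 byte oob, 128KiB erase block and an 8 bit bus. */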
+
+ } else {
+ /* Old devices have this data hardcoded in the
+ * device id table */
+ mtd->erasesize = nand_flash_ids[i].erasesize;
+ mtd->oobblock = nand_flash_ids[i].pagesize;
+ mtd->oobsize = mtd->oobblock / 32;
+ busw = nand_flash_ids[i].options & NAND_BUSWIDTH_16;
+ }
+
+ /* Try to identify manufacturer */
+ for (maf_id = 0; nand_manuf_ids[maf_id].id != 0x0; maf_id++) {
+ if (nand_manuf_ids[maf_id].id == nand_maf_id)
+ break;
+ }
+
+ /* Check if buswidth is correct. Hardware drivers should set
+ * this correctly! */
+ if (busw != (this->options & NAND_BUSWIDTH_16)) {
+ printk (KERN_INFO "NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id,
+ nand_manuf_ids[maf_id].name , mtd->name);
+ printk (KERN_WARNING
+ "NAND bus width %d instead %d bit\n",
+ (this->options & NAND_BUSWIDTH_16) ? 16 : 8,
+ busw ? 16 : 8);
+ this->select_chip(mtd, -1);
+ return 1;
+ }
+
+ /* Calculate the address shift from the page size */
+ this->page_shift = ffs(mtd->oobblock) - 1;
+ this->bbt_erase_shift = this->phys_erase_shift = ffs(mtd->erasesize) - 1;
+ this->chip_shift = ffs(this->chipsize) - 1;
+
+ /* Set the bad block position */
+ this->badblockpos = mtd->oobblock > 512 ?
+ NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
+
+ /* Get chip options, preserve non chip based options */
+ this->options &= ~NAND_CHIPOPTIONS_MSK;
+ this->options |= nand_flash_ids[i].options & NAND_CHIPOPTIONS_MSK;
+ /* Set this as a default. Board drivers can override it, if necessary */
+ this->options |= NAND_NO_AUTOINCR;
+ /* Check if this is not a Samsung device. Do not clear the options
+ * for chips which do not have an extended id.
+ */
+ if (nand_maf_id != NAND_MFR_SAMSUNG && !nand_flash_ids[i].pagesize)
+ this->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+
+ /* Check for AND chips with 4 page planes */
+ if (this->options & NAND_4PAGE_ARRAY)
+ this->erase_cmd = multi_erase_cmd;
+ else
+ this->erase_cmd = single_erase_cmd;
+
+ /* Do not replace user supplied command function ! */
+ if (mtd->oobblock > 512 && this->cmdfunc == nand_command)
+ this->cmdfunc = nand_command_lp;
+
+ printk (KERN_INFO "NAND device: Manufacturer ID:"
+ " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id,
+ nand_manuf_ids[maf_id].name , nand_flash_ids[i].name);
+ break;
+ }
+
+ if (!nand_flash_ids[i].name) {
+ printk (KERN_WARNING "No NAND device found!!!\n");
+ this->select_chip(mtd, -1);
+ return 1;
+ }
+
+ for (i=1; i < maxchips; i++) {
+ this->select_chip(mtd, i);
+
+ /* Send the command for reading device ID */
+ this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1);
+
+ /* Read manufacturer and device IDs */
+ if (nand_maf_id != this->read_byte(mtd) ||
+ nand_dev_id != this->read_byte(mtd))
+ break;
+ }
+ if (i > 1)
+ printk(KERN_INFO "%d NAND chips detected\n", i);
+
+ /* Allocate buffers, if necessary */
+ if (!this->oob_buf) {
+ size_t len;
+ len = mtd->oobsize << (this->phys_erase_shift - this->page_shift);
+ this->oob_buf = kmalloc (len, GFP_KERNEL);
+ if (!this->oob_buf) {
+ printk (KERN_ERR "nand_scan(): Cannot allocate oob_buf\n");
+ return -ENOMEM;
+ }
+ this->options |= NAND_OOBBUF_ALLOC;
+ }
+
+ if (!this->data_buf) {
+ size_t len;
+ len = mtd->oobblock + mtd->oobsize;
+ this->data_buf = kmalloc (len, GFP_KERNEL);
+ if (!this->data_buf) {
+ if (this->options & NAND_OOBBUF_ALLOC)
+ kfree (this->oob_buf);
+ printk (KERN_ERR "nand_scan(): Cannot allocate data_buf\n");
+ return -ENOMEM;
+ }
+ this->options |= NAND_DATABUF_ALLOC;
+ }
+
+ /* Store the number of chips and calc total size for mtd */
+ this->numchips = i;
+ mtd->size = i * this->chipsize;
+ /* Convert chipsize to number of pages per chip -1. */
+ this->pagemask = (this->chipsize >> this->page_shift) - 1;
+ /* Preset the internal oob buffer */
+ memset(this->oob_buf, 0xff, mtd->oobsize << (this->phys_erase_shift - this->page_shift));
+
+ /* If no default placement scheme is given, select an
+ * appropriate one */
+ if (!this->autooob) {
+ /* Select the appropriate default oob placement scheme for
+ * placement agnostic filesystems */
+ switch (mtd->oobsize) {
+ case 8:
+ this->autooob = &nand_oob_8;
+ break;
+ case 16:
+ this->autooob = &nand_oob_16;
+ break;
+ case 64:
+ this->autooob = &nand_oob_64;
+ break;
+ default:
+ printk (KERN_WARNING "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ BUG();
+ }
+ }
+
+ /* The number of bytes available for the filesystem to place fs dependent
+ * oob data */
+ mtd->oobavail = 0;
+ for (i = 0; this->autooob->oobfree[i][1]; i++)
+ mtd->oobavail += this->autooob->oobfree[i][1];
+
+ /*
+ * check ECC mode, default to software
+ * if 3byte/512byte hardware ECC is selected and we have 256 byte pagesize
+ * fallback to software ECC
+ */
+ this->eccsize = 256; /* set default eccsize */
+ this->eccbytes = 3;
+
+ switch (this->eccmode) {
+ case NAND_ECC_HW12_2048:
+ if (mtd->oobblock < 2048) {
+ printk(KERN_WARNING "2048 byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
+ mtd->oobblock);
+ this->eccmode = NAND_ECC_SOFT;
+ this->calculate_ecc = nand_calculate_ecc;
+ this->correct_data = nand_correct_data;
+ } else
+ this->eccsize = 2048;
+ break;
+
+ case NAND_ECC_HW3_512:
+ case NAND_ECC_HW6_512:
+ case NAND_ECC_HW8_512:
+ if (mtd->oobblock == 256) {
+ printk (KERN_WARNING "512 byte HW ECC not possible on 256 Byte pagesize, fallback to SW ECC \n");
+ this->eccmode = NAND_ECC_SOFT;
+ this->calculate_ecc = nand_calculate_ecc;
+ this->correct_data = nand_correct_data;
+ } else
+ this->eccsize = 512; /* set eccsize to 512 */
+ break;
+
+ case NAND_ECC_HW3_256:
+ break;
+
+ case NAND_ECC_NONE:
+ printk (KERN_WARNING "NAND_ECC_NONE selected by board driver. This is not recommended !!\n");
+ this->eccmode = NAND_ECC_NONE;
+ break;
+
+ case NAND_ECC_SOFT:
+ this->calculate_ecc = nand_calculate_ecc;
+ this->correct_data = nand_correct_data;
+ break;
+
+ default:
+ printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode);
+ BUG();
+ }
+
+ /* Check hardware ecc function availability and adjust number of ecc bytes per
+ * calculation step
+ */
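+ /* The hardware ECC cases below fall through on purpose, so eccbytes
+ * accumulates to 12, 8 or 6, or stays at the default of 3. */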
+ switch (this->eccmode) {
+ case NAND_ECC_HW12_2048:
+ this->eccbytes += 4;
+ case NAND_ECC_HW8_512:
+ this->eccbytes += 2;
+ case NAND_ECC_HW6_512:
+ this->eccbytes += 3;
+ case NAND_ECC_HW3_512:
+ case NAND_ECC_HW3_256:
+ if (this->calculate_ecc && this->correct_data && this->enable_hwecc)
+ break;
+ printk (KERN_WARNING "No ECC functions supplied, Hardware ECC not possible\n");
+ BUG();
+ }
+
+ mtd->eccsize = this->eccsize;
+
+ /* Set the number of read / write steps for one page to ensure ECC generation */
+ switch (this->eccmode) {
+ case NAND_ECC_HW12_2048:
+ this->eccsteps = mtd->oobblock / 2048;
+ break;
+ case NAND_ECC_HW3_512:
+ case NAND_ECC_HW6_512:
+ case NAND_ECC_HW8_512:
+ this->eccsteps = mtd->oobblock / 512;
+ break;
+ case NAND_ECC_HW3_256:
+ case NAND_ECC_SOFT:
+ this->eccsteps = mtd->oobblock / 256;
+ break;
+
+ case NAND_ECC_NONE:
+ this->eccsteps = 1;
+ break;
+ }
+
+ mtd->eccsize = this->eccsize;
+
+ /* We consider only layout allocation performed in nand_base */
+ this->layout_allocated = 0;
+ if (!this->layout && this->autooob)
+ if (fill_autooob_layout(mtd) < 0)
+ BUG();
+
+ /* Initialize state, waitqueue and spinlock */
+ this->state = FL_READY;
+ init_waitqueue_head (&this->wq);
+ spin_lock_init (&this->chip_lock);
+
+ /* De-select the device */
+ this->select_chip(mtd, -1);
+
+ /* Invalidate the pagebuffer reference */
+ this->pagebuf = -1;
+
+ /* Fill in remaining MTD driver data */
+ mtd->type = MTD_NANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH | MTD_ECC;
+ mtd->ecctype = MTD_ECC_SW;
+ mtd->erase = nand_erase;
+ mtd->point = NULL;
+ mtd->unpoint = NULL;
+ mtd->read = nand_read;
+ mtd->write = nand_write;
+ mtd->read_ecc = nand_read_ecc;
+ mtd->write_ecc = nand_write_ecc;
+ mtd->read_oob = nand_read_oob;
+ mtd->write_oob = nand_write_oob;
+ mtd->readv = NULL;
+ mtd->writev = nand_writev;
+ mtd->writev_ecc = nand_writev_ecc;
+ mtd->sync = nand_sync;
+ mtd->lock = NULL;
+ mtd->unlock = NULL;
+ mtd->suspend = nand_suspend;
+ mtd->resume = nand_resume;
+ mtd->block_isbad = nand_block_isbad;
+ mtd->block_markbad = nand_block_markbad;
+
+ /* and make the autooob the default one */
+ memcpy(&mtd->oobinfo, this->autooob, sizeof(mtd->oobinfo));
+
+ mtd->owner = THIS_MODULE;
+
+ /* Check, if we should skip the bad block table scan */
+ if (this->options & NAND_SKIP_BBTSCAN)
+ return 0;
+
+ /* Build bad block table */
+ return this->scan_bbt (mtd);
+}
+
+/**
+ * nand_release - [NAND Interface] Free resources held by the NAND device
+ * @mtd: MTD device structure
+*/
+void nand_release (struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ /* Deregister partitions */
+ del_mtd_partitions (mtd);
+#endif
+ /* Deregister the device */
+ del_mtd_device (mtd);
+
+ /* Free bad block table memory, if allocated */
+ if (this->bbt)
+ kfree (this->bbt);
+ /* Buffer allocated by nand_scan ? */
+ if (this->options & NAND_OOBBUF_ALLOC)
+ kfree (this->oob_buf);
+ /* Buffer allocated by nand_scan ? */
+ if (this->options & NAND_DATABUF_ALLOC)
+ kfree (this->data_buf);
+ /* Free layout array if it was allocated by fill_autooob_layout */
+ if (this->layout_allocated)
+ kfree(this->layout);
+}
+
+EXPORT_SYMBOL_GPL (nand_scan);
+EXPORT_SYMBOL_GPL (nand_release);
+
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION ("Generic NAND flash driver code");
diff --git a/linux-2.4.x/drivers/mtd/nand/nand_bbt.c b/linux-2.4.x/drivers/mtd/nand/nand_bbt.c
new file mode 100644
index 0000000..ca28699
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/nand_bbt.c
@@ -0,0 +1,1112 @@
+/*
+ * drivers/mtd/nand_bbt.c
+ *
+ * Overview:
+ * Bad block table support for the NAND driver
+ *
+ * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * $Id: nand_bbt.c,v 1.36 2005/11/07 11:14:30 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description:
+ *
+ * When nand_scan_bbt is called, it tries to find the bad block table
+ * depending on the options in the bbt descriptor(s). If a bbt is found
+ * then the contents are read and the memory based bbt is created. If a
+ * mirrored bbt is selected then the mirror is searched too and the
+ * versions are compared. If the mirror has a greater version number
+ * then the mirror bbt is used to build the memory based bbt.
+ * If the tables are not versioned, then we "or" the bad block information.
+ * If one of the bbts is out of date or does not exist it is (re)created.
+ * If no bbt exists at all then the device is scanned for factory marked
+ * good / bad blocks and the bad block tables are created.
+ *
+ * For manufacturer created bbts like the one found on M-SYS DOC devices
+ * the bbt is searched and read but never created.
+ *
+ * The autogenerated bad block table is located in the last good blocks
+ * of the device. The table is mirrored, so it can be updated eventually.
+ * The table is marked in the oob area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date.
+ *
+ * The table uses 2 bits per block
+ * 11b: block is good
+ * 00b: block is factory marked bad
+ * 01b, 10b: block is marked bad due to wear
+ *
+ * The memory bad block table uses the following scheme:
+ * 00b: block is good
+ * 01b: block is marked bad due to wear
+ * 10b: block is reserved (to protect the bbt area)
+ * 11b: block is factory marked bad
+ *
+ * Multichip devices like DOC store the bad block info per floor.
+ *
+ * The following assumptions are made:
+ * - bbts start at a page boundary, if autolocated on a block boundary
+ * - the space necessary for a bbt in FLASH does not exceed a block boundary
+ *
+ */
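+
+/*
+ * Illustrative note (editorial sketch, not part of the driver logic):
+ * with the in-memory scheme above the state of block N is read back two
+ * bits at a time, exactly as nand_isbad_bbt() does at the end of this
+ * file:
+ *
+ *	entry = N << 1;
+ *	state = (this->bbt[entry >> 3] >> (entry & 0x06)) & 0x03;
+ *
+ * where state 00b is good, 01b worn bad, 10b reserved and 11b factory
+ * marked bad.
+ */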
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/compatmac.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block
+ * tables and good / bad block identifiers.
+ * If the SCAN_EMPTY option is set, check whether all bytes except the
+ * pattern area contain 0xff.
+ *
+*/
+static int check_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+ int i, end = 0;
+ uint8_t *p = buf;
+
+ end = paglen + td->offs;
+ if (td->options & NAND_BBT_SCANEMPTY) {
+ for (i = 0; i < end; i++) {
+ if (p[i] != 0xff)
+ return -1;
+ }
+ }
+ p += end;
+
+ /* Compare the pattern */
+ for (i = 0; i < td->len; i++) {
+ if (p[i] != td->pattern[i])
+ return -1;
+ }
+
+ if (td->options & NAND_BBT_SCANEMPTY) {
+ p += td->len;
+ end += td->len;
+ for (i = end; i < len; i++) {
+ if (*p++ != 0xff)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block
+ * tables and good / bad block identifiers. Same as check_pattern, but
+ * no optional empty check
+ *
+*/
+static int check_short_pattern (uint8_t *buf, struct nand_bbt_descr *td)
+{
+ int i;
+ uint8_t *p = buf;
+
+ /* Compare the pattern */
+ for (i = 0; i < td->len; i++) {
+ if (p[td->offs + i] != td->pattern[i])
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * read_bbt - [GENERIC] Read the bad block table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @bits: number of bits per block
+ * @offs: offset in the memory table
+ * @reserved_block_code: Pattern to identify reserved blocks
+ *
+ * Read the bad block table starting from page.
+ *
+ */
+static int read_bbt (struct mtd_info *mtd, uint8_t *buf, int page, int num,
+ int bits, int offs, int reserved_block_code)
+{
+ int res, i, j, act = 0;
+ struct nand_chip *this = mtd->priv;
+ size_t retlen, len, totlen;
+ loff_t from;
+ uint8_t msk = (uint8_t) ((1 << bits) - 1);
+
+ totlen = (num * bits) >> 3;
+ from = ((loff_t)page) << this->page_shift;
+
+ while (totlen) {
+ len = min (totlen, (size_t) (1 << this->bbt_erase_shift));
+ res = mtd->read_ecc (mtd, from, len, &retlen, buf, NULL, this->autooob);
+ if (res < 0) {
+ if (retlen != len) {
+ printk (KERN_INFO "nand_bbt: Error reading bad block table\n");
+ return res;
+ }
+ printk (KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
+ }
+
+ /* Analyse data */
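+ /* The flash table stores 'bits' bits per block while the memory
+ * table always uses two, hence act advances by 2 per block. */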
+ for (i = 0; i < len; i++) {
+ uint8_t dat = buf[i];
+ for (j = 0; j < 8; j += bits, act += 2) {
+ uint8_t tmp = (dat >> j) & msk;
+ if (tmp == msk)
+ continue;
+ if (reserved_block_code &&
+ (tmp == reserved_block_code)) {
+ printk (KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n",
+ ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
+ continue;
+ }
+ /* Leave it for now, if it has matured we can move this
+ * message to MTD_DEBUG_LEVEL0 */
+ printk (KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n",
+ ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+ /* Factory marked bad or worn out ? */
+ if (tmp == 0)
+ this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
+ else
+ this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
+ }
+ }
+ totlen -= len;
+ from += len;
+ }
+ return 0;
+}
+
+/**
+ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 read all chips.
+ * Applies only if NAND_BBT_PERCHIP option is set
+ *
+ * Read the bad block table for all chips starting at a given page
+ * We assume that the bbt bits are in consecutive order.
+*/
+static int read_abs_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ int res = 0, i;
+ int bits;
+
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ if (td->options & NAND_BBT_PERCHIP) {
+ int offs = 0;
+ for (i = 0; i < this->numchips; i++) {
+ if (chip == -1 || chip == i)
+ res = read_bbt (mtd, buf, td->pages[i], this->chipsize >> this->bbt_erase_shift, bits, offs, td->reserved_block_code);
+ if (res)
+ return res;
+ offs += this->chipsize >> (this->bbt_erase_shift + 2);
+ }
+ } else {
+ res = read_bbt (mtd, buf, td->pages[0], mtd->size >> this->bbt_erase_shift, bits, 0, td->reserved_block_code);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+/**
+ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Read the bad block table(s) for all chips starting at a given page
+ * We assume that the bbt bits are in consecutive order.
+ *
+*/
+static int read_abs_bbts (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td,
+ struct nand_bbt_descr *md)
+{
+ struct nand_chip *this = mtd->priv;
+
+ /* Read the primary version, if available */
+ if (td->options & NAND_BBT_VERSION) {
+ nand_read_raw (mtd, buf, td->pages[0] << this->page_shift, mtd->oobblock, mtd->oobsize);
+ td->version[0] = buf[mtd->oobblock + td->veroffs];
+ printk (KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", td->pages[0], td->version[0]);
+ }
+
+ /* Read the mirror version, if available */
+ if (md && (md->options & NAND_BBT_VERSION)) {
+ nand_read_raw (mtd, buf, md->pages[0] << this->page_shift, mtd->oobblock, mtd->oobsize);
+ md->version[0] = buf[mtd->oobblock + md->veroffs];
+ printk (KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", md->pages[0], md->version[0]);
+ }
+
+ return 1;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips.
+ * Applies only if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device
+ * for the given good/bad block identify pattern
+ */
+static int create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, j, numblocks, len, scanlen;
+ int startblock;
+ loff_t from;
+ size_t readlen, ooblen;
+
+ printk (KERN_INFO "Scanning device for bad blocks\n");
+
+ if (bd->options & NAND_BBT_SCANALLPAGES)
+ len = 1 << (this->bbt_erase_shift - this->page_shift);
+ else {
+ if (bd->options & NAND_BBT_SCAN2NDPAGE)
+ len = 2;
+ else
+ len = 1;
+ }
+
+ if (!(bd->options & NAND_BBT_SCANEMPTY)) {
+ /* We only need to read a few bytes from the OOB area */
+ scanlen = ooblen = 0;
+ readlen = bd->len;
+ } else {
+ /* Full page content should be read */
+ scanlen = mtd->oobblock + mtd->oobsize;
+ readlen = len * mtd->oobblock;
+ ooblen = len * mtd->oobsize;
+ }
+
+ if (chip == -1) {
+ /* Note that numblocks is 2 * (real numblocks) here, see i+=2 below as it
+ * makes shifting and masking less painful */
+ numblocks = mtd->size >> (this->bbt_erase_shift - 1);
+ startblock = 0;
+ from = 0;
+ } else {
+ if (chip >= this->numchips) {
+ printk (KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n",
+ chip + 1, this->numchips);
+ return -EINVAL;
+ }
+ numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
+ startblock = chip * numblocks;
+ numblocks += startblock;
+ from = startblock << (this->bbt_erase_shift - 1);
+ }
+
+ for (i = startblock; i < numblocks;) {
+ int ret;
+
+ if (bd->options & NAND_BBT_SCANEMPTY)
+ if ((ret = nand_read_raw (mtd, buf, from, readlen, ooblen)))
+ return ret;
+
+ for (j = 0; j < len; j++) {
+ if (!(bd->options & NAND_BBT_SCANEMPTY)) {
+ size_t retlen;
+
+ /* Read the full oob until read_oob is fixed to
+ * handle single byte reads for 16 bit buswidth */
+ ret = mtd->read_oob(mtd, from + j * mtd->oobblock,
+ mtd->oobsize, &retlen, buf);
+ if (ret)
+ return ret;
+
+ if (check_short_pattern (buf, bd)) {
+ this->bbt[i >> 3] |= 0x03 << (i & 0x6);
+ printk (KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
+ i >> 1, (unsigned int) from);
+ break;
+ }
+ } else {
+ if (check_pattern (&buf[j * scanlen], scanlen, mtd->oobblock, bd)) {
+ this->bbt[i >> 3] |= 0x03 << (i & 0x6);
+ printk (KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
+ i >> 1, (unsigned int) from);
+ break;
+ }
+ }
+ }
+ i += 2;
+ from += (1 << this->bbt_erase_shift);
+ }
+ return 0;
+}
+
+/**
+ * search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern.
+ * The search is performed either from the beginning up or from the end
+ * of the device downwards. The search always starts at the start of a
+ * block.
+ * If the option NAND_BBT_PERCHIP is given, each chip is searched
+ * for a bbt, which contains the bad block information of this chip.
+ * This is necessary to provide support for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page
+ * in a block.
+ */
+static int search_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, chips;
+ int bits, startblock, block, dir;
+ int scanlen = mtd->oobblock + mtd->oobsize;
+ int bbtblocks;
+
+ /* Search direction top -> down ? */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = (mtd->size >> this->bbt_erase_shift) -1;
+ dir = -1;
+ } else {
+ startblock = 0;
+ dir = 1;
+ }
+
+ /* Do we have a bbt per chip ? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ startblock &= bbtblocks - 1;
+ } else {
+ chips = 1;
+ bbtblocks = mtd->size >> this->bbt_erase_shift;
+ }
+
+ /* Number of bits for each erase block in the bbt */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+
+ for (i = 0; i < chips; i++) {
+ /* Reset version information */
+ td->version[i] = 0;
+ td->pages[i] = -1;
+ /* Scan the maximum number of blocks */
+ for (block = 0; block < td->maxblocks; block++) {
+ int actblock = startblock + dir * block;
+ /* Read first page */
+ nand_read_raw (mtd, buf, actblock << this->bbt_erase_shift, mtd->oobblock, mtd->oobsize);
+ if (!check_pattern(buf, scanlen, mtd->oobblock, td)) {
+ td->pages[i] = actblock << (this->bbt_erase_shift - this->page_shift);
+ if (td->options & NAND_BBT_VERSION) {
+ td->version[i] = buf[mtd->oobblock + td->veroffs];
+ }
+ break;
+ }
+ }
+ startblock += this->chipsize >> this->bbt_erase_shift;
+ }
+ /* Check, if we found a bbt for each requested chip */
+ for (i = 0; i < chips; i++) {
+ if (td->pages[i] == -1)
+ printk (KERN_WARNING "Bad block table not found for chip %d\n", i);
+ else
+ printk (KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i], td->version[i]);
+ }
+ return 0;
+}
+
+/**
+ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Search and read the bad block table(s)
+*/
+static int search_read_bbts (struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+ /* Search the primary table */
+ search_bbt (mtd, buf, td);
+
+ /* Search the mirror table */
+ if (md)
+ search_bbt (mtd, buf, md);
+
+ /* Force result check */
+ return 1;
+}
+
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ *
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table
+ *
+*/
+static int write_bbt (struct mtd_info *mtd, uint8_t *buf,
+ struct nand_bbt_descr *td, struct nand_bbt_descr *md, int chipsel)
+{
+ struct nand_chip *this = mtd->priv;
+ struct nand_oobinfo oobinfo;
+ struct erase_info einfo;
+ int i, j, res, chip = 0;
+ int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
+ int nrchips, bbtoffs, pageoffs;
+ uint8_t msk[4];
+ uint8_t rcode = td->reserved_block_code;
+ size_t retlen, len = 0;
+ loff_t to;
+
+ if (!rcode)
+ rcode = 0xff;
+ /* Write bad block table per chip rather than per device ? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ numblocks = (int) (this->chipsize >> this->bbt_erase_shift);
+ /* Full device write or specific chip ? */
+ if (chipsel == -1) {
+ nrchips = this->numchips;
+ } else {
+ nrchips = chipsel + 1;
+ chip = chipsel;
+ }
+ } else {
+ numblocks = (int) (mtd->size >> this->bbt_erase_shift);
+ nrchips = 1;
+ }
+
+ /* Loop through the chips */
+ for (; chip < nrchips; chip++) {
+
+ /* There was already a version of the table, reuse the page.
+ * This applies to absolute placement too, as we have the
+ * page nr. in td->pages.
+ */
+ if (td->pages[chip] != -1) {
+ page = td->pages[chip];
+ goto write;
+ }
+
+ /* Automatic placement of the bad block table */
+ /* Search direction top -> down ? */
+ if (td->options & NAND_BBT_LASTBLOCK) {
+ startblock = numblocks * (chip + 1) - 1;
+ dir = -1;
+ } else {
+ startblock = chip * numblocks;
+ dir = 1;
+ }
+
+ for (i = 0; i < td->maxblocks; i++) {
+ int block = startblock + dir * i;
+ /* Check, if the block is bad */
+ switch ((this->bbt[block >> 2] >> (2 * (block & 0x03))) & 0x03) {
+ case 0x01:
+ case 0x03:
+ continue;
+ }
+ page = block << (this->bbt_erase_shift - this->page_shift);
+ /* Check, if the block is used by the mirror table */
+ if (!md || md->pages[chip] != page)
+ goto write;
+ }
+ printk (KERN_ERR "No space left to write bad block table\n");
+ return -ENOSPC;
+write:
+
+ /* Set up shift count and masks for the flash table */
+ bits = td->options & NAND_BBT_NRBITS_MSK;
+ switch (bits) {
+ case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01; msk[2] = ~rcode; msk[3] = 0x01; break;
+ case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01; msk[2] = ~rcode; msk[3] = 0x03; break;
+ case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C; msk[2] = ~rcode; msk[3] = 0x0f; break;
+ case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F; msk[2] = ~rcode; msk[3] = 0xff; break;
+ default: return -EINVAL;
+ }
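+ /* msk[] maps the 2 bit in-memory block state (good, worn bad,
+ * reserved, factory marked bad) to the bits cleared in the flash
+ * table entry below. */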
+
+ bbtoffs = chip * (numblocks >> 2);
+
+ to = ((loff_t) page) << this->page_shift;
+
+ memcpy (&oobinfo, this->autooob, sizeof(oobinfo));
+ oobinfo.useecc = MTD_NANDECC_PLACEONLY;
+
+ /* Must we save the block contents ? */
+ if (td->options & NAND_BBT_SAVECONTENT) {
+ /* Make it block aligned */
+ to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
+ len = 1 << this->bbt_erase_shift;
+ res = mtd->read_ecc (mtd, to, len, &retlen, buf, &buf[len], &oobinfo);
+ if (res < 0) {
+ if (retlen != len) {
+ printk (KERN_INFO "nand_bbt: Error reading block for writing the bad block table\n");
+ return res;
+ }
+ printk (KERN_WARNING "nand_bbt: ECC error while reading block for writing bad block table\n");
+ }
+ /* Calc the byte offset in the buffer */
+ pageoffs = page - (int)(to >> this->page_shift);
+ offs = pageoffs << this->page_shift;
+ /* Preset the bbt area with 0xff */
+ memset (&buf[offs], 0xff, (size_t)(numblocks >> sft));
+ /* Preset the bbt's oob area with 0xff */
+ memset (&buf[len + pageoffs * mtd->oobsize], 0xff,
+ ((len >> this->page_shift) - pageoffs) * mtd->oobsize);
+ if (td->options & NAND_BBT_VERSION) {
+ buf[len + (pageoffs * mtd->oobsize) + td->veroffs] = td->version[chip];
+ }
+ } else {
+ /* Calc length */
+ len = (size_t) (numblocks >> sft);
+ /* Make it page aligned ! */
+ len = (len + (mtd->oobblock-1)) & ~(mtd->oobblock-1);
+ /* Preset the buffer with 0xff */
+ memset (buf, 0xff, len + (len >> this->page_shift) * mtd->oobsize);
+ offs = 0;
+ /* Pattern is located in oob area of first page */
+ memcpy (&buf[len + td->offs], td->pattern, td->len);
+ if (td->options & NAND_BBT_VERSION) {
+ buf[len + td->veroffs] = td->version[chip];
+ }
+ }
+
+ /* walk through the memory table */
+ for (i = 0; i < numblocks; ) {
+ uint8_t dat;
+ dat = this->bbt[bbtoffs + (i >> 2)];
+ for (j = 0; j < 4; j++ , i++) {
+ int sftcnt = (i << (3 - sft)) & sftmsk;
+ /* Do not store the reserved bbt blocks ! */
+ buf[offs + (i >> sft)] &= ~(msk[dat & 0x03] << sftcnt);
+ dat >>= 2;
+ }
+ }
+
+ memset (&einfo, 0, sizeof (einfo));
+ einfo.mtd = mtd;
+ einfo.addr = (unsigned long) to;
+ einfo.len = 1 << this->bbt_erase_shift;
+ res = nand_erase_nand (mtd, &einfo, 1);
+ if (res < 0) {
+ printk (KERN_WARNING "nand_bbt: Error during block erase: %d\n", res);
+ return res;
+ }
+
+ res = mtd->write_ecc (mtd, to, len, &retlen, buf, &buf[len], &oobinfo);
+ if (res < 0) {
+ printk (KERN_WARNING "nand_bbt: Error while writing bad block table %d\n", res);
+ return res;
+ }
+ printk (KERN_DEBUG "Bad block table written to 0x%08x, version 0x%02X\n",
+ (unsigned int) to, td->version[chip]);
+
+ /* Mark it as used */
+ td->pages[chip] = page;
+ }
+ return 0;
+}
+
+/**
+ * nand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device
+ * for manufacturer / software marked good / bad blocks
+*/
+static inline int nand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ bd->options &= ~NAND_BBT_SCANEMPTY;
+ return create_bbt (mtd, this->data_buf, bd, -1);
+}
+
+/**
+ * check_create - [GENERIC] create and write bbt(s) if necessary
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to read_bbt
+ * and creates / updates the bbt(s) if necessary.
+ * Creation is necessary if no bbt was found for the chip/device.
+ * Update is necessary if one of the tables is missing or the
+ * version nr. of one table is less than the other
+*/
+static int check_create (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
+{
+ int i, chips, writeops, chipsel, res;
+ struct nand_chip *this = mtd->priv;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+ struct nand_bbt_descr *rd, *rd2;
+
+ /* Do we have a bbt per chip ? */
+ if (td->options & NAND_BBT_PERCHIP)
+ chips = this->numchips;
+ else
+ chips = 1;
+
+ for (i = 0; i < chips; i++) {
+ writeops = 0;
+ rd = NULL;
+ rd2 = NULL;
+ /* Per chip or per device ? */
+ chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
+ /* Mirrored table available ? */
+ if (md) {
+ if (td->pages[i] == -1 && md->pages[i] == -1) {
+ writeops = 0x03;
+ goto create;
+ }
+
+ if (td->pages[i] == -1) {
+ rd = md;
+ td->version[i] = md->version[i];
+ writeops = 1;
+ goto writecheck;
+ }
+
+ if (md->pages[i] == -1) {
+ rd = td;
+ md->version[i] = td->version[i];
+ writeops = 2;
+ goto writecheck;
+ }
+
+ if (td->version[i] == md->version[i]) {
+ rd = td;
+ if (!(td->options & NAND_BBT_VERSION))
+ rd2 = md;
+ goto writecheck;
+ }
+
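+ /* Signed 8 bit difference, so the comparison still picks the
+ * newer table when the version counter wraps around. */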
+ if (((int8_t) (td->version[i] - md->version[i])) > 0) {
+ rd = td;
+ md->version[i] = td->version[i];
+ writeops = 2;
+ } else {
+ rd = md;
+ td->version[i] = md->version[i];
+ writeops = 1;
+ }
+
+ goto writecheck;
+
+ } else {
+ if (td->pages[i] == -1) {
+ writeops = 0x01;
+ goto create;
+ }
+ rd = td;
+ goto writecheck;
+ }
+create:
+ /* Create the bad block table by scanning the device ? */
+ if (!(td->options & NAND_BBT_CREATE))
+ continue;
+
+ /* Create the table in memory by scanning the chip(s) */
+ create_bbt (mtd, buf, bd, chipsel);
+
+ td->version[i] = 1;
+ if (md)
+ md->version[i] = 1;
+writecheck:
+ /* read back first ? */
+ if (rd)
+ read_abs_bbt (mtd, buf, rd, chipsel);
+ /* If they weren't versioned, read both. */
+ if (rd2)
+ read_abs_bbt (mtd, buf, rd2, chipsel);
+
+ /* Write the bad block table to the device ? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ res = write_bbt (mtd, buf, td, md, chipsel);
+ if (res < 0)
+ return res;
+ }
+
+ /* Write the mirror bad block table to the device ? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt (mtd, buf, md, td, chipsel);
+ if (res < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+/**
+ * mark_bbt_region - [GENERIC] mark the bad block table regions
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as "bad" to prevent
+ * accidental erasures / writes. The regions are identified by
+ * the mark 0x02.
+*/
+static void mark_bbt_region (struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+ struct nand_chip *this = mtd->priv;
+ int i, j, chips, block, nrblocks, update;
+ uint8_t oldval, newval;
+
+ /* Do we have a bbt per chip ? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chips = this->numchips;
+ nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ } else {
+ chips = 1;
+ nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+ }
+
+ for (i = 0; i < chips; i++) {
+ if ((td->options & NAND_BBT_ABSPAGE) ||
+ !(td->options & NAND_BBT_WRITE)) {
+ if (td->pages[i] == -1) continue;
+ block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+ block <<= 1;
+ oldval = this->bbt[(block >> 3)];
+ newval = oldval | (0x2 << (block & 0x06));
+ this->bbt[(block >> 3)] = newval;
+ if ((oldval != newval) && td->reserved_block_code)
+ nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1));
+ continue;
+ }
+ update = 0;
+ if (td->options & NAND_BBT_LASTBLOCK)
+ block = ((i + 1) * nrblocks) - td->maxblocks;
+ else
+ block = i * nrblocks;
+ block <<= 1;
+ for (j = 0; j < td->maxblocks; j++) {
+ oldval = this->bbt[(block >> 3)];
+ newval = oldval | (0x2 << (block & 0x06));
+ this->bbt[(block >> 3)] = newval;
+ if (oldval != newval) update = 1;
+ block += 2;
+ }
+ /* If we want reserved blocks to be recorded to flash, and some
+ new ones have been marked, then we need to update the stored
+ bbts. This should only happen once. */
+ if (update && td->reserved_block_code)
+ nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1));
+ }
+}
+
+/**
+ * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks, if a bad block table(s) is/are already
+ * available. If not it scans the device for manufacturer
+ * marked good / bad blocks and writes the bad block table(s) to
+ * the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed
+ * by calling the nand_free_bbt function.
+ *
+*/
+int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+ struct nand_chip *this = mtd->priv;
+ int len, res = 0;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ len = mtd->size >> (this->bbt_erase_shift + 2);
+ /* Allocate memory (2bit per block) */
+ this->bbt = kmalloc (len, GFP_KERNEL);
+ if (!this->bbt) {
+ printk (KERN_ERR "nand_scan_bbt: Out of memory\n");
+ return -ENOMEM;
+ }
+ /* Clear the memory bad block table */
+ memset (this->bbt, 0x00, len);
+
+ /* If no primary table descriptor is given, scan the device
+ * to build a memory based bad block table
+ */
+ if (!td) {
+ if ((res = nand_memory_bbt(mtd, bd))) {
+ printk (KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n");
+ kfree (this->bbt);
+ this->bbt = NULL;
+ }
+ return res;
+ }
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = kmalloc (len, GFP_KERNEL);
+ if (!buf) {
+ printk (KERN_ERR "nand_bbt: Out of memory\n");
+ kfree (this->bbt);
+ this->bbt = NULL;
+ return -ENOMEM;
+ }
+
+ /* Is the bbt at a given page ? */
+ if (td->options & NAND_BBT_ABSPAGE) {
+ res = read_abs_bbts (mtd, buf, td, md);
+ } else {
+ /* Search the bad block table using a pattern in oob */
+ res = search_read_bbts (mtd, buf, td, md);
+ }
+
+ if (res)
+ res = check_create (mtd, buf, bd);
+
+ /* Prevent the bbt regions from erasing / writing */
+ mark_bbt_region (mtd, td);
+ if (md)
+ mark_bbt_region (mtd, md);
+
+ kfree (buf);
+ return res;
+}
+
+
+/**
+ * nand_update_bbt - [NAND Interface] update bad block table(s)
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s)
+*/
+int nand_update_bbt (struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd->priv;
+ int len, res = 0, writeops = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ len = mtd->size >> (this->bbt_erase_shift + 2);
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = kmalloc (len, GFP_KERNEL);
+ if (!buf) {
+ printk (KERN_ERR "nand_update_bbt: Out of memory\n");
+ return -ENOMEM;
+ }
+
+ writeops = md != NULL ? 0x03 : 0x01;
+
+ /* Do we have a bbt per chip ? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chip = (int) (offs >> this->chip_shift);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device ? */
+ if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+ res = write_bbt (mtd, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device ? */
+ if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt (mtd, buf, md, td, chipsel);
+ }
+
+out:
+ kfree (buf);
+ return res;
+}
+
+/* Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks. */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr smallpage_memorybased = {
+ .options = NAND_BBT_SCAN2NDPAGE,
+ .offs = 5,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+static struct nand_bbt_descr largepage_memorybased = {
+ .options = 0,
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern
+};
+
+static struct nand_bbt_descr smallpage_flashbased = {
+ .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .offs = 5,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+static struct nand_bbt_descr largepage_flashbased = {
+ .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .offs = 0,
+ .len = 2,
+ .pattern = scan_ff_pattern
+};
+
+static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
+
+static struct nand_bbt_descr agand_flashbased = {
+ .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
+ .offs = 0x20,
+ .len = 6,
+ .pattern = scan_agand_pattern
+};
+
+/* Generic flash bbt descriptors
+*/
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 8,
+ .len = 4,
+ .veroffs = 12,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+/**
+ * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
+ * @mtd: MTD device structure
+ *
+ * This function selects the default bad block table
+ * support for the device and calls the nand_scan_bbt function
+ *
+*/
+int nand_default_bbt (struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+
+ /* Default for AG-AND. We must use a flash based
+ * bad block table as the devices have factory marked
+ * _good_ blocks. Erasing those blocks leads to loss
+ * of the good / bad information, so we _must_ store
+ * this information in a good / bad table during
+ * startup
+ */
+ if (this->options & NAND_IS_AND) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ this->options |= NAND_USE_FLASH_BBT;
+ return nand_scan_bbt (mtd, &agand_flashbased);
+ }
+
+
+ /* Is a flash based bad block table requested ? */
+ if (this->options & NAND_USE_FLASH_BBT) {
+ /* Use the default pattern descriptors */
+ if (!this->bbt_td) {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+ if (!this->badblock_pattern) {
+ this->badblock_pattern = (mtd->oobblock > 512) ?
+ &largepage_flashbased : &smallpage_flashbased;
+ }
+ } else {
+ this->bbt_td = NULL;
+ this->bbt_md = NULL;
+ if (!this->badblock_pattern) {
+ this->badblock_pattern = (mtd->oobblock > 512) ?
+ &largepage_memorybased : &smallpage_memorybased;
+ }
+ }
+ return nand_scan_bbt (mtd, this->badblock_pattern);
+}
+
+/**
+ * nand_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ *
+*/
+int nand_isbad_bbt (struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+ struct nand_chip *this = mtd->priv;
+ int block;
+ uint8_t res;
+
+ /* Get block number * 2 */
+ block = (int) (offs >> (this->bbt_erase_shift - 1));
+ res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
+
+ DEBUG (MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+ (unsigned int)offs, block >> 1, res);
+
+ switch ((int)res) {
+ case 0x00: return 0;
+ case 0x01: return 1;
+ case 0x02: return allowbbt ? 0 : 1;
+ }
+ return 1;
+}
+
+EXPORT_SYMBOL (nand_scan_bbt);
+EXPORT_SYMBOL (nand_default_bbt);
diff --git a/linux-2.4.x/drivers/mtd/nand/nand_ecc.c b/linux-2.4.x/drivers/mtd/nand/nand_ecc.c
index 8149559..40ac909 100644
--- a/linux-2.4.x/drivers/mtd/nand/nand_ecc.c
+++ b/linux-2.4.x/drivers/mtd/nand/nand_ecc.c
@@ -1,22 +1,44 @@
/*
- * drivers/mtd/nand_ecc.c
+ * This file contains an ECC algorithm from Toshiba that detects and
+ * corrects 1 bit errors in a 256 byte block of data.
*
- * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
- * Toshiba America Electronics Components, Inc.
+ * drivers/mtd/nand/nand_ecc.c
*
- * $Id: nand_ecc.c,v 1.7 2002/03/21 14:13:50 dwmw2 Exp $
+ * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com)
+ * Toshiba America Electronics Components, Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * $Id: nand_ecc.c,v 1.15 2005/11/07 11:14:30 gleixner Exp $
*
- * This file contains an ECC algorithm from Toshiba that detects and
- * corrects 1 bit errors in a 256 byte block of data.
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this file; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * As a special exception, if other files instantiate templates or use
+ * macros or inline functions from these files, or you compile these
+ * files and link them with other works to produce a work based on these
+ * files, these files do not by themselves cause the resulting work to be
+ * covered by the GNU General Public License. However the source code for
+ * these files must still be made available in accordance with section (3)
+ * of the GNU General Public License.
+ *
+ * This exception does not invalidate any other reasons why a work based on
+ * this file might be covered by the GNU General Public License.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mtd/nand_ecc.h>
/*
* Pre-calculated 256-way 1 byte column parity
@@ -41,18 +63,23 @@ static const u_char nand_ecc_precalc_table[] = {
};
-/*
+/**
+ * nand_trans_result - [GENERIC] create non-inverted ECC
+ * @reg2: line parity reg 2
+ * @reg3: line parity reg 3
+ * @ecc_code: ecc
+ *
* Creates non-inverted ECC code from line parity
*/
static void nand_trans_result(u_char reg2, u_char reg3,
u_char *ecc_code)
{
u_char a, b, i, tmp1, tmp2;
-
+
/* Initialize variables */
a = b = 0x80;
tmp1 = tmp2 = 0;
-
+
/* Calculate first ECC byte */
for (i = 0; i < 4; i++) {
if (reg3 & a) /* LP15,13,11,9 --> ecc_code[0] */
@@ -63,7 +90,7 @@ static void nand_trans_result(u_char reg2, u_char reg3,
b >>= 1;
a >>= 1;
}
-
+
/* Calculate second ECC byte */
b = 0x80;
for (i = 0; i < 4; i++) {
@@ -75,59 +102,69 @@ static void nand_trans_result(u_char reg2, u_char reg3,
b >>= 1;
a >>= 1;
}
-
+
/* Store two of the ECC bytes */
ecc_code[0] = tmp1;
ecc_code[1] = tmp2;
}
-/*
- * Calculate 3 byte ECC code for 256 byte block
+/**
+ * nand_calculate_ecc - [NAND Interface] Calculate 3 byte ECC code for 256 byte block
+ * @mtd: MTD block structure
+ * @dat: raw data
+ * @ecc_code: buffer for ECC
*/
-void nand_calculate_ecc (const u_char *dat, u_char *ecc_code)
+int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
{
u_char idx, reg1, reg2, reg3;
int j;
-
+
/* Initialize variables */
reg1 = reg2 = reg3 = 0;
ecc_code[0] = ecc_code[1] = ecc_code[2] = 0;
-
- /* Build up column parity */
+
+ /* Build up column parity */
for(j = 0; j < 256; j++) {
-
+
/* Get CP0 - CP5 from table */
idx = nand_ecc_precalc_table[dat[j]];
reg1 ^= (idx & 0x3f);
-
+
/* All bit XOR = 1 ? */
if (idx & 0x40) {
reg3 ^= (u_char) j;
reg2 ^= ~((u_char) j);
}
}
-
+
/* Create non-inverted ECC code from line parity */
nand_trans_result(reg2, reg3, ecc_code);
-
+
/* Calculate final ECC code */
ecc_code[0] = ~ecc_code[0];
ecc_code[1] = ~ecc_code[1];
ecc_code[2] = ((~reg1) << 2) | 0x03;
+ return 0;
}
-/*
+/**
+ * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd: MTD block structure
+ * @dat: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
* Detect and correct a 1 bit error for 256 byte block
*/
-int nand_correct_data (u_char *dat, u_char *read_ecc, u_char *calc_ecc)
+int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
{
u_char a, b, c, d1, d2, d3, add, bit, i;
-
- /* Do error detection */
+
+ /* Do error detection */
d1 = calc_ecc[0] ^ read_ecc[0];
d2 = calc_ecc[1] ^ read_ecc[1];
d3 = calc_ecc[2] ^ read_ecc[2];
-
+
if ((d1 | d2 | d3) == 0) {
/* No errors */
return 0;
@@ -136,7 +173,7 @@ int nand_correct_data (u_char *dat, u_char *read_ecc, u_char *calc_ecc)
a = (d1 ^ (d1 >> 1)) & 0x55;
b = (d2 ^ (d2 >> 1)) & 0x55;
c = (d3 ^ (d3 >> 1)) & 0x54;
-
+
/* Found and will correct single bit error in the data */
if ((a == 0x55) && (b == 0x55) && (c == 0x54)) {
c = 0x80;
@@ -200,7 +237,7 @@ int nand_correct_data (u_char *dat, u_char *read_ecc, u_char *calc_ecc)
}
}
}
-
+
/* Should never happen */
return -1;
}
@@ -209,5 +246,5 @@ EXPORT_SYMBOL(nand_calculate_ecc);
EXPORT_SYMBOL(nand_correct_data);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Steven J. Hill <sjhill@cotw.com>");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_DESCRIPTION("Generic NAND ECC support");
diff --git a/linux-2.4.x/drivers/mtd/nand/nand_ids.c b/linux-2.4.x/drivers/mtd/nand/nand_ids.c
new file mode 100644
index 0000000..dbc7e55
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/nand_ids.c
@@ -0,0 +1,137 @@
+/*
+ * drivers/mtd/nandids.c
+ *
+ * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * $Id: nand_ids.c,v 1.16 2005/11/07 11:14:31 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+/*
+* Chip ID list
+*
+* Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
+* options
+*
+* Pagesize; 0, 256, 512
+* 0 get this information from the extended chip ID
+* 256 256 Byte page size
+* 512 512 Byte page size
+*/
+struct nand_flash_dev nand_flash_ids[] = {
+ {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0},
+ {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0},
+ {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0},
+ {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0},
+ {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0},
+ {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0},
+ {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0},
+ {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0},
+ {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0},
+ {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0},
+
+ {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0},
+ {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0},
+ {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
+ {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
+
+ {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0},
+ {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
+ {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0},
+ {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0},
+ {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0},
+ {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0},
+ {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0},
+ {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0},
+ {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0},
+ {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+ {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},
+
+ {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0},
+
+ /* These are the new chips with large page size. The pagesize
+ * and the erasesize is determined from the extended id bytes
+ */
+ /*512 Megabit */
+ {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+ {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+
+ /* 1 Gigabit */
+ {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+ {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+
+ /* 2 Gigabit */
+ {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+ {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+
+ /* 4 Gigabit */
+ {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+ {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+
+ /* 8 Gigabit */
+ {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+ {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+
+ /* 16 Gigabit */
+ {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR},
+ {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+ {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR},
+
+ /* Renesas AND 1 Gigabit. These chips do not support the extended ID and have an unusual page/block layout.
+ * The chosen minimum erasesize is 4 * 2 * 2048 = 16384 bytes, as these chips have an array of 4 page planes.
+ * 1 block = 2 pages, but due to the plane arrangement, blocks 0-3 consist of pages 0+4, 1+5, 2+6, 3+7.
+ * JFFS2 would increase the eraseblock size anyway, so we chose a combined one which can be erased in one go.
+ * Further speed improvements for reads and writes are possible, but not implemented yet.
+ */
+ {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, NAND_IS_AND | NAND_NO_AUTOINCR | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
+
+ {NULL,}
+};
+
+/*
+* Manufacturer ID list
+*/
+struct nand_manufacturers nand_manuf_ids[] = {
+ {NAND_MFR_TOSHIBA, "Toshiba"},
+ {NAND_MFR_SAMSUNG, "Samsung"},
+ {NAND_MFR_FUJITSU, "Fujitsu"},
+ {NAND_MFR_NATIONAL, "National"},
+ {NAND_MFR_RENESAS, "Renesas"},
+ {NAND_MFR_STMICRO, "ST Micro"},
+ {NAND_MFR_HYNIX, "Hynix"},
+ {0x0, "Unknown"}
+};
+
+EXPORT_SYMBOL (nand_manuf_ids);
+EXPORT_SYMBOL (nand_flash_ids);
+
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION ("Nand device & manufacturer ID's");
diff --git a/linux-2.4.x/drivers/mtd/nand/nandsim.c b/linux-2.4.x/drivers/mtd/nand/nandsim.c
new file mode 100644
index 0000000..62a63ec
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/nandsim.c
@@ -0,0 +1,1609 @@
+/*
+ * NAND flash simulator.
+ *
+ * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ *
+ * Note: NS means "NAND Simulator".
+ * Note: Input means input TO flash chip, output means output FROM chip.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ * Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
+ *
+ * $Id: nandsim.c,v 1.13 2005/11/29 14:48:32 gleixner Exp $
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+
+#ifdef CONFIG_NS_ABS_POS
+#include <asm/io.h>
+#endif
+
+#define CONFIG_NANDSIM_FIRST_ID_BYTE CONFIG_MTD_NAND_NANDSIM_FIRST_ID
+#define CONFIG_NANDSIM_SECOND_ID_BYTE CONFIG_MTD_NAND_NANDSIM_SECOND_ID
+#define CONFIG_NANDSIM_THIRD_ID_BYTE CONFIG_MTD_NAND_NANDSIM_THIRD_ID
+#define CONFIG_NANDSIM_FOURTH_ID_BYTE CONFIG_MTD_NAND_NANDSIM_FOURTH_ID
+
+#ifndef CONFIG_MTD_NAND_NANDSIM_ACCESS_DELAY
+# define CONFIG_MTD_NAND_NANDSIM_ACCESS_DELAY 25
+#endif
+
+#ifndef CONFIG_MTD_NAND_NANDSIM_PROGRAM_DELAY
+# define CONFIG_MTD_NAND_NANDSIM_PROGRAM_DELAY 200
+#endif
+
+#ifndef CONFIG_MTD_NAND_NANDSIM_ERASE_DELAY
+# define CONFIG_MTD_NAND_NANDSIM_ERASE_DELAY 2
+#endif
+
+#ifndef CONFIG_MTD_NAND_NANDSIM_OUTPUT_CYCLE
+# define CONFIG_MTD_NAND_NANDSIM_OUTPUT_CYCLE 40
+#endif
+
+#ifndef CONFIG_MTD_NAND_NANDSIM_INPUT_CYCLE
+# define CONFIG_MTD_NAND_NANDSIM_INPUT_CYCLE 50
+#endif
+
+#ifdef CONFIG_MTD_NAND_NANDSIM_16BIT_BUS
+# define CONFIG_NANDSIM_BUS_WIDTH 16
+#else
+# define CONFIG_NANDSIM_BUS_WIDTH 8
+#endif
+
+#ifndef CONFIG_MTD_NAND_NANDSIM_DELAY
+# define CONFIG_MTD_NAND_NANDSIM_DELAY 0
+#else
+# undef CONFIG_MTD_NAND_NANDSIM_DELAY
+# define CONFIG_MTD_NAND_NANDSIM_DELAY 1
+#endif
+
+#ifndef CONFIG_MTD_NAND_NANDSIM_LOG
+# define CONFIG_MTD_NAND_NANDSIM_LOG 0
+#else
+# undef CONFIG_MTD_NAND_NANDSIM_LOG
+# define CONFIG_MTD_NAND_NANDSIM_LOG 1
+#endif
+
+#ifndef CONFIG_MTD_NAND_NANDSIM_DEBUG
+# define CONFIG_MTD_NAND_NANDSIM_DEBUG 0
+#else
+# undef CONFIG_MTD_NAND_NANDSIM_DEBUG
+# define CONFIG_MTD_NAND_NANDSIM_DEBUG 1
+#endif
+
+static uint first_id_byte = CONFIG_NANDSIM_FIRST_ID_BYTE;
+static uint second_id_byte = CONFIG_NANDSIM_SECOND_ID_BYTE;
+static uint third_id_byte = CONFIG_NANDSIM_THIRD_ID_BYTE;
+static uint fourth_id_byte = CONFIG_NANDSIM_FOURTH_ID_BYTE;
+static uint access_delay = CONFIG_MTD_NAND_NANDSIM_ACCESS_DELAY;
+static uint programm_delay = CONFIG_MTD_NAND_NANDSIM_PROGRAM_DELAY;
+static uint erase_delay = CONFIG_MTD_NAND_NANDSIM_ERASE_DELAY;
+static uint output_cycle = CONFIG_MTD_NAND_NANDSIM_OUTPUT_CYCLE;
+static uint input_cycle = CONFIG_MTD_NAND_NANDSIM_INPUT_CYCLE;
+static uint bus_width = CONFIG_NANDSIM_BUS_WIDTH;
+static uint do_delays = CONFIG_MTD_NAND_NANDSIM_DELAY;
+static uint log = CONFIG_MTD_NAND_NANDSIM_LOG;
+static uint dbg = CONFIG_MTD_NAND_NANDSIM_DEBUG;
+
+module_param(first_id_byte, uint, 0400);
+module_param(second_id_byte, uint, 0400);
+module_param(third_id_byte, uint, 0400);
+module_param(fourth_id_byte, uint, 0400);
+module_param(access_delay, uint, 0400);
+module_param(programm_delay, uint, 0400);
+module_param(erase_delay, uint, 0400);
+module_param(output_cycle, uint, 0400);
+module_param(input_cycle, uint, 0400);
+module_param(bus_width, uint, 0400);
+module_param(do_delays, uint, 0400);
+module_param(log, uint, 0400);
+module_param(dbg, uint, 0400);
+
+MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
+MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
+MODULE_PARM_DESC(third_id_byte, "The third byte returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(access_delay, "Initial page access delay (microseconds)");
+MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
+MODULE_PARM_DESC(erase_delay, "Sector erase delay (milliseconds)");
+MODULE_PARM_DESC(output_cycle, "Word output (from flash) time (nanoseconds)");
+MODULE_PARM_DESC(input_cycle, "Word input (to flash) time (nanoseconds)");
+MODULE_PARM_DESC(bus_width, "Chip's bus width (8- or 16-bit)");
+MODULE_PARM_DESC(do_delays, "Simulate NAND delays using busy-waits if not zero");
+MODULE_PARM_DESC(log, "Perform logging if not zero");
+MODULE_PARM_DESC(dbg, "Output debug information if not zero");
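+
+/*
+ * Illustrative invocation (example values only, not a recommendation):
+ * "modprobe nandsim first_id_byte=0xec second_id_byte=0x73" simulates a
+ * chip with the Samsung manufacturer ID which the nand_flash_ids table
+ * identifies as "NAND 16MiB 3,3V 8-bit".
+ */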
+
+/* The largest possible page size */
+#define NS_LARGEST_PAGE_SIZE 2048
+
+/* The prefix for simulator output */
+#define NS_OUTPUT_PREFIX "[nandsim]"
+
+/* Simulator's output macros (logging, debugging, warning, error) */
+#define NS_LOG(args...) \
+ do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
+#define NS_DBG(args...) \
+ do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
+#define NS_WARN(args...) \
+ do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
+#define NS_ERR(args...) \
+ do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
+
+/* Busy-wait delay macros (microseconds, milliseconds) */
+#define NS_UDELAY(us) \
+ do { if (do_delays) udelay(us); } while(0)
+#define NS_MDELAY(us) \
+ do { if (do_delays) mdelay(us); } while(0)
+
+/* Is the nandsim structure initialized ? */
+#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
+
+/* Good operation completion status */
+#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
+
+/* Operation failed completion status */
+#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
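+
+/*
+ * Note on the two status macros above: in the NAND status register the WP
+ * bit is set when the chip is NOT write-protected, which is why it is
+ * multiplied by (wp == 0) rather than by the wp line itself.
+ */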
+
+/* Calculate the page offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET(ns) \
+ (((ns)->regs.row << (ns)->geom.pgshift) + ((ns)->regs.row * (ns)->geom.oobsz) + (ns)->regs.column)
+
+/* Calculate the OOB offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
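+
+/*
+ * Worked example (assuming a 512 byte page with a 16 byte OOB area, i.e.
+ * pgshift = 9): for row = 3 and column = 10,
+ * NS_RAW_OFFSET = (3 << 9) + 3 * 16 + 10 = 1594, and NS_RAW_OFFSET_OOB
+ * points 512 bytes further into the image, at offset 2106.
+ */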
+
+/* After a command is input, the simulator goes to one of the following states */
+#define STATE_CMD_READ0 0x00000001 /* read data from the beginning of page */
+#define STATE_CMD_READ1 0x00000002 /* read data from the second half of page */
+#define STATE_CMD_READSTART 0x00000003 /* read data second command (large page devices) */
+#define STATE_CMD_PAGEPROG 0x00000004 /* start page program */
+#define STATE_CMD_READOOB 0x00000005 /* read OOB area */
+#define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */
+#define STATE_CMD_STATUS 0x00000007 /* read status */
+#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */
+#define STATE_CMD_SEQIN 0x00000009 /* sequential data input */
+#define STATE_CMD_READID 0x0000000A /* read ID */
+#define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */
+#define STATE_CMD_RESET 0x0000000C /* reset */
+#define STATE_CMD_MASK 0x0000000F /* command states mask */
+
+/* After an address is input, the simulator goes to one of these states */
+#define STATE_ADDR_PAGE 0x00000010 /* full (row, column) address is accepted */
+#define STATE_ADDR_SEC 0x00000020 /* sector address was accepted */
+#define STATE_ADDR_ZERO 0x00000030 /* one byte zero address was accepted */
+#define STATE_ADDR_MASK 0x00000030 /* address states mask */
+
+/* During data input/output the simulator is in one of these states */
+#define STATE_DATAIN 0x00000100 /* waiting for data input */
+#define STATE_DATAIN_MASK 0x00000100 /* data input states mask */
+
+#define STATE_DATAOUT 0x00001000 /* waiting for page data output */
+#define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */
+#define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */
+#define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */
+#define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */
+
+/* Previous operation is done, ready to accept new requests */
+#define STATE_READY 0x00000000
+
+/* This state is used to mark that the next state isn't known yet */
+#define STATE_UNKNOWN 0x10000000
+
+/* Simulator's actions bit masks */
+#define ACTION_CPY 0x00100000 /* copy page/OOB to the internal buffer */
+#define ACTION_PRGPAGE 0x00200000 /* program the internal buffer to flash */
+#define ACTION_SECERASE 0x00300000 /* erase sector */
+#define ACTION_ZEROOFF 0x00400000 /* don't add any offset to address */
+#define ACTION_HALFOFF 0x00500000 /* add to address half of page */
+#define ACTION_OOBOFF 0x00600000 /* add to address OOB offset */
+#define ACTION_MASK 0x00700000 /* action mask */
+
+#define NS_OPER_NUM 12 /* Number of operations supported by the simulator */
+#define NS_OPER_STATES 6 /* Maximum number of states in operation */
+
+#define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */
+#define OPT_PAGE256 0x00000001 /* 256-byte page chips */
+#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
+#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
+#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
+#define OPT_AUTOINCR 0x00000020 /* page number auto increment is possible */
+#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
+#define OPT_LARGEPAGE (OPT_PAGE2048) /* 2048-byte page chips */
+#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */
+
+/* Remove action bits from state */
+#define NS_STATE(x) ((x) & ~ACTION_MASK)
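+
+/*
+ * Example of the encoding used above: STATE_CMD_READ0 | ACTION_ZEROOFF is
+ * 0x00000001 | 0x00400000 = 0x00400001, i.e. "READ0 command accepted, reset
+ * the internal page offset to zero"; NS_STATE() masks the action bits off
+ * and yields 0x00000001 again.
+ */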
+
+/*
+ * Maximum number of previous states which need to be saved. Currently
+ * saving is only needed for the page program operation preceded by a read
+ * command (which is only valid for 512-byte pages).
+ */
+#define NS_MAX_PREVSTATES 1
+
+/*
+ * The structure which describes all the internal simulator data.
+ */
+struct nandsim {
+ struct mtd_partition part;
+
+ uint busw; /* flash chip bus width (8 or 16) */
+ u_char ids[4]; /* chip's ID bytes */
+ uint32_t options; /* chip's characteristic bits */
+ uint32_t state; /* current chip state */
+ uint32_t nxstate; /* next expected state */
+
+ uint32_t *op; /* current operation, NULL if the operation isn't known yet */
+ uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
+ uint16_t npstates; /* number of previous states saved */
+ uint16_t stateidx; /* current state index */
+
+ /* The simulated NAND flash image */
+ union flash_media {
+ u_char *byte;
+ uint16_t *word;
+ } mem;
+
+ /* Internal buffer of page + OOB size bytes */
+ union internal_buffer {
+ u_char *byte; /* for byte access */
+ uint16_t *word; /* for 16-bit word access */
+ } buf;
+
+ /* NAND flash "geometry" */
+ struct nandsim_geometry {
+ uint32_t totsz; /* total flash size, bytes */
+ uint32_t secsz; /* flash sector (erase block) size, bytes */
+ uint pgsz; /* NAND flash page size, bytes */
+ uint oobsz; /* page OOB area size, bytes */
+ uint32_t totszoob; /* total flash size including OOB, bytes */
+ uint pgszoob; /* page size including OOB, bytes */
+ uint secszoob; /* sector size including OOB, bytes */
+ uint pgnum; /* total number of pages */
+ uint pgsec; /* number of pages per sector */
+ uint secshift; /* number of bits in sector size */
+ uint pgshift; /* number of bits in page size */
+ uint oobshift; /* number of bits in OOB size */
+ uint pgaddrbytes; /* bytes per page address */
+ uint secaddrbytes; /* bytes per sector address */
+ uint idbytes; /* the number of ID bytes that this chip outputs */
+ } geom;
+
+ /* NAND flash internal registers */
+ struct nandsim_regs {
+ unsigned command; /* the command register */
+ u_char status; /* the status register */
+ uint row; /* the page number */
+ uint column; /* the offset within page */
+ uint count; /* internal counter */
+ uint num; /* number of bytes which must be processed */
+ uint off; /* fixed page offset */
+ } regs;
+
+ /* NAND flash lines state */
+ struct ns_lines_status {
+ int ce; /* Chip Enable */
+ int cle; /* Command Latch Enable */
+ int ale; /* Address Latch Enable */
+ int wp; /* Write Protect */
+ } lines;
+};
+
+/*
+ * Operations array. To perform any operation the simulator must pass
+ * through the correspondent states chain.
+ */
+static struct nandsim_operations {
+ uint32_t reqopts; /* options which are required to perform the operation */
+ uint32_t states[NS_OPER_STATES]; /* operation's states */
+} ops[NS_OPER_NUM] = {
+ /* Read page + OOB from the beginning */
+ {OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Read page + OOB from the second half */
+ {OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Read OOB */
+ {OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}},
+ /* Program page starting from the beginning */
+ {OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
+ STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program page starting from the beginning (with a preceding read command) */
+ {OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program page starting from the second half */
+ {OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Program OOB */
+ {OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
+ STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+ /* Erase sector */
+ {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
+ /* Read status */
+ {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
+ /* Read multi-plane status */
+ {OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}},
+ /* Read ID */
+ {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
+ /* Large page devices read page */
+ {OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
+ STATE_DATAOUT, STATE_READY}}
+};
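+
+/*
+ * For illustration, the first (small-page "read page + OOB") operation above
+ * walks the chain STATE_CMD_READ0 (ACTION_ZEROOFF resets the page offset) ->
+ * STATE_ADDR_PAGE (ACTION_CPY copies the addressed page into the internal
+ * buffer) -> STATE_DATAOUT -> STATE_READY.
+ */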
+
+/* MTD structure for NAND controller */
+static struct mtd_info *nsmtd;
+
+static u_char ns_verify_buf[NS_LARGEST_PAGE_SIZE];
+
+/*
+ * Initialize the nandsim structure.
+ *
+ * RETURNS: 0 if success, -ERRNO if failure.
+ */
+static int
+init_nandsim(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+ struct nandsim *ns = (struct nandsim *)(chip->priv);
+ int i;
+
+ if (NS_IS_INITIALIZED(ns)) {
+ NS_ERR("init_nandsim: nandsim is already initialized\n");
+ return -EIO;
+ }
+
+ /* Force mtd to not do delays */
+ chip->chip_delay = 0;
+
+ /* Initialize the NAND flash parameters */
+ ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
+ ns->geom.totsz = mtd->size;
+ ns->geom.pgsz = mtd->oobblock;
+ ns->geom.oobsz = mtd->oobsize;
+ ns->geom.secsz = mtd->erasesize;
+ ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
+ ns->geom.pgnum = ns->geom.totsz / ns->geom.pgsz;
+ ns->geom.totszoob = ns->geom.totsz + ns->geom.pgnum * ns->geom.oobsz;
+ ns->geom.secshift = ffs(ns->geom.secsz) - 1;
+ ns->geom.pgshift = chip->page_shift;
+ ns->geom.oobshift = ffs(ns->geom.oobsz) - 1;
+ ns->geom.pgsec = ns->geom.secsz / ns->geom.pgsz;
+ ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
+ ns->options = 0;
+
+ if (ns->geom.pgsz == 256) {
+ ns->options |= OPT_PAGE256;
+ }
+ else if (ns->geom.pgsz == 512) {
+ ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
+ if (ns->busw == 8)
+ ns->options |= OPT_PAGE512_8BIT;
+ } else if (ns->geom.pgsz == 2048) {
+ ns->options |= OPT_PAGE2048;
+ } else {
+ NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
+ return -EIO;
+ }
+
+ if (ns->options & OPT_SMALLPAGE) {
+ if (ns->geom.totsz < (64 << 20)) {
+ ns->geom.pgaddrbytes = 3;
+ ns->geom.secaddrbytes = 2;
+ } else {
+ ns->geom.pgaddrbytes = 4;
+ ns->geom.secaddrbytes = 3;
+ }
+ } else {
+ if (ns->geom.totsz <= (128 << 20)) {
+ ns->geom.pgaddrbytes = 4;
+ ns->geom.secaddrbytes = 2;
+ } else {
+ ns->geom.pgaddrbytes = 5;
+ ns->geom.secaddrbytes = 3;
+ }
+ }
+
+ /* Detect how many ID bytes the NAND chip outputs */
+ for (i = 0; nand_flash_ids[i].name != NULL; i++) {
+ if (second_id_byte != nand_flash_ids[i].id)
+ continue;
+ if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
+ ns->options |= OPT_AUTOINCR;
+ }
+
+ if (ns->busw == 16)
+ NS_WARN("16-bit flashes support wasn't tested\n");
+
+ printk("flash size: %u MiB\n", ns->geom.totsz >> 20);
+ printk("page size: %u bytes\n", ns->geom.pgsz);
+ printk("OOB area size: %u bytes\n", ns->geom.oobsz);
+ printk("sector size: %u KiB\n", ns->geom.secsz >> 10);
+ printk("pages number: %u\n", ns->geom.pgnum);
+ printk("pages per sector: %u\n", ns->geom.pgsec);
+ printk("bus width: %u\n", ns->busw);
+ printk("bits in sector size: %u\n", ns->geom.secshift);
+ printk("bits in page size: %u\n", ns->geom.pgshift);
+ printk("bits in OOB size: %u\n", ns->geom.oobshift);
+ printk("flash size with OOB: %u KiB\n", ns->geom.totszoob >> 10);
+ printk("page address bytes: %u\n", ns->geom.pgaddrbytes);
+ printk("sector address bytes: %u\n", ns->geom.secaddrbytes);
+ printk("options: %#x\n", ns->options);
+
+ /* Map / allocate and initialize the flash image */
+#ifdef CONFIG_NS_ABS_POS
+ ns->mem.byte = ioremap(CONFIG_NS_ABS_POS, ns->geom.totszoob);
+ if (!ns->mem.byte) {
+ NS_ERR("init_nandsim: failed to map the NAND flash image at address %p\n",
+ (void *)CONFIG_NS_ABS_POS);
+ return -ENOMEM;
+ }
+#else
+ ns->mem.byte = vmalloc(ns->geom.totszoob);
+ if (!ns->mem.byte) {
+ NS_ERR("init_nandsim: unable to allocate %u bytes for flash image\n",
+ ns->geom.totszoob);
+ return -ENOMEM;
+ }
+ memset(ns->mem.byte, 0xFF, ns->geom.totszoob);
+#endif
+
+ /* Allocate / initialize the internal buffer */
+ ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+ if (!ns->buf.byte) {
+ NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
+ ns->geom.pgszoob);
+ goto error;
+ }
+ memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
+
+ /* Fill the partition_info structure */
+ ns->part.name = "NAND simulator partition";
+ ns->part.offset = 0;
+ ns->part.size = ns->geom.totsz;
+
+ return 0;
+
+error:
+#ifdef CONFIG_NS_ABS_POS
+ iounmap(ns->mem.byte);
+#else
+ vfree(ns->mem.byte);
+#endif
+
+ return -ENOMEM;
+}
+
+/*
+ * Free the nandsim structure.
+ */
+static void
+free_nandsim(struct nandsim *ns)
+{
+ kfree(ns->buf.byte);
+
+#ifdef CONFIG_NS_ABS_POS
+ iounmap(ns->mem.byte);
+#else
+ vfree(ns->mem.byte);
+#endif
+
+ return;
+}
+
+/*
+ * Returns the string representation of 'state' state.
+ */
+static char *
+get_state_name(uint32_t state)
+{
+ switch (NS_STATE(state)) {
+ case STATE_CMD_READ0:
+ return "STATE_CMD_READ0";
+ case STATE_CMD_READ1:
+ return "STATE_CMD_READ1";
+ case STATE_CMD_PAGEPROG:
+ return "STATE_CMD_PAGEPROG";
+ case STATE_CMD_READOOB:
+ return "STATE_CMD_READOOB";
+ case STATE_CMD_READSTART:
+ return "STATE_CMD_READSTART";
+ case STATE_CMD_ERASE1:
+ return "STATE_CMD_ERASE1";
+ case STATE_CMD_STATUS:
+ return "STATE_CMD_STATUS";
+ case STATE_CMD_STATUS_M:
+ return "STATE_CMD_STATUS_M";
+ case STATE_CMD_SEQIN:
+ return "STATE_CMD_SEQIN";
+ case STATE_CMD_READID:
+ return "STATE_CMD_READID";
+ case STATE_CMD_ERASE2:
+ return "STATE_CMD_ERASE2";
+ case STATE_CMD_RESET:
+ return "STATE_CMD_RESET";
+ case STATE_ADDR_PAGE:
+ return "STATE_ADDR_PAGE";
+ case STATE_ADDR_SEC:
+ return "STATE_ADDR_SEC";
+ case STATE_ADDR_ZERO:
+ return "STATE_ADDR_ZERO";
+ case STATE_DATAIN:
+ return "STATE_DATAIN";
+ case STATE_DATAOUT:
+ return "STATE_DATAOUT";
+ case STATE_DATAOUT_ID:
+ return "STATE_DATAOUT_ID";
+ case STATE_DATAOUT_STATUS:
+ return "STATE_DATAOUT_STATUS";
+ case STATE_DATAOUT_STATUS_M:
+ return "STATE_DATAOUT_STATUS_M";
+ case STATE_READY:
+ return "STATE_READY";
+ case STATE_UNKNOWN:
+ return "STATE_UNKNOWN";
+ }
+
+ NS_ERR("get_state_name: unknown state, BUG\n");
+ return NULL;
+}
+
+/*
+ * Check if command is valid.
+ *
+ * RETURNS: 1 if wrong command, 0 if right.
+ */
+static int
+check_command(int cmd)
+{
+ switch (cmd) {
+
+ case NAND_CMD_READ0:
+ case NAND_CMD_READSTART:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_READOOB:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_READID:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_RESET:
+ case NAND_CMD_READ1:
+ return 0;
+
+ case NAND_CMD_STATUS_MULTI:
+ default:
+ return 1;
+ }
+}
+
+/*
+ * Returns state after command is accepted by command number.
+ */
+static uint32_t
+get_state_by_command(unsigned command)
+{
+ switch (command) {
+ case NAND_CMD_READ0:
+ return STATE_CMD_READ0;
+ case NAND_CMD_READ1:
+ return STATE_CMD_READ1;
+ case NAND_CMD_PAGEPROG:
+ return STATE_CMD_PAGEPROG;
+ case NAND_CMD_READSTART:
+ return STATE_CMD_READSTART;
+ case NAND_CMD_READOOB:
+ return STATE_CMD_READOOB;
+ case NAND_CMD_ERASE1:
+ return STATE_CMD_ERASE1;
+ case NAND_CMD_STATUS:
+ return STATE_CMD_STATUS;
+ case NAND_CMD_STATUS_MULTI:
+ return STATE_CMD_STATUS_M;
+ case NAND_CMD_SEQIN:
+ return STATE_CMD_SEQIN;
+ case NAND_CMD_READID:
+ return STATE_CMD_READID;
+ case NAND_CMD_ERASE2:
+ return STATE_CMD_ERASE2;
+ case NAND_CMD_RESET:
+ return STATE_CMD_RESET;
+ }
+
+ NS_ERR("get_state_by_command: unknown command, BUG\n");
+ return 0;
+}
+
+/*
+ * Move an address byte to the corresponding internal register.
+ */
+static inline void
+accept_addr_byte(struct nandsim *ns, u_char bt)
+{
+ uint byte = (uint)bt;
+
+ if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
+ ns->regs.column |= (byte << 8 * ns->regs.count);
+ else {
+ ns->regs.row |= (byte << 8 * (ns->regs.count -
+ ns->geom.pgaddrbytes +
+ ns->geom.secaddrbytes));
+ }
+
+ return;
+}
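+
+/*
+ * Worked example for accept_addr_byte() (assuming a small-page chip with
+ * pgaddrbytes = 3 and secaddrbytes = 2): the first address byte is placed
+ * in the column register, while the second and third bytes are OR-ed into
+ * the row register at bit offsets 0 and 8 respectively.
+ */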
+
+/*
+ * Switch to STATE_READY state.
+ */
+static inline void
+switch_to_ready_state(struct nandsim *ns, u_char status)
+{
+ NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
+
+ ns->state = STATE_READY;
+ ns->nxstate = STATE_UNKNOWN;
+ ns->op = NULL;
+ ns->npstates = 0;
+ ns->stateidx = 0;
+ ns->regs.num = 0;
+ ns->regs.count = 0;
+ ns->regs.off = 0;
+ ns->regs.row = 0;
+ ns->regs.column = 0;
+ ns->regs.status = status;
+}
+
+/*
+ * If the operation isn't known yet, try to find it in the global array
+ * of supported operations.
+ *
+ * The operation can be unknown for one of the following reasons:
+ * 1. A new command was accepted and this is the first call to find the
+ * corresponding states chain. In this case ns->npstates = 0;
+ * 2. There are several operations which begin with the same command(s)
+ * (for example program from the second half and read from the
+ * second half operations both begin with the READ1 command). In this
+ * case the ns->pstates[] array contains previous states.
+ *
+ * Thus, the function tries to find an operation containing the following
+ * states (if the 'flag' parameter is 0):
+ * ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
+ *
+ * If one (and only one) matching operation is found, it is accepted
+ * (ns->op, ns->state, ns->nxstate are initialized, ns->npstates is
+ * zeroed).
+ *
+ * If there are several matches, the current state is pushed to
+ * ns->pstates.
+ *
+ * The operation can be unknown only while commands are input to the chip.
+ * As soon as an address byte is accepted, the operation must be known.
+ * In such situation the function is called with 'flag' != 0, and the
+ * operation is searched using the following pattern:
+ * ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
+ *
+ * It is supposed that this pattern must either match one operation or
+ * none. There can't be any ambiguity in that case.
+ *
+ * If no matches are found, the function does the following:
+ * 1. if there are saved states present, try to ignore them and search
+ * again only using the last command. If nothing was found, switch
+ * to the STATE_READY state.
+ * 2. if there are no saved states, switch to the STATE_READY state.
+ *
+ * RETURNS: -2 - no matched operations found.
+ * -1 - several matches.
+ * 0 - operation is found.
+ */
+static int
+find_operation(struct nandsim *ns, uint32_t flag)
+{
+ int opsfound = 0;
+ int i, j, idx = 0;
+
+ for (i = 0; i < NS_OPER_NUM; i++) {
+
+ int found = 1;
+
+ if (!(ns->options & ops[i].reqopts))
+ /* Ignore operations we can't perform */
+ continue;
+
+ if (flag) {
+ if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
+ continue;
+ } else {
+ if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
+ continue;
+ }
+
+ for (j = 0; j < ns->npstates; j++)
+ if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
+ && (ns->options & ops[idx].reqopts)) {
+ found = 0;
+ break;
+ }
+
+ if (found) {
+ idx = i;
+ opsfound += 1;
+ }
+ }
+
+ if (opsfound == 1) {
+ /* Exact match */
+ ns->op = &ops[idx].states[0];
+ if (flag) {
+ /*
+ * In this case the find_operation function was
+ * called when address input has just begun. But it isn't
+ * yet fully input, so the current state must
+ * not be one of STATE_ADDR_*; instead, the STATE_ADDR_*
+ * state must be the next state (ns->nxstate).
+ */
+ ns->stateidx = ns->npstates - 1;
+ } else {
+ ns->stateidx = ns->npstates;
+ }
+ ns->npstates = 0;
+ ns->state = ns->op[ns->stateidx];
+ ns->nxstate = ns->op[ns->stateidx + 1];
+ NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
+ idx, get_state_name(ns->state), get_state_name(ns->nxstate));
+ return 0;
+ }
+
+ if (opsfound == 0) {
+ /* Nothing was found. Try to ignore previous commands (if any) and search again */
+ if (ns->npstates != 0) {
+ NS_DBG("find_operation: no operation found, try again with state %s\n",
+ get_state_name(ns->state));
+ ns->npstates = 0;
+ return find_operation(ns, 0);
+
+ }
+ NS_DBG("find_operation: no operations found\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return -2;
+ }
+
+ if (flag) {
+ /* This shouldn't happen */
+ NS_DBG("find_operation: BUG, operation must be known if address is input\n");
+ return -2;
+ }
+
+ NS_DBG("find_operation: there is still ambiguity\n");
+
+ ns->pstates[ns->npstates++] = ns->state;
+
+ return -1;
+}
+
+/*
+ * If state has any action bit, perform this action.
+ *
+ * RETURNS: 0 if success, -1 if error.
+ */
+static int
+do_state_action(struct nandsim *ns, uint32_t action)
+{
+ int i, num;
+ int busdiv = ns->busw == 8 ? 1 : 2;
+
+ action &= ACTION_MASK;
+
+ /* Check that page address input is correct */
+ if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
+ NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
+ return -1;
+ }
+
+ switch (action) {
+
+ case ACTION_CPY:
+ /*
+ * Copy page data to the internal buffer.
+ */
+
+ /* Column shouldn't be very large */
+ if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
+ NS_ERR("do_state_action: column number is too large\n");
+ break;
+ }
+ num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ memcpy(ns->buf.byte, ns->mem.byte + NS_RAW_OFFSET(ns) + ns->regs.off, num);
+
+ NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
+ num, NS_RAW_OFFSET(ns) + ns->regs.off);
+
+ if (ns->regs.off == 0)
+ NS_LOG("read page %d\n", ns->regs.row);
+ else if (ns->regs.off < ns->geom.pgsz)
+ NS_LOG("read page %d (second half)\n", ns->regs.row);
+ else
+ NS_LOG("read OOB of page %d\n", ns->regs.row);
+
+ NS_UDELAY(access_delay);
+ NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+ break;
+
+ case ACTION_SECERASE:
+ /*
+ * Erase sector.
+ */
+
+ if (ns->lines.wp) {
+ NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
+ return -1;
+ }
+
+ if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
+ || (ns->regs.row & ~(ns->geom.secsz - 1))) {
+ NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
+ return -1;
+ }
+
+ ns->regs.row = (ns->regs.row <<
+ 8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
+ ns->regs.column = 0;
+
+ NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
+ ns->regs.row, NS_RAW_OFFSET(ns));
+ NS_LOG("erase sector %d\n", ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift));
+
+ memset(ns->mem.byte + NS_RAW_OFFSET(ns), 0xFF, ns->geom.secszoob);
+
+ NS_MDELAY(erase_delay);
+
+ break;
+
+ case ACTION_PRGPAGE:
+ /*
+ * Program page - move internal buffer data to the page.
+ */
+
+ if (ns->lines.wp) {
+ NS_WARN("do_state_action: device is write-protected, programm\n");
+ return -1;
+ }
+
+ num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ if (num != ns->regs.count) {
+ NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
+ ns->regs.count, num);
+ return -1;
+ }
+
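+ /*
+ * Programming ANDs the buffer into the flash image rather than copying
+ * it: a NAND program operation can only clear bits; only an erase (see
+ * ACTION_SECERASE above, which fills the sector with 0xFF) sets them back.
+ */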
+ for (i = 0; i < num; i++)
+ ns->mem.byte[NS_RAW_OFFSET(ns) + ns->regs.off + i] &= ns->buf.byte[i];
+
+ NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
+ num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
+ NS_LOG("programm page %d\n", ns->regs.row);
+
+ NS_UDELAY(programm_delay);
+ NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+ break;
+
+ case ACTION_ZEROOFF:
+ NS_DBG("do_state_action: set internal offset to 0\n");
+ ns->regs.off = 0;
+ break;
+
+ case ACTION_HALFOFF:
+ if (!(ns->options & OPT_PAGE512_8BIT)) {
+ NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
+ "byte page size 8x chips\n");
+ return -1;
+ }
+ NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
+ ns->regs.off = ns->geom.pgsz/2;
+ break;
+
+ case ACTION_OOBOFF:
+ NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
+ ns->regs.off = ns->geom.pgsz;
+ break;
+
+ default:
+ NS_DBG("do_state_action: BUG! unknown action\n");
+ }
+
+ return 0;
+}
+
+/*
+ * Switch simulator's state.
+ */
+static void
+switch_state(struct nandsim *ns)
+{
+ if (ns->op) {
+ /*
+ * The current operation has already been identified.
+ * Just follow the states chain.
+ */
+
+ ns->stateidx += 1;
+ ns->state = ns->nxstate;
+ ns->nxstate = ns->op[ns->stateidx + 1];
+
+ NS_DBG("switch_state: operation is known, switch to the next state, "
+ "state: %s, nxstate: %s\n",
+ get_state_name(ns->state), get_state_name(ns->nxstate));
+
+ /* See whether we need to do some action */
+ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ } else {
+ /*
+ * We don't yet know which operation we perform.
+ * Try to identify it.
+ */
+
+ /*
+ * The only event that causes the switch_state function to
+ * be called with a yet unknown operation is a new command.
+ */
+ ns->state = get_state_by_command(ns->regs.command);
+
+ NS_DBG("switch_state: operation is unknown, try to find it\n");
+
+ if (find_operation(ns, 0) != 0)
+ return;
+
+ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+ }
+
+ /* For 16x devices column means the page offset in words */
+ if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
+ NS_DBG("switch_state: double the column number for 16x device\n");
+ ns->regs.column <<= 1;
+ }
+
+ if (NS_STATE(ns->nxstate) == STATE_READY) {
+ /*
+ * The current state is the last. Return to STATE_READY
+ */
+
+ u_char status = NS_STATUS_OK(ns);
+
+ /* In case of data states, see if all bytes were input/output */
+ if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
+ && ns->regs.count != ns->regs.num) {
+ NS_WARN("switch_state: not all bytes were processed, %d left\n",
+ ns->regs.num - ns->regs.count);
+ status = NS_STATUS_FAILED(ns);
+ }
+
+ NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
+
+ switch_to_ready_state(ns, status);
+
+ return;
+ } else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
+ /*
+ * If the next state is data input/output, switch to it now
+ */
+
+ ns->state = ns->nxstate;
+ ns->nxstate = ns->op[++ns->stateidx + 1];
+ ns->regs.num = ns->regs.count = 0;
+
+ NS_DBG("switch_state: the next state is data I/O, switch, "
+ "state: %s, nxstate: %s\n",
+ get_state_name(ns->state), get_state_name(ns->nxstate));
+
+ /*
+ * Set the internal register to the count of bytes which
+ * are expected to be input or output
+ */
+ switch (NS_STATE(ns->state)) {
+ case STATE_DATAIN:
+ case STATE_DATAOUT:
+ ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+ break;
+
+ case STATE_DATAOUT_ID:
+ ns->regs.num = ns->geom.idbytes;
+ break;
+
+ case STATE_DATAOUT_STATUS:
+ case STATE_DATAOUT_STATUS_M:
+ ns->regs.count = ns->regs.num = 0;
+ break;
+
+ default:
+ NS_ERR("switch_state: BUG! unknown data state\n");
+ }
+
+ } else if (ns->nxstate & STATE_ADDR_MASK) {
+ /*
+ * If the next state is address input, set the internal
+ * register to the number of expected address bytes
+ */
+
+ ns->regs.count = 0;
+
+ switch (NS_STATE(ns->nxstate)) {
+ case STATE_ADDR_PAGE:
+ ns->regs.num = ns->geom.pgaddrbytes;
+
+ break;
+ case STATE_ADDR_SEC:
+ ns->regs.num = ns->geom.secaddrbytes;
+ break;
+
+ case STATE_ADDR_ZERO:
+ ns->regs.num = 1;
+ break;
+
+ default:
+ NS_ERR("switch_state: BUG! unknown address state\n");
+ }
+ } else {
+ /*
+ * Just reset internal counters.
+ */
+
+ ns->regs.num = 0;
+ ns->regs.count = 0;
+ }
+}
+
+static void
+ns_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
+
+ switch (cmd) {
+
+ /* set CLE line high */
+ case NAND_CTL_SETCLE:
+ NS_DBG("ns_hwcontrol: start command latch cycles\n");
+ ns->lines.cle = 1;
+ break;
+
+ /* set CLE line low */
+ case NAND_CTL_CLRCLE:
+ NS_DBG("ns_hwcontrol: stop command latch cycles\n");
+ ns->lines.cle = 0;
+ break;
+
+ /* set ALE line high */
+ case NAND_CTL_SETALE:
+ NS_DBG("ns_hwcontrol: start address latch cycles\n");
+ ns->lines.ale = 1;
+ break;
+
+ /* set ALE line low */
+ case NAND_CTL_CLRALE:
+ NS_DBG("ns_hwcontrol: stop address latch cycles\n");
+ ns->lines.ale = 0;
+ break;
+
+ /* set WP line high */
+ case NAND_CTL_SETWP:
+ NS_DBG("ns_hwcontrol: enable write protection\n");
+ ns->lines.wp = 1;
+ break;
+
+ /* set WP line low */
+ case NAND_CTL_CLRWP:
+ NS_DBG("ns_hwcontrol: disable write protection\n");
+ ns->lines.wp = 0;
+ break;
+
+ /* set CE line low */
+ case NAND_CTL_SETNCE:
+ NS_DBG("ns_hwcontrol: enable chip\n");
+ ns->lines.ce = 1;
+ break;
+
+ /* set CE line high */
+ case NAND_CTL_CLRNCE:
+ NS_DBG("ns_hwcontrol: disable chip\n");
+ ns->lines.ce = 0;
+ break;
+
+ default:
+ NS_ERR("hwcontrol: unknown command\n");
+ }
+
+ return;
+}
+
+static u_char
+ns_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
+ u_char outb = 0x00;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
+ return outb;
+ }
+ if (ns->lines.ale || ns->lines.cle) {
+ NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
+ return outb;
+ }
+ if (!(ns->state & STATE_DATAOUT_MASK)) {
+ NS_WARN("read_byte: unexpected data output cycle, state is %s "
+ "return %#x\n", get_state_name(ns->state), (uint)outb);
+ return outb;
+ }
+
+ /* Status register may be read as many times as it is wanted */
+ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
+ NS_DBG("read_byte: return %#x status\n", ns->regs.status);
+ return ns->regs.status;
+ }
+
+ /* Check if there is any data in the internal buffer which may be read */
+ if (ns->regs.count == ns->regs.num) {
+ NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
+ return outb;
+ }
+
+ switch (NS_STATE(ns->state)) {
+ case STATE_DATAOUT:
+ if (ns->busw == 8) {
+ outb = ns->buf.byte[ns->regs.count];
+ ns->regs.count += 1;
+ } else {
+ outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
+ ns->regs.count += 2;
+ }
+ break;
+ case STATE_DATAOUT_ID:
+ NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
+ outb = ns->ids[ns->regs.count];
+ ns->regs.count += 1;
+ break;
+ default:
+ BUG();
+ }
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("read_byte: all bytes were read\n");
+
+ /*
+ * OPT_AUTOINCR allows reading the next consecutive pages without
+ * a new read operation cycle.
+ */
+ if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
+ ns->regs.count = 0;
+ if (ns->regs.row + 1 < ns->geom.pgnum)
+ ns->regs.row += 1;
+ NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
+ do_state_action(ns, ACTION_CPY);
+ }
+ else if (NS_STATE(ns->nxstate) == STATE_READY)
+ switch_state(ns);
+
+ }
+
+ return outb;
+}
+
+static void
+ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("write_byte: chip is disabled, ignore write\n");
+ return;
+ }
+ if (ns->lines.ale && ns->lines.cle) {
+ NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
+ return;
+ }
+
+ if (ns->lines.cle == 1) {
+ /*
+ * The byte written is a command.
+ */
+
+ if (byte == NAND_CMD_RESET) {
+ NS_LOG("reset chip\n");
+ switch_to_ready_state(ns, NS_STATUS_OK(ns));
+ return;
+ }
+
+ /*
+ * Chip might still be in STATE_DATAOUT
+ * (if OPT_AUTOINCR feature is supported), STATE_DATAOUT_STATUS or
+ * STATE_DATAOUT_STATUS_M state. If so, switch state.
+ */
+ if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
+ || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M
+ || ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT))
+ switch_state(ns);
+
+ /* Check if chip is expecting command */
+ if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
+ /*
+ * We are in a situation where something other than a
+ * command was expected but a command was input. In this
+ * case, ignore previous command(s)/state(s) and accept the last one.
+ */
+ NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
+ "ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ }
+
+ /* Check that the command byte is correct */
+ if (check_command(byte)) {
+ NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
+ return;
+ }
+
+ NS_DBG("command byte corresponding to %s state accepted\n",
+ get_state_name(get_state_by_command(byte)));
+ ns->regs.command = byte;
+ switch_state(ns);
+
+ } else if (ns->lines.ale == 1) {
+ /*
+ * The byte written is an address.
+ */
+
+ if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
+
+ NS_DBG("write_byte: operation isn't known yet, identify it\n");
+
+ if (find_operation(ns, 1) < 0)
+ return;
+
+ if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ ns->regs.count = 0;
+ switch (NS_STATE(ns->nxstate)) {
+ case STATE_ADDR_PAGE:
+ ns->regs.num = ns->geom.pgaddrbytes;
+ break;
+ case STATE_ADDR_SEC:
+ ns->regs.num = ns->geom.secaddrbytes;
+ break;
+ case STATE_ADDR_ZERO:
+ ns->regs.num = 1;
+ break;
+ default:
+ BUG();
+ }
+ }
+
+ /* Check that chip is expecting address */
+ if (!(ns->nxstate & STATE_ADDR_MASK)) {
+ NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
+ "switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if this is expected byte */
+ if (ns->regs.count == ns->regs.num) {
+ NS_ERR("write_byte: no more address bytes expected\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ accept_addr_byte(ns, byte);
+
+ ns->regs.count += 1;
+
+ NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
+ (uint)byte, ns->regs.count, ns->regs.num);
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
+ switch_state(ns);
+ }
+
+ } else {
+ /*
+ * The byte written is input data.
+ */
+
+ /* Check that chip is expecting data input */
+ if (!(ns->state & STATE_DATAIN_MASK)) {
+ NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
+ "switch to %s\n", (uint)byte,
+ get_state_name(ns->state), get_state_name(STATE_READY));
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if this is expected byte */
+ if (ns->regs.count == ns->regs.num) {
+ NS_WARN("write_byte: %u input bytes has already been accepted, ignore write\n",
+ ns->regs.num);
+ return;
+ }
+
+ if (ns->busw == 8) {
+ ns->buf.byte[ns->regs.count] = byte;
+ ns->regs.count += 1;
+ } else {
+ ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
+ ns->regs.count += 2;
+ }
+ }
+
+ return;
+}
+
+static int
+ns_device_ready(struct mtd_info *mtd)
+{
+ NS_DBG("device_ready\n");
+ return 1;
+}
+
+static uint16_t
+ns_nand_read_word(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+
+ NS_DBG("read_word\n");
+
+ return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
+}
+
+static void
+ns_nand_write_word(struct mtd_info *mtd, uint16_t word)
+{
+ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
+
+ NS_DBG("write_word\n");
+
+ chip->write_byte(mtd, word & 0xFF);
+ chip->write_byte(mtd, word >> 8);
+}
+
+static void
+ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
+
+ /* Check that chip is expecting data input */
+ if (!(ns->state & STATE_DATAIN_MASK)) {
+ NS_ERR("write_buf: data input isn't expected, state is %s, "
+ "switch to STATE_READY\n", get_state_name(ns->state));
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ /* Check if these are expected bytes */
+ if (ns->regs.count + len > ns->regs.num) {
+ NS_ERR("write_buf: too many input bytes\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ memcpy(ns->buf.byte + ns->regs.count, buf, len);
+ ns->regs.count += len;
+
+ if (ns->regs.count == ns->regs.num) {
+ NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
+ }
+}
+
+static void
+ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
+
+ /* Sanity and correctness checks */
+ if (!ns->lines.ce) {
+ NS_ERR("read_buf: chip is disabled\n");
+ return;
+ }
+ if (ns->lines.ale || ns->lines.cle) {
+ NS_ERR("read_buf: ALE or CLE pin is high\n");
+ return;
+ }
+ if (!(ns->state & STATE_DATAOUT_MASK)) {
+ NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
+ get_state_name(ns->state));
+ return;
+ }
+
+ if (NS_STATE(ns->state) != STATE_DATAOUT) {
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = ((struct nand_chip *)mtd->priv)->read_byte(mtd);
+
+ return;
+ }
+
+ /* Check if these are expected bytes */
+ if (ns->regs.count + len > ns->regs.num) {
+ NS_ERR("read_buf: too many bytes to read\n");
+ switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+ return;
+ }
+
+ memcpy(buf, ns->buf.byte + ns->regs.count, len);
+ ns->regs.count += len;
+
+ if (ns->regs.count == ns->regs.num) {
+ if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
+ ns->regs.count = 0;
+ if (ns->regs.row + 1 < ns->geom.pgnum)
+ ns->regs.row += 1;
+ NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
+ do_state_action(ns, ACTION_CPY);
+ }
+ else if (NS_STATE(ns->nxstate) == STATE_READY)
+ switch_state(ns);
+ }
+
+ return;
+}
+
+static int
+ns_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ ns_nand_read_buf(mtd, (u_char *)&ns_verify_buf[0], len);
+
+ if (!memcmp(buf, &ns_verify_buf[0], len)) {
+ NS_DBG("verify_buf: the buffer is OK\n");
+ return 0;
+ } else {
+ NS_DBG("verify_buf: the buffer is wrong\n");
+ return -EFAULT;
+ }
+}
+
+/*
+ * Module initialization function
+ */
+static int __init ns_init_module(void)
+{
+ struct nand_chip *chip;
+ struct nandsim *nand;
+ int retval = -ENOMEM;
+
+ if (bus_width != 8 && bus_width != 16) {
+ NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
+ return -EINVAL;
+ }
+
+ /* Allocate and initialize mtd_info, nand_chip and nandsim structures */
+ nsmtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip)
+ + sizeof(struct nandsim), GFP_KERNEL);
+ if (!nsmtd) {
+ NS_ERR("unable to allocate core structures.\n");
+ return -ENOMEM;
+ }
+ memset(nsmtd, 0, sizeof(struct mtd_info) + sizeof(struct nand_chip) +
+ sizeof(struct nandsim));
+ chip = (struct nand_chip *)(nsmtd + 1);
+ nsmtd->priv = (void *)chip;
+ nand = (struct nandsim *)(chip + 1);
+ chip->priv = (void *)nand;
+
+ /*
+ * Register simulator's callbacks.
+ */
+ chip->hwcontrol = ns_hwcontrol;
+ chip->read_byte = ns_nand_read_byte;
+ chip->dev_ready = ns_device_ready;
+ chip->write_byte = ns_nand_write_byte;
+ chip->write_buf = ns_nand_write_buf;
+ chip->read_buf = ns_nand_read_buf;
+ chip->verify_buf = ns_nand_verify_buf;
+ chip->write_word = ns_nand_write_word;
+ chip->read_word = ns_nand_read_word;
+ chip->eccmode = NAND_ECC_SOFT;
+ chip->options |= NAND_SKIP_BBTSCAN;
+
+ /*
+ * Perform minimum nandsim structure initialization to handle
+ * the initial ID read command correctly
+ */
+ if (third_id_byte != 0xFF || fourth_id_byte != 0xFF)
+ nand->geom.idbytes = 4;
+ else
+ nand->geom.idbytes = 2;
+ nand->regs.status = NS_STATUS_OK(nand);
+ nand->nxstate = STATE_UNKNOWN;
+ nand->options |= OPT_PAGE256; /* temporary value */
+ nand->ids[0] = first_id_byte;
+ nand->ids[1] = second_id_byte;
+ nand->ids[2] = third_id_byte;
+ nand->ids[3] = fourth_id_byte;
+ if (bus_width == 16) {
+ nand->busw = 16;
+ chip->options |= NAND_BUSWIDTH_16;
+ }
+
+ if ((retval = nand_scan(nsmtd, 1)) != 0) {
+ NS_ERR("can't register NAND Simulator\n");
+ if (retval > 0)
+ retval = -ENXIO;
+ goto error;
+ }
+
+ if ((retval = init_nandsim(nsmtd)) != 0) {
+ NS_ERR("scan_bbt: can't initialize the nandsim structure\n");
+ goto error;
+ }
+
+ if ((retval = nand_default_bbt(nsmtd)) != 0) {
+ free_nandsim(nand);
+ goto error;
+ }
+
+ /* Register NAND as one big partition */
+ add_mtd_partitions(nsmtd, &nand->part, 1);
+
+ return 0;
+
+error:
+ kfree(nsmtd);
+
+ return retval;
+}
+
+module_init(ns_init_module);
+
+/*
+ * Module clean-up function
+ */
+static void __exit ns_cleanup_module(void)
+{
+ struct nandsim *ns = (struct nandsim *)(((struct nand_chip *)nsmtd->priv)->priv);
+
+ free_nandsim(ns); /* Free nandsim private resources */
+ nand_release(nsmtd); /* Unregister the driver */
+ kfree(nsmtd); /* Free other structures */
+}
+
+module_exit(ns_cleanup_module);
+
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Artem B. Bityuckiy");
+MODULE_DESCRIPTION ("The NAND flash simulator");
+
diff --git a/linux-2.4.x/drivers/mtd/nand/ppchameleonevb.c b/linux-2.4.x/drivers/mtd/nand/ppchameleonevb.c
new file mode 100644
index 0000000..91a95f3
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/ppchameleonevb.c
@@ -0,0 +1,420 @@
+/*
+ * drivers/mtd/nand/ppchameleonevb.c
+ *
+ * Copyright (C) 2003 DAVE Srl (info@wawnet.biz)
+ *
+ * Derived from drivers/mtd/nand/edb7312.c
+ *
+ *
+ * $Id: ppchameleonevb.c,v 1.7 2005/11/07 11:14:31 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash devices found on the
+ * PPChameleon/PPChameleonEVB system.
+ * PPChameleon options (autodetected):
+ * - BA model: no NAND
+ * - ME model: 32MB (Samsung K9F5608U0B)
+ * - HI model: 128MB (Samsung K9F1G08UOM)
+ * PPChameleonEVB options:
+ * - 32MB (Samsung K9F5608U0B)
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <platforms/PPChameleonEVB.h>
+
+#undef USE_READY_BUSY_PIN
+#define USE_READY_BUSY_PIN
+/* see datasheets (tR) */
+#define NAND_BIG_DELAY_US 25
+#define NAND_SMALL_DELAY_US 10
+
+/* handy sizes */
+#define SZ_4M 0x00400000
+#define NAND_SMALL_SIZE 0x02000000
+#define NAND_MTD_NAME "ppchameleon-nand"
+#define NAND_EVB_MTD_NAME "ppchameleonevb-nand"
+
+/* GPIO pins used to drive NAND chip mounted on processor module */
+#define NAND_nCE_GPIO_PIN (0x80000000 >> 1)
+#define NAND_CLE_GPIO_PIN (0x80000000 >> 2)
+#define NAND_ALE_GPIO_PIN (0x80000000 >> 3)
+#define NAND_RB_GPIO_PIN (0x80000000 >> 4)
+/* GPIO pins used to drive NAND chip mounted on EVB */
+#define NAND_EVB_nCE_GPIO_PIN (0x80000000 >> 14)
+#define NAND_EVB_CLE_GPIO_PIN (0x80000000 >> 15)
+#define NAND_EVB_ALE_GPIO_PIN (0x80000000 >> 16)
+#define NAND_EVB_RB_GPIO_PIN (0x80000000 >> 31)
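+/*
+ * The masks above use the PowerPC MSB-first bit-numbering convention (bit 0
+ * is the most significant bit), so (0x80000000 >> 1) selects GPIO_01,
+ * (0x80000000 >> 2) GPIO_02, and so on, matching the pin mapping listed in
+ * ppchameleonevb_init() below.
+ */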
+
+/*
+ * MTD structure for PPChameleonEVB board
+ */
+static struct mtd_info *ppchameleon_mtd = NULL;
+static struct mtd_info *ppchameleonevb_mtd = NULL;
+
+/*
+ * Module stuff
+ */
+static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR;
+static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR;
+
+#ifdef MODULE
+module_param(ppchameleon_fio_pbase, ulong, 0);
+module_param(ppchameleonevb_fio_pbase, ulong, 0);
+#else
+__setup("ppchameleon_fio_pbase=",ppchameleon_fio_pbase);
+__setup("ppchameleonevb_fio_pbase=",ppchameleonevb_fio_pbase);
+#endif
+
+#ifdef CONFIG_MTD_PARTITIONS
+/*
+ * Define static partitions for flash devices
+ */
+static struct mtd_partition partition_info_hi[] = {
+ { name: "PPChameleon HI Nand Flash",
+ offset: 0,
+ size: 128*1024*1024 }
+};
+
+static struct mtd_partition partition_info_me[] = {
+ { name: "PPChameleon ME Nand Flash",
+ offset: 0,
+ size: 32*1024*1024 }
+};
+
+static struct mtd_partition partition_info_evb[] = {
+ { name: "PPChameleonEVB Nand Flash",
+ offset: 0,
+ size: 32*1024*1024 }
+};
+
+#define NUM_PARTITIONS 1
+
+extern int parse_cmdline_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ const char *mtd_id);
+#endif
+
+
+/*
+ * hardware specific access to control-lines
+ */
+static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd)
+{
+ switch(cmd) {
+
+ case NAND_CTL_SETCLE:
+ MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ case NAND_CTL_CLRCLE:
+ MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ case NAND_CTL_SETALE:
+ MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ case NAND_CTL_CLRALE:
+ MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ case NAND_CTL_SETNCE:
+ MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ case NAND_CTL_CLRNCE:
+ MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR);
+ break;
+ }
+}
+
+static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd)
+{
+ switch(cmd) {
+
+ case NAND_CTL_SETCLE:
+ MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ case NAND_CTL_CLRCLE:
+ MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ case NAND_CTL_SETALE:
+ MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ case NAND_CTL_CLRALE:
+ MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ case NAND_CTL_SETNCE:
+ MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ case NAND_CTL_CLRNCE:
+ MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR);
+ break;
+ }
+}
+
+#ifdef USE_READY_BUSY_PIN
+/*
+ * read device ready pin
+ */
+static int ppchameleon_device_ready(struct mtd_info *minfo)
+{
+ if (in_be32((volatile unsigned*)GPIO0_IR) & NAND_RB_GPIO_PIN)
+ return 1;
+ return 0;
+}
+
+static int ppchameleonevb_device_ready(struct mtd_info *minfo)
+{
+ if (in_be32((volatile unsigned*)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN)
+ return 1;
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_MTD_PARTITIONS
+const char *part_probes[] = { "cmdlinepart", NULL };
+const char *part_probes_evb[] = { "cmdlinepart", NULL };
+#endif
+
+/*
+ * Main initialization routine
+ */
+static int __init ppchameleonevb_init (void)
+{
+ struct nand_chip *this;
+ const char *part_type = 0;
+ int mtd_parts_nb = 0;
+ struct mtd_partition *mtd_parts = 0;
+ void __iomem *ppchameleon_fio_base;
+ void __iomem *ppchameleonevb_fio_base;
+
+
+ /*********************************
+ * Processor module NAND (if any) *
+ *********************************/
+ /* Allocate memory for MTD device structure and private data */
+ ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip), GFP_KERNEL);
+ if (!ppchameleon_mtd) {
+ printk("Unable to allocate PPChameleon NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* map physical address */
+ ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M);
+ if(!ppchameleon_fio_base) {
+ printk("ioremap PPChameleon NAND flash failed\n");
+ kfree(ppchameleon_mtd);
+ return -EIO;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&ppchameleon_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) ppchameleon_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ ppchameleon_mtd->priv = this;
+
+ /* Initialize GPIOs */
+ /* Pin mapping for NAND chip */
+ /*
+ CE GPIO_01
+ CLE GPIO_02
+ ALE GPIO_03
+ R/B GPIO_04
+ */
+ /* output select */
+ out_be32((volatile unsigned*)GPIO0_OSRH, in_be32((volatile unsigned*)GPIO0_OSRH) & 0xC0FFFFFF);
+ /* three-state select */
+ out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xC0FFFFFF);
+ /* enable output driver */
+ out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN);
+#ifdef USE_READY_BUSY_PIN
+ /* three-state select */
+ out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xFF3FFFFF);
+ /* high-impedance */
+ out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) & (~NAND_RB_GPIO_PIN));
+ /* input select */
+ out_be32((volatile unsigned*)GPIO0_ISR1H, (in_be32((volatile unsigned*)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000);
+#endif
+
+ /* insert callbacks */
+ this->IO_ADDR_R = ppchameleon_fio_base;
+ this->IO_ADDR_W = ppchameleon_fio_base;
+ this->hwcontrol = ppchameleon_hwcontrol;
+#ifdef USE_READY_BUSY_PIN
+ this->dev_ready = ppchameleon_device_ready;
+#endif
+ this->chip_delay = NAND_BIG_DELAY_US;
+ /* ECC mode */
+ this->eccmode = NAND_ECC_SOFT;
+
+ /* Scan to find existence of the device (it could not be mounted) */
+ if (nand_scan (ppchameleon_mtd, 1)) {
+ iounmap((void *)ppchameleon_fio_base);
+ kfree (ppchameleon_mtd);
+ goto nand_evb_init;
+ }
+
+#ifndef USE_READY_BUSY_PIN
+ /* Adjust delay if necessary */
+ if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
+ this->chip_delay = NAND_SMALL_DELAY_US;
+#endif
+
+#ifdef CONFIG_MTD_PARTITIONS
+ ppchameleon_mtd->name = NAND_MTD_NAME;
+ mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
+ if (mtd_parts_nb > 0)
+ part_type = "command line";
+ else
+ mtd_parts_nb = 0;
+#endif
+ if (mtd_parts_nb == 0)
+ {
+ if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
+ mtd_parts = partition_info_me;
+ else
+ mtd_parts = partition_info_hi;
+ mtd_parts_nb = NUM_PARTITIONS;
+ part_type = "static";
+ }
+
+ /* Register the partitions */
+ printk(KERN_NOTICE "Using %s partition definition\n", part_type);
+ add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
+
+nand_evb_init:
+ /****************************
+ * EVB NAND (always present) *
+ ****************************/
+ /* Allocate memory for MTD device structure and private data */
+ ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip), GFP_KERNEL);
+ if (!ppchameleonevb_mtd) {
+ printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* map physical address */
+ ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M);
+ if(!ppchameleonevb_fio_base) {
+ printk("ioremap PPChameleonEVB NAND flash failed\n");
+ kfree(ppchameleonevb_mtd);
+ return -EIO;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&ppchameleonevb_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) ppchameleonevb_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ ppchameleonevb_mtd->priv = this;
+
+ /* Initialize GPIOs */
+ /* Pin mapping for NAND chip */
+ /*
+ CE GPIO_14
+ CLE GPIO_15
+ ALE GPIO_16
+ R/B GPIO_31
+ */
+ /* output select */
+ out_be32((volatile unsigned*)GPIO0_OSRH, in_be32((volatile unsigned*)GPIO0_OSRH) & 0xFFFFFFF0);
+ out_be32((volatile unsigned*)GPIO0_OSRL, in_be32((volatile unsigned*)GPIO0_OSRL) & 0x3FFFFFFF);
+ /* three-state select */
+ out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xFFFFFFF0);
+ out_be32((volatile unsigned*)GPIO0_TSRL, in_be32((volatile unsigned*)GPIO0_TSRL) & 0x3FFFFFFF);
+ /* enable output driver */
+ out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN |
+ NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN);
+#ifdef USE_READY_BUSY_PIN
+ /* three-state select */
+ out_be32((volatile unsigned*)GPIO0_TSRL, in_be32((volatile unsigned*)GPIO0_TSRL) & 0xFFFFFFFC);
+ /* high-impedance */
+ out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN));
+ /* input select */
+ out_be32((volatile unsigned*)GPIO0_ISR1L, (in_be32((volatile unsigned*)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001);
+#endif
+
+ /* insert callbacks */
+ this->IO_ADDR_R = ppchameleonevb_fio_base;
+ this->IO_ADDR_W = ppchameleonevb_fio_base;
+ this->hwcontrol = ppchameleonevb_hwcontrol;
+#ifdef USE_READY_BUSY_PIN
+ this->dev_ready = ppchameleonevb_device_ready;
+#endif
+ this->chip_delay = NAND_SMALL_DELAY_US;
+
+ /* ECC mode */
+ this->eccmode = NAND_ECC_SOFT;
+
+ /* Scan to find existence of the device */
+ if (nand_scan (ppchameleonevb_mtd, 1)) {
+ iounmap((void *)ppchameleonevb_fio_base);
+ kfree (ppchameleonevb_mtd);
+ return -ENXIO;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
+ mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
+ if (mtd_parts_nb > 0)
+ part_type = "command line";
+ else
+ mtd_parts_nb = 0;
+#endif
+ if (mtd_parts_nb == 0)
+ {
+ mtd_parts = partition_info_evb;
+ mtd_parts_nb = NUM_PARTITIONS;
+ part_type = "static";
+ }
+
+ /* Register the partitions */
+ printk(KERN_NOTICE "Using %s partition definition\n", part_type);
+ add_mtd_partitions(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
+
+ /* Return happy */
+ return 0;
+}
+module_init(ppchameleonevb_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit ppchameleonevb_cleanup (void)
+{
+ struct nand_chip *this;
+
+ /* Release resources, unregister device(s) */
+ nand_release (ppchameleon_mtd);
+ nand_release (ppchameleonevb_mtd);
+
+ /* Release iomaps */
+ this = (struct nand_chip *) &ppchameleon_mtd[1];
+ iounmap((void *) this->IO_ADDR_R);
+ this = (struct nand_chip *) &ppchameleonevb_mtd[1];
+ iounmap((void *) this->IO_ADDR_R);
+
+ /* Free the MTD device structure */
+ kfree (ppchameleon_mtd);
+ kfree (ppchameleonevb_mtd);
+}
+module_exit(ppchameleonevb_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("DAVE Srl <support-ppchameleon@dave-tech.it>");
+MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board");
diff --git a/linux-2.4.x/drivers/mtd/nand/rtc_from4.c b/linux-2.4.x/drivers/mtd/nand/rtc_from4.c
new file mode 100644
index 0000000..3a5841c
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/rtc_from4.c
@@ -0,0 +1,683 @@
+/*
+ * drivers/mtd/nand/rtc_from4.c
+ *
+ * Copyright (C) 2004 Red Hat, Inc.
+ *
+ * Derived from drivers/mtd/nand/spia.c
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ *
+ * $Id: rtc_from4.c,v 1.10 2005/11/07 11:14:31 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the AG-AND flash device found on the
+ * Renesas Technology Corp. Flash ROM 4-slot interface board (FROM_BOARD4),
+ * which utilizes the Renesas HN29V1G91T-30 part.
+ * This chip is a 1 Gbit (128MiB x 8 bits) AG-AND flash device.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rslib.h>
+#include <linux/module.h>
+#include <linux/mtd/compatmac.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+
+/*
+ * MTD structure for Renesas board
+ */
+static struct mtd_info *rtc_from4_mtd = NULL;
+
+#define RTC_FROM4_MAX_CHIPS 2
+
+/* HS77x9 processor register defines */
+#define SH77X9_BCR1 ((volatile unsigned short *)(0xFFFFFF60))
+#define SH77X9_BCR2 ((volatile unsigned short *)(0xFFFFFF62))
+#define SH77X9_WCR1 ((volatile unsigned short *)(0xFFFFFF64))
+#define SH77X9_WCR2 ((volatile unsigned short *)(0xFFFFFF66))
+#define SH77X9_MCR ((volatile unsigned short *)(0xFFFFFF68))
+#define SH77X9_PCR ((volatile unsigned short *)(0xFFFFFF6C))
+#define SH77X9_FRQCR ((volatile unsigned short *)(0xFFFFFF80))
+
+/*
+ * Values specific to the Renesas Technology Corp. FROM_BOARD4 (used with HS77x9 processor)
+ */
+/* Address where flash is mapped */
+#define RTC_FROM4_FIO_BASE 0x14000000
+
+/* CLE and ALE are tied to address lines 5 & 4, respectively */
+#define RTC_FROM4_CLE (1 << 5)
+#define RTC_FROM4_ALE (1 << 4)
+
+/* address lines A24-A22 used for chip selection */
+#define RTC_FROM4_NAND_ADDR_SLOT3 (0x00800000)
+#define RTC_FROM4_NAND_ADDR_SLOT4 (0x00C00000)
+#define RTC_FROM4_NAND_ADDR_FPGA (0x01000000)
+/* mask address lines A24-A22 used for chip selection */
+#define RTC_FROM4_NAND_ADDR_MASK (RTC_FROM4_NAND_ADDR_SLOT3 | RTC_FROM4_NAND_ADDR_SLOT4 | RTC_FROM4_NAND_ADDR_FPGA)
+
+/* FPGA status register for checking device ready (bit zero) */
+#define RTC_FROM4_FPGA_SR (RTC_FROM4_NAND_ADDR_FPGA | 0x00000002)
+#define RTC_FROM4_DEVICE_READY 0x0001
+
+/* FPGA Reed-Solomon ECC Control register */
+
+#define RTC_FROM4_RS_ECC_CTL (RTC_FROM4_NAND_ADDR_FPGA | 0x00000050)
+#define RTC_FROM4_RS_ECC_CTL_CLR (1 << 7)
+#define RTC_FROM4_RS_ECC_CTL_GEN (1 << 6)
+#define RTC_FROM4_RS_ECC_CTL_FD_E (1 << 5)
+
+/* FPGA Reed-Solomon ECC code base */
+#define RTC_FROM4_RS_ECC (RTC_FROM4_NAND_ADDR_FPGA | 0x00000060)
+#define RTC_FROM4_RS_ECCN (RTC_FROM4_NAND_ADDR_FPGA | 0x00000080)
+
+/* FPGA Reed-Solomon ECC check register */
+#define RTC_FROM4_RS_ECC_CHK (RTC_FROM4_NAND_ADDR_FPGA | 0x00000070)
+#define RTC_FROM4_RS_ECC_CHK_ERROR (1 << 7)
+
+#define ERR_STAT_ECC_AVAILABLE 0x20
+
+/* Undefine for software ECC */
+#define RTC_FROM4_HWECC 1
+
+/* Define as 1 for no virtual erase blocks (in JFFS2) */
+#define RTC_FROM4_NO_VIRTBLOCKS 0
+
+/*
+ * Module stuff
+ */
+static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE);
+
+const static struct mtd_partition partition_info[] = {
+ {
+ .name = "Renesas flash partition 1",
+ .offset = 0,
+ .size = MTDPART_SIZ_FULL
+ },
+};
+#define NUM_PARTITIONS 1
+
+/*
+ * hardware specific flash bbt descriptors
+ * Note: this is to allow debugging by disabling
+ * NAND_BBT_CREATE and/or NAND_BBT_WRITE
+ *
+ */
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr rtc_from4_bbt_main_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 40,
+ .len = 4,
+ .veroffs = 44,
+ .maxblocks = 4,
+ .pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = {
+ .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+ | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+ .offs = 40,
+ .len = 4,
+ .veroffs = 44,
+ .maxblocks = 4,
+ .pattern = mirror_pattern
+};
+
+
+
+#ifdef RTC_FROM4_HWECC
+
+/* the Reed Solomon control structure */
+static struct rs_control *rs_decoder;
+
+/*
+ * hardware specific Out Of Band information
+ */
+static struct nand_oobinfo rtc_from4_nand_oobinfo = {
+ .useecc = MTD_NANDECC_AUTOPLACE,
+ .eccbytes = 32,
+ .eccpos = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31},
+ .oobfree = { {32, 32} }
+};
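+
+/*
+ * Resulting OOB usage, as implied by the descriptor above together with the
+ * NAND_ECC_HW8_512 mode selected in rtc_from4_init() and the BBT descriptors
+ * earlier in this file (a sketch, not normative): bytes 0..31 hold eight RS
+ * ECC bytes for each of the four 512-byte sub-pages, bytes 32..63 are free
+ * for the filesystem, and on the blocks that store the bad block table the
+ * pattern sits at OOB offset 40 with its version byte at offset 44.
+ */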
+
+/* Aargh. I missed the reversed bit order, when I
+ * was talking to Renesas about the FPGA.
+ *
+ * The table is used for bit reordering and inversion
+ * of the ecc byte which we get from the FPGA
+ */
+static uint8_t revbits[256] = {
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+ 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+ 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+ 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+ 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+ 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+ 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+ 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+ 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+ 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+ 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+ 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+ 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+ 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+ 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+ 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+ 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+ 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+ 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+ 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+ 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+ 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+ 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+ 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+ 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+ 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+ 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+ 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+ 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+ 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+};
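+
+/*
+ * Quick self-check for the table above:
+ * revbits[0x01] == 0x80, revbits[0x80] == 0x01, revbits[0xff] == 0xff.
+ * If it ever needed regenerating, an equivalent computation would be:
+ *
+ *	uint8_t rev = 0;
+ *	for (i = 0; i < 8; i++)
+ *		if (byte & (1 << i))
+ *			rev |= 0x80 >> i;
+ */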
+
+#endif
+
+
+
+/*
+ * rtc_from4_hwcontrol - hardware specific access to control-lines
+ * @mtd: MTD device structure
+ * @cmd: hardware control command
+ *
+ * Address lines (A5 and A4) are used to control Command and Address Latch
+ * Enable on this board, so set the read/write address appropriately.
+ *
+ * Chip Enable is also controlled by the Chip Select (CS5) and
+ * Address lines (A24-A22), so no action is required here.
+ *
+ */
+static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ struct nand_chip* this = (struct nand_chip *) (mtd->priv);
+
+ switch(cmd) {
+
+ case NAND_CTL_SETCLE:
+ this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_CLE);
+ break;
+ case NAND_CTL_CLRCLE:
+ this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_CLE);
+ break;
+
+ case NAND_CTL_SETALE:
+ this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_ALE);
+ break;
+ case NAND_CTL_CLRALE:
+ this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_ALE);
+ break;
+
+ case NAND_CTL_SETNCE:
+ break;
+ case NAND_CTL_CLRNCE:
+ break;
+
+ }
+}
+
+
+/*
+ * rtc_from4_nand_select_chip - hardware specific chip select
+ * @mtd: MTD device structure
+ * @chip: Chip to select (0 == slot 3, 1 == slot 4)
+ *
+ * The chip select is based on address lines A24-A22.
+ * This driver uses flash slots 3 and 4 (A23-A22).
+ *
+ */
+static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+
+ this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK);
+ this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK);
+
+ switch(chip) {
+
+ case 0: /* select slot 3 chip */
+ this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3);
+ this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3);
+ break;
+ case 1: /* select slot 4 chip */
+ this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4);
+ this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4);
+ break;
+
+ }
+}
+
+
+/*
+ * rtc_from4_nand_device_ready - hardware specific ready/busy check
+ * @mtd: MTD device structure
+ *
+ * This board provides the Ready/Busy state in the status register
+ * of the FPGA. Bit zero indicates the RDY(1)/BSY(0) signal.
+ *
+ */
+static int rtc_from4_nand_device_ready(struct mtd_info *mtd)
+{
+ unsigned short status;
+
+ status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_FPGA_SR));
+
+ return (status & RTC_FROM4_DEVICE_READY);
+
+}
+
+
+/*
+ * deplete - code to perform device recovery in case there was a power loss
+ * @mtd: MTD device structure
+ * @chip: Chip to select (0 == slot 3, 1 == slot 4)
+ *
+ * If there was a sudden loss of power during an erase operation, a
+ * "device recovery" operation must be performed when power is restored
+ * to ensure correct operation. This routine performs the required steps
+ * for the requested chip.
+ *
+ * See page 86 of the data sheet for details.
+ *
+ */
+static void deplete(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *this = mtd->priv;
+
+ /* wait until device is ready */
+ while (!this->dev_ready(mtd));
+
+ this->select_chip(mtd, chip);
+
+ /* Send the commands for device recovery, phase 1 */
+ this->cmdfunc (mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000);
+ this->cmdfunc (mtd, NAND_CMD_DEPLETE2, -1, -1);
+
+ /* Send the commands for device recovery, phase 2 */
+ this->cmdfunc (mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004);
+ this->cmdfunc (mtd, NAND_CMD_DEPLETE2, -1, -1);
+
+}
+
+
+#ifdef RTC_FROM4_HWECC
+/*
+ * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function
+ * @mtd: MTD device structure
+ * @mode: I/O mode; read or write
+ *
+ * enable hardware ECC for data read or write
+ *
+ */
+static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ volatile unsigned short * rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL);
+ unsigned short status;
+
+ switch (mode) {
+ case NAND_ECC_READ :
+ status = RTC_FROM4_RS_ECC_CTL_CLR
+ | RTC_FROM4_RS_ECC_CTL_FD_E;
+
+ *rs_ecc_ctl = status;
+ break;
+
+ case NAND_ECC_READSYN :
+ status = 0x00;
+
+ *rs_ecc_ctl = status;
+ break;
+
+ case NAND_ECC_WRITE :
+ status = RTC_FROM4_RS_ECC_CTL_CLR
+ | RTC_FROM4_RS_ECC_CTL_GEN
+ | RTC_FROM4_RS_ECC_CTL_FD_E;
+
+ *rs_ecc_ctl = status;
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+}
+
+
+/*
+ * rtc_from4_calculate_ecc - hardware specific code to read ECC code
+ * @mtd: MTD device structure
+ * @dat: buffer containing the data to generate ECC codes
+ * @ecc_code: ECC codes calculated
+ *
+ * The ECC code is calculated by the FPGA. All we have to do is read the values
+ * from the FPGA registers.
+ *
+ * Note: We read from the inverted registers, since the data is inverted before
+ * the code is calculated, so all-0xff data (a blank page) results in an all-0xff RS code.
+ *
+ */
+static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+ volatile unsigned short * rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN);
+ unsigned short value;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ value = *rs_eccn;
+ ecc_code[i] = (unsigned char)value;
+ rs_eccn++;
+ }
+ ecc_code[7] |= 0x0f; /* set the last four bits (not used) */
+}
+
+
+/*
+ * rtc_from4_correct_data - hardware specific code to correct data using ECC code
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to generate ECC codes
+ * @ecc1: ECC codes read
+ * @ecc2: ECC codes calculated
+ *
+ * The FPGA tells us quickly whether there is an error. If not, we return
+ * immediately; otherwise we read the ECC results from the FPGA and call the
+ * RS library to decode and, hopefully, correct the error.
+ *
+ */
+static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_char *ecc1, u_char *ecc2)
+{
+ int i, j, res;
+ unsigned short status;
+ uint16_t par[6], syn[6];
+ uint8_t ecc[8];
+ volatile unsigned short *rs_ecc;
+
+ status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK));
+
+ if (!(status & RTC_FROM4_RS_ECC_CHK_ERROR)) {
+ return 0;
+ }
+
+ /* Read the syndrome pattern from the FPGA and correct the bit order */
+ rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
+ for (i = 0; i < 8; i++) {
+ ecc[i] = revbits[(*rs_ecc) & 0xFF];
+ rs_ecc++;
+ }
+
+ /* convert into 6 10bit syndrome fields */
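+ /* Packing sketch: the 64 bits in ecc[0..7] are regrouped, LSB first, into
+  * six 10-bit symbols, e.g. par[5] = ((ecc[1] & 0x03) << 8) | ecc[0] and
+  * par[4] = ((ecc[2] & 0x0f) << 6) | (ecc[1] >> 2); only the top nibble of
+  * ecc[7] is left unused.
+  */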
+ par[5] = rs_decoder->index_of[(((uint16_t)ecc[0] >> 0) & 0x0ff) |
+ (((uint16_t)ecc[1] << 8) & 0x300)];
+ par[4] = rs_decoder->index_of[(((uint16_t)ecc[1] >> 2) & 0x03f) |
+ (((uint16_t)ecc[2] << 6) & 0x3c0)];
+ par[3] = rs_decoder->index_of[(((uint16_t)ecc[2] >> 4) & 0x00f) |
+ (((uint16_t)ecc[3] << 4) & 0x3f0)];
+ par[2] = rs_decoder->index_of[(((uint16_t)ecc[3] >> 6) & 0x003) |
+ (((uint16_t)ecc[4] << 2) & 0x3fc)];
+ par[1] = rs_decoder->index_of[(((uint16_t)ecc[5] >> 0) & 0x0ff) |
+ (((uint16_t)ecc[6] << 8) & 0x300)];
+ par[0] = (((uint16_t)ecc[6] >> 2) & 0x03f) | (((uint16_t)ecc[7] << 6) & 0x3c0);
+
+ /* Convert to computable syndrome */
+ for (i = 0; i < 6; i++) {
+ syn[i] = par[0];
+ for (j = 1; j < 6; j++)
+ if (par[j] != rs_decoder->nn)
+ syn[i] ^= rs_decoder->alpha_to[rs_modnn(rs_decoder, par[j] + i * j)];
+
+ /* Convert to index form */
+ syn[i] = rs_decoder->index_of[syn[i]];
+ }
+
+ /* Let the library code do its magic.*/
+ res = decode_rs8(rs_decoder, (uint8_t *)buf, par, 512, syn, 0, NULL, 0xff, NULL);
+ if (res > 0) {
+ DEBUG (MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: "
+ "ECC corrected %d errors on read\n", res);
+ }
+ return res;
+}
+
+
+/**
+ * rtc_from4_errstat - perform additional error status checks
+ * @mtd: MTD device structure
+ * @this: NAND chip structure
+ * @state: state of the operation
+ * @status: status code returned from read status
+ * @page: startpage inside the chip, must be called with (page & this->pagemask)
+ *
+ * Perform additional error status checks on erase and write failures
+ * to determine if errors are correctable. For this device, correctable
+ * 1-bit errors on erase and write are considered acceptable.
+ *
+ * note: see pages 34..37 of data sheet for details.
+ *
+ */
+static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this, int state, int status, int page)
+{
+ int er_stat=0;
+ int rtn, retlen;
+ size_t len;
+ uint8_t *buf;
+ int i;
+
+ this->cmdfunc (mtd, NAND_CMD_STATUS_CLEAR, -1, -1);
+
+ if (state == FL_ERASING) {
+ for (i=0; i<4; i++) {
+ if (status & 1<<(i+1)) {
+ this->cmdfunc (mtd, (NAND_CMD_STATUS_ERROR + i + 1), -1, -1);
+ rtn = this->read_byte(mtd);
+ this->cmdfunc (mtd, NAND_CMD_STATUS_RESET, -1, -1);
+ if (!(rtn & ERR_STAT_ECC_AVAILABLE)) {
+ er_stat |= 1<<(i+1); /* err_ecc_not_avail */
+ }
+ }
+ }
+ } else if (state == FL_WRITING) {
+ /* single bank write logic */
+ this->cmdfunc (mtd, NAND_CMD_STATUS_ERROR, -1, -1);
+ rtn = this->read_byte(mtd);
+ this->cmdfunc (mtd, NAND_CMD_STATUS_RESET, -1, -1);
+ if (!(rtn & ERR_STAT_ECC_AVAILABLE)) {
+ er_stat |= 1<<1; /* err_ecc_not_avail */
+ } else {
+ len = mtd->oobblock;
+ buf = kmalloc (len, GFP_KERNEL);
+ if (!buf) {
+ printk (KERN_ERR "rtc_from4_errstat: Out of memory!\n");
+ er_stat = 1; /* if we can't check, assume failed */
+ } else {
+ /* recovery read */
+ /* page read */
+ rtn = nand_do_read_ecc (mtd, page, len, &retlen, buf, NULL, this->autooob, 1);
+ if (rtn) { /* if read failed or > 1-bit error corrected */
+ er_stat |= 1<<1; /* ECC read failed */
+ }
+ kfree(buf);
+ }
+ }
+ }
+
+ rtn = status;
+ if (er_stat == 0) { /* if ECC is available */
+ rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */
+ }
+
+ return rtn;
+}
+#endif
+
+
+/*
+ * Main initialization routine
+ */
+int __init rtc_from4_init (void)
+{
+ struct nand_chip *this;
+ unsigned short bcr1, bcr2, wcr2;
+ int i;
+
+ /* Allocate memory for MTD device structure and private data */
+ rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof (struct nand_chip),
+ GFP_KERNEL);
+ if (!rtc_from4_mtd) {
+ printk ("Unable to allocate Renesas NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&rtc_from4_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) rtc_from4_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ rtc_from4_mtd->priv = this;
+
+ /* set area 5 to PCMCIA mode to meet the tDH spec (data hold time; 9ns min) */
+ bcr1 = *SH77X9_BCR1 & ~0x0002;
+ bcr1 |= 0x0002;
+ *SH77X9_BCR1 = bcr1;
+
+ /* set */
+ bcr2 = *SH77X9_BCR2 & ~0x0c00;
+ bcr2 |= 0x0800;
+ *SH77X9_BCR2 = bcr2;
+
+ /* set area 5 wait states */
+ wcr2 = *SH77X9_WCR2 & ~0x1c00;
+ wcr2 |= 0x1c00;
+ *SH77X9_WCR2 = wcr2;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = rtc_from4_fio_base;
+ this->IO_ADDR_W = rtc_from4_fio_base;
+ /* Set address of hardware control function */
+ this->hwcontrol = rtc_from4_hwcontrol;
+ /* Set address of chip select function */
+ this->select_chip = rtc_from4_nand_select_chip;
+ /* command delay time (in us) */
+ this->chip_delay = 100;
+ /* return the status of the Ready/Busy line */
+ this->dev_ready = rtc_from4_nand_device_ready;
+
+#ifdef RTC_FROM4_HWECC
+ printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n");
+
+ this->eccmode = NAND_ECC_HW8_512;
+ this->options |= NAND_HWECC_SYNDROME;
+ /* return the status of extra status and ECC checks */
+ this->errstat = rtc_from4_errstat;
+ /* set the nand_oobinfo to support FPGA H/W error detection */
+ this->autooob = &rtc_from4_nand_oobinfo;
+ this->enable_hwecc = rtc_from4_enable_hwecc;
+ this->calculate_ecc = rtc_from4_calculate_ecc;
+ this->correct_data = rtc_from4_correct_data;
+#else
+ printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n");
+
+ this->eccmode = NAND_ECC_SOFT;
+#endif
+
+ /* set the bad block tables to support debugging */
+ this->bbt_td = &rtc_from4_bbt_main_descr;
+ this->bbt_md = &rtc_from4_bbt_mirror_descr;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) {
+ kfree(rtc_from4_mtd);
+ return -ENXIO;
+ }
+
+ /* Perform 'device recovery' for each chip in case there was a power loss. */
+ for (i=0; i < this->numchips; i++) {
+ deplete(rtc_from4_mtd, i);
+ }
+
+#if RTC_FROM4_NO_VIRTBLOCKS
+ /* use a smaller erase block to minimize wasted space when a block is bad */
+ /* note: this uses eight times as much RAM as using the default and makes */
+ /* mounts take four times as long. */
+ rtc_from4_mtd->flags |= MTD_NO_VIRTBLOCKS;
+#endif
+
+ /* Register the partitions */
+ add_mtd_partitions(rtc_from4_mtd, partition_info, NUM_PARTITIONS);
+
+#ifdef RTC_FROM4_HWECC
+ /* We could create the decoder on demand, if memory is a concern.
+ * This way we have it handy, if an error happens
+ *
+ * Symbol size is 10 (bits)
+ * Primitive polynomial is x^10+x^3+1
+ * first consecutive root is 0
+ * primitive element to generate roots = 1
+ * generator polynomial degree = 6
+ */
+ rs_decoder = init_rs(10, 0x409, 0, 1, 6);
+ if (!rs_decoder) {
+ printk (KERN_ERR "Could not create a RS decoder\n");
+ nand_release(rtc_from4_mtd);
+ kfree(rtc_from4_mtd);
+ return -ENOMEM;
+ }
+#endif
+ /* Return happy */
+ return 0;
+}
+module_init(rtc_from4_init);
+
+
+/*
+ * Clean up routine
+ */
+#ifdef MODULE
+static void __exit rtc_from4_cleanup (void)
+{
+ /* Release resource, unregister partitions */
+ nand_release(rtc_from4_mtd);
+
+ /* Free the MTD device structure */
+ kfree (rtc_from4_mtd);
+
+#ifdef RTC_FROM4_HWECC
+ /* Free the reed solomon resources */
+ if (rs_decoder) {
+ free_rs(rs_decoder);
+ }
+#endif
+}
+module_exit(rtc_from4_cleanup);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("d.marlin <dmarlin@redhat.com");
+MODULE_DESCRIPTION("Board-specific glue layer for AG-AND flash on Renesas FROM_BOARD4");
+
diff --git a/linux-2.4.x/drivers/mtd/nand/s3c2410.c b/linux-2.4.x/drivers/mtd/nand/s3c2410.c
new file mode 100644
index 0000000..5b5a0b0
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/s3c2410.c
@@ -0,0 +1,801 @@
+/* linux/drivers/mtd/nand/s3c2410.c
+ *
+ * Copyright (c) 2004,2005 Simtec Electronics
+ * http://www.simtec.co.uk/products/SWLINUX/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Samsung S3C2410/S3C2440 NAND driver
+ *
+ * Changelog:
+ * 21-Sep-2004 BJD Initial version
+ * 23-Sep-2004 BJD Multiple device support
+ * 28-Sep-2004 BJD Fixed ECC placement for Hardware mode
+ * 12-Oct-2004 BJD Fixed errors in use of platform data
+ * 18-Feb-2005 BJD Fix sparse errors
+ * 14-Mar-2005 BJD Applied tglx's code reduction patch
+ * 02-May-2005 BJD Fixed s3c2440 support
+ * 02-May-2005 BJD Reduced hwcontrol decode
+ * 20-Jun-2005 BJD Updated s3c2440 support, fixed timing bug
+ * 08-Jul-2005 BJD Fix OOPS when no platform data supplied
+ * 20-Oct-2005 BJD Fix timing calculation bug
+ * 14-Jan-2006 BJD Allow clock to be stopped when idle
+ *
+ * $Id: s3c2410.c,v 1.23 2006/04/01 18:06:29 bjd Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <linux/config.h>
+
+#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/hardware/clock.h>
+
+#include <asm/arch/regs-nand.h>
+#include <asm/arch/nand.h>
+
+#define PFX "s3c2410-nand: "
+
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
+static int hardware_ecc = 1;
+#else
+static int hardware_ecc = 0;
+#endif
+
+#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
+static int clock_stop = 1;
+#else
+static const int clock_stop = 0;
+#endif
+
+
+/* new oob placement block for use with hardware ecc generation
+ */
+
+static struct nand_oobinfo nand_hw_eccoob = {
+ .useecc = MTD_NANDECC_AUTOPLACE,
+ .eccbytes = 3,
+ .eccpos = {0, 1, 2 },
+ .oobfree = { {8, 8} }
+};
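+
+/*
+ * With a small-page (512+16) device this means: OOB bytes 0..2 carry the
+ * controller-generated ECC, bytes 8..15 stay free for the filesystem, and
+ * the factory bad block marker (normally byte 5) is left untouched.
+ * (Descriptive sketch only; the authoritative layout is the struct above.)
+ */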
+
+/* controller and mtd information */
+
+struct s3c2410_nand_info;
+
+struct s3c2410_nand_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct s3c2410_nand_set *set;
+ struct s3c2410_nand_info *info;
+ int scan_res;
+};
+
+/* overview of the s3c2410 nand state */
+
+struct s3c2410_nand_info {
+ /* mtd info */
+ struct nand_hw_control controller;
+ struct s3c2410_nand_mtd *mtds;
+ struct s3c2410_platform_nand *platform;
+
+ /* device info */
+ struct device *device;
+ struct resource *area;
+ struct clk *clk;
+ void __iomem *regs;
+ int mtd_count;
+
+ unsigned char is_s3c2440;
+};
+
+/* conversion functions */
+
+static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct s3c2410_nand_mtd, mtd);
+}
+
+static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
+{
+ return s3c2410_nand_mtd_toours(mtd)->info;
+}
+
+static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
+{
+ return platform_get_drvdata(dev);
+}
+
+static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
+{
+ return dev->dev.platform_data;
+}
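+
+/*
+ * For reference, a sketch of the platform data a board file would hand to
+ * this driver (field names follow <asm/arch/nand.h>; the timings and the
+ * partition layout here are invented for illustration):
+ *
+ *	static struct mtd_partition example_parts[] = {
+ *		{ .name = "boot",   .offset = 0, .size = 2 * 1024 * 1024 },
+ *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
+ *		  .size = MTDPART_SIZ_FULL },
+ *	};
+ *	static struct s3c2410_nand_set example_set = {
+ *		.name          = "example",
+ *		.nr_chips      = 1,
+ *		.nr_partitions = ARRAY_SIZE(example_parts),
+ *		.partitions    = example_parts,
+ *	};
+ *	static struct s3c2410_platform_nand example_nand_info = {
+ *		.tacls   = 20,	/* ns */
+ *		.twrph0  = 60,
+ *		.twrph1  = 20,
+ *		.nr_sets = 1,
+ *		.sets    = &example_set,
+ *	};
+ */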
+
+static inline int allow_clk_stop(struct s3c2410_nand_info *info)
+{
+ return clock_stop;
+}
+
+/* timing calculations */
+
+#define NS_IN_KHZ 1000000
+
+static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max)
+{
+ int result;
+
+ result = (wanted * clk) / NS_IN_KHZ;
+ result++;
+
+ pr_debug("result %d from %ld, %d\n", result, clk, wanted);
+
+ if (result > max) {
+ printk("%d ns is too big for current clock rate %ld\n",
+ wanted, clk);
+ return -1;
+ }
+
+ if (result < 1)
+ result = 1;
+
+ return result;
+}
+
+#define to_ns(ticks,clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
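+
+/*
+ * Worked example with made-up numbers: at HCLK = 100 MHz the clk argument is
+ * 100000 (kHz), so a requested 20 ns becomes (20 * 100000) / 1000000 + 1 = 3
+ * ticks, and to_ns(3, 100000) reports it back as 30 ns.
+ */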
+
+/* controller setup */
+
+static int s3c2410_nand_inithw(struct s3c2410_nand_info *info,
+ struct platform_device *pdev)
+{
+ struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
+ unsigned long clkrate = clk_get_rate(info->clk);
+ int tacls, twrph0, twrph1;
+ unsigned long cfg;
+
+ /* calculate the timing information for the controller */
+
+ clkrate /= 1000; /* turn clock into kHz for ease of use */
+
+ if (plat != NULL) {
+ tacls = s3c2410_nand_calc_rate(plat->tacls, clkrate, 4);
+ twrph0 = s3c2410_nand_calc_rate(plat->twrph0, clkrate, 8);
+ twrph1 = s3c2410_nand_calc_rate(plat->twrph1, clkrate, 8);
+ } else {
+ /* default timings */
+ tacls = 4;
+ twrph0 = 8;
+ twrph1 = 8;
+ }
+
+ if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
+ printk(KERN_ERR PFX "cannot get timings suitable for board\n");
+ return -EINVAL;
+ }
+
+ printk(KERN_INFO PFX "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
+ tacls, to_ns(tacls, clkrate),
+ twrph0, to_ns(twrph0, clkrate),
+ twrph1, to_ns(twrph1, clkrate));
+
+ if (!info->is_s3c2440) {
+ cfg = S3C2410_NFCONF_EN;
+ cfg |= S3C2410_NFCONF_TACLS(tacls-1);
+ cfg |= S3C2410_NFCONF_TWRPH0(twrph0-1);
+ cfg |= S3C2410_NFCONF_TWRPH1(twrph1-1);
+ } else {
+ cfg = S3C2440_NFCONF_TACLS(tacls-1);
+ cfg |= S3C2440_NFCONF_TWRPH0(twrph0-1);
+ cfg |= S3C2440_NFCONF_TWRPH1(twrph1-1);
+
+ /* enable the controller and de-assert nFCE */
+
+ writel(S3C2440_NFCONT_ENABLE | S3C2440_NFCONT_ENABLE,
+ info->regs + S3C2440_NFCONT);
+ }
+
+ pr_debug(PFX "NF_CONF is 0x%lx\n", cfg);
+
+ writel(cfg, info->regs + S3C2410_NFCONF);
+ return 0;
+}
+
+/* select chip */
+
+static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+ struct s3c2410_nand_info *info;
+ struct s3c2410_nand_mtd *nmtd;
+ struct nand_chip *this = mtd->priv;
+ void __iomem *reg;
+ unsigned long cur;
+ unsigned long bit;
+
+ nmtd = this->priv;
+ info = nmtd->info;
+
+ bit = (info->is_s3c2440) ? S3C2440_NFCONT_nFCE : S3C2410_NFCONF_nFCE;
+ reg = info->regs+((info->is_s3c2440) ? S3C2440_NFCONT:S3C2410_NFCONF);
+
+ if (chip != -1 && allow_clk_stop(info))
+ clk_enable(info->clk);
+
+ cur = readl(reg);
+
+ if (chip == -1) {
+ cur |= bit;
+ } else {
+ if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
+ printk(KERN_ERR PFX "chip %d out of range\n", chip);
+ return;
+ }
+
+ if (info->platform != NULL) {
+ if (info->platform->select_chip != NULL)
+ (info->platform->select_chip)(nmtd->set, chip);
+ }
+
+ cur &= ~bit;
+ }
+
+ writel(cur, reg);
+
+ if (chip == -1 && allow_clk_stop(info))
+ clk_disable(info->clk);
+}
+
+/* command and control functions
+ *
+ * Note, these all use tglx's method of changing the IO_ADDR_W field
+ * to make the code simpler, and use the nand layer's code to issue the
+ * command and address sequences via the proper IO ports.
+ *
+*/
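+
+/*
+ * Illustration (hypothetical value): once NAND_CTL_SETCLE has pointed
+ * IO_ADDR_W at the NFCMD register, the nand layer's subsequent write of,
+ * say, NAND_CMD_STATUS (0x70) lands directly in the command register, so no
+ * extra GPIO toggling is needed per byte.
+ */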
+
+static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ switch (cmd) {
+ case NAND_CTL_SETNCE:
+ case NAND_CTL_CLRNCE:
+ printk(KERN_ERR "%s: called for NCE\n", __FUNCTION__);
+ break;
+
+ case NAND_CTL_SETCLE:
+ chip->IO_ADDR_W = info->regs + S3C2410_NFCMD;
+ break;
+
+ case NAND_CTL_SETALE:
+ chip->IO_ADDR_W = info->regs + S3C2410_NFADDR;
+ break;
+
+ /* NAND_CTL_CLRCLE: */
+ /* NAND_CTL_CLRALE: */
+ default:
+ chip->IO_ADDR_W = info->regs + S3C2410_NFDATA;
+ break;
+ }
+}
+
+/* command and control functions */
+
+static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ switch (cmd) {
+ case NAND_CTL_SETNCE:
+ case NAND_CTL_CLRNCE:
+ printk(KERN_ERR "%s: called for NCE\n", __FUNCTION__);
+ break;
+
+ case NAND_CTL_SETCLE:
+ chip->IO_ADDR_W = info->regs + S3C2440_NFCMD;
+ break;
+
+ case NAND_CTL_SETALE:
+ chip->IO_ADDR_W = info->regs + S3C2440_NFADDR;
+ break;
+
+ /* NAND_CTL_CLRCLE: */
+ /* NAND_CTL_CLRALE: */
+ default:
+ chip->IO_ADDR_W = info->regs + S3C2440_NFDATA;
+ break;
+ }
+}
+
+/* s3c2410_nand_devready()
+ *
+ * returns 0 if the nand is busy, 1 if it is ready
+*/
+
+static int s3c2410_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ if (info->is_s3c2440)
+ return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
+ return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
+}
+
+
+/* ECC handling functions */
+
+static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ pr_debug("s3c2410_nand_correct_data(%p,%p,%p,%p)\n",
+ mtd, dat, read_ecc, calc_ecc);
+
+ pr_debug("eccs: read %02x,%02x,%02x vs calc %02x,%02x,%02x\n",
+ read_ecc[0], read_ecc[1], read_ecc[2],
+ calc_ecc[0], calc_ecc[1], calc_ecc[2]);
+
+ if (read_ecc[0] == calc_ecc[0] &&
+ read_ecc[1] == calc_ecc[1] &&
+ read_ecc[2] == calc_ecc[2])
+ return 0;
+
+ /* we currently have no method for correcting the error */
+
+ return -1;
+}
+
+/* ECC functions
+ *
+ * These allow the s3c2410 and s3c2440 to use the controller's ECC
+ * generator block to ECC the data as it passes through.
+*/
+
+static void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ctrl;
+
+ ctrl = readl(info->regs + S3C2410_NFCONF);
+ ctrl |= S3C2410_NFCONF_INITECC;
+ writel(ctrl, info->regs + S3C2410_NFCONF);
+}
+
+static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ctrl;
+
+ ctrl = readl(info->regs + S3C2440_NFCONT);
+ writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
+}
+
+static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+ ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
+ ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
+ ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
+
+ pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n",
+ ecc_code[0], ecc_code[1], ecc_code[2]);
+
+ return 0;
+}
+
+
+static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
+
+ ecc_code[0] = ecc;
+ ecc_code[1] = ecc >> 8;
+ ecc_code[2] = ecc >> 16;
+
+ pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n",
+ ecc_code[0], ecc_code[1], ecc_code[2]);
+
+ return 0;
+}
+
+
+/* over-ride the standard functions for a little more speed. We can
+ * use read/write block to move the data buffers to/from the controller
+*/
+
+static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ readsb(this->IO_ADDR_R, buf, len);
+}
+
+static void s3c2410_nand_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ struct nand_chip *this = mtd->priv;
+ writesb(this->IO_ADDR_W, buf, len);
+}
+
+/* device management functions */
+
+static int s3c2410_nand_remove(struct platform_device *pdev)
+{
+ struct s3c2410_nand_info *info = to_nand_info(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (info == NULL)
+ return 0;
+
+ /* first thing we need to do is release all our mtds
+ * and their partitions, then go through freeing the
+ * resources used
+ */
+
+ if (info->mtds != NULL) {
+ struct s3c2410_nand_mtd *ptr = info->mtds;
+ int mtdno;
+
+ for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
+ pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
+ nand_release(&ptr->mtd);
+ }
+
+ kfree(info->mtds);
+ }
+
+ /* free the common resources */
+
+ if (info->clk != NULL && !IS_ERR(info->clk)) {
+ if (!allow_clk_stop(info))
+ clk_disable(info->clk);
+ clk_put(info->clk);
+ }
+
+ if (info->regs != NULL) {
+ iounmap(info->regs);
+ info->regs = NULL;
+ }
+
+ if (info->area != NULL) {
+ release_resource(info->area);
+ kfree(info->area);
+ info->area = NULL;
+ }
+
+ kfree(info);
+
+ return 0;
+}
+
+#ifdef CONFIG_MTD_PARTITIONS
+static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *mtd,
+ struct s3c2410_nand_set *set)
+{
+ if (set == NULL)
+ return add_mtd_device(&mtd->mtd);
+
+ if (set->nr_partitions > 0 && set->partitions != NULL) {
+ return add_mtd_partitions(&mtd->mtd,
+ set->partitions,
+ set->nr_partitions);
+ }
+
+ return add_mtd_device(&mtd->mtd);
+}
+#else
+static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *mtd,
+ struct s3c2410_nand_set *set)
+{
+ return add_mtd_device(&mtd->mtd);
+}
+#endif
+
+/* s3c2410_nand_init_chip
+ *
+ * init a single instance of a chip
+*/
+
+static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
+ struct s3c2410_nand_mtd *nmtd,
+ struct s3c2410_nand_set *set)
+{
+ struct nand_chip *chip = &nmtd->chip;
+
+ chip->IO_ADDR_R = info->regs + S3C2410_NFDATA;
+ chip->IO_ADDR_W = info->regs + S3C2410_NFDATA;
+ chip->hwcontrol = s3c2410_nand_hwcontrol;
+ chip->dev_ready = s3c2410_nand_devready;
+ chip->write_buf = s3c2410_nand_write_buf;
+ chip->read_buf = s3c2410_nand_read_buf;
+ chip->select_chip = s3c2410_nand_select_chip;
+ chip->chip_delay = 50;
+ chip->priv = nmtd;
+ chip->options = 0;
+ chip->controller = &info->controller;
+
+ if (info->is_s3c2440) {
+ chip->IO_ADDR_R = info->regs + S3C2440_NFDATA;
+ chip->IO_ADDR_W = info->regs + S3C2440_NFDATA;
+ chip->hwcontrol = s3c2440_nand_hwcontrol;
+ }
+
+ nmtd->info = info;
+ nmtd->mtd.priv = chip;
+ nmtd->set = set;
+
+ if (hardware_ecc) {
+ chip->correct_data = s3c2410_nand_correct_data;
+ chip->enable_hwecc = s3c2410_nand_enable_hwecc;
+ chip->calculate_ecc = s3c2410_nand_calculate_ecc;
+ chip->eccmode = NAND_ECC_HW3_512;
+ chip->autooob = &nand_hw_eccoob;
+
+ if (info->is_s3c2440) {
+ chip->enable_hwecc = s3c2440_nand_enable_hwecc;
+ chip->calculate_ecc = s3c2440_nand_calculate_ecc;
+ }
+ } else {
+ chip->eccmode = NAND_ECC_SOFT;
+ }
+}
+
+/* s3c2410_nand_probe
+ *
+ * called by the device layer when it finds a device matching
+ * one our driver can handle. This code checks to see if
+ * it can allocate all necessary resources, then calls the
+ * nand layer to look for devices.
+*/
+
+static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440)
+{
+ struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
+ struct s3c2410_nand_info *info;
+ struct s3c2410_nand_mtd *nmtd;
+ struct s3c2410_nand_set *sets;
+ struct resource *res;
+ int err = 0;
+ int size;
+ int nr_sets;
+ int setno;
+
+ pr_debug("s3c2410_nand_probe(%p)\n", pdev);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ dev_err(&pdev->dev, "no memory for flash info\n");
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ memzero(info, sizeof(*info));
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ /* get the clock source and enable it */
+
+ info->clk = clk_get(&pdev->dev, "nand");
+ if (IS_ERR(info->clk)) {
+ dev_err(&pdev->dev, "failed to get clock");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ clk_enable(info->clk);
+
+ /* allocate and map the resource */
+
+ /* currently we assume we have the one resource */
+ res = pdev->resource;
+ size = res->end - res->start + 1;
+
+ info->area = request_mem_region(res->start, size, pdev->name);
+
+ if (info->area == NULL) {
+ dev_err(&pdev->dev, "cannot reserve register region\n");
+ err = -ENOENT;
+ goto exit_error;
+ }
+
+ info->device = &pdev->dev;
+ info->platform = plat;
+ info->regs = ioremap(res->start, size);
+ info->is_s3c2440 = is_s3c2440;
+
+ if (info->regs == NULL) {
+ dev_err(&pdev->dev, "cannot reserve register region\n");
+ err = -EIO;
+ goto exit_error;
+ }
+
+ dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
+
+ /* initialise the hardware */
+
+ err = s3c2410_nand_inithw(info, pdev);
+ if (err != 0)
+ goto exit_error;
+
+ sets = (plat != NULL) ? plat->sets : NULL;
+ nr_sets = (plat != NULL) ? plat->nr_sets : 1;
+
+ info->mtd_count = nr_sets;
+
+ /* allocate our information */
+
+ size = nr_sets * sizeof(*info->mtds);
+ info->mtds = kmalloc(size, GFP_KERNEL);
+ if (info->mtds == NULL) {
+ dev_err(&pdev->dev, "failed to allocate mtd storage\n");
+ err = -ENOMEM;
+ goto exit_error;
+ }
+
+ memzero(info->mtds, size);
+
+ /* initialise all possible chips */
+
+ nmtd = info->mtds;
+
+ for (setno = 0; setno < nr_sets; setno++, nmtd++) {
+ pr_debug("initialising set %d (%p, info %p)\n",
+ setno, nmtd, info);
+
+ s3c2410_nand_init_chip(info, nmtd, sets);
+
+ nmtd->scan_res = nand_scan(&nmtd->mtd,
+ (sets) ? sets->nr_chips : 1);
+
+ if (nmtd->scan_res == 0) {
+ s3c2410_nand_add_partition(info, nmtd, sets);
+ }
+
+ if (sets != NULL)
+ sets++;
+ }
+
+ if (allow_clk_stop(info)) {
+ dev_info(&pdev->dev, "clock idle support enabled\n");
+ clk_disable(info->clk);
+ }
+
+ pr_debug("initialised ok\n");
+ return 0;
+
+ exit_error:
+ s3c2410_nand_remove(pdev);
+
+ if (err == 0)
+ err = -EINVAL;
+ return err;
+}
+
+/* PM Support */
+#ifdef CONFIG_PM
+
+static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+
+ if (info) {
+ if (!allow_clk_stop(info))
+ clk_disable(info->clk);
+ }
+
+ return 0;
+}
+
+static int s3c24xx_nand_resume(struct platform_device *dev)
+{
+ struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+
+ if (info) {
+ clk_enable(info->clk);
+ s3c2410_nand_inithw(info, dev);
+
+ if (allow_clk_stop(info))
+ clk_disable(info->clk);
+ }
+
+ return 0;
+}
+
+#else
+#define s3c24xx_nand_suspend NULL
+#define s3c24xx_nand_resume NULL
+#endif
+
+/* driver device registration */
+
+static int s3c2410_nand_probe(struct platform_device *dev)
+{
+ return s3c24xx_nand_probe(dev, 0);
+}
+
+static int s3c2440_nand_probe(struct platform_device *dev)
+{
+ return s3c24xx_nand_probe(dev, 1);
+}
+
+static struct platform_driver s3c2410_nand_driver = {
+ .probe = s3c2410_nand_probe,
+ .remove = s3c2410_nand_remove,
+ .suspend = s3c24xx_nand_suspend,
+ .resume = s3c24xx_nand_resume,
+ .driver = {
+ .name = "s3c2410-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct platform_driver s3c2440_nand_driver = {
+ .probe = s3c2440_nand_probe,
+ .remove = s3c2410_nand_remove,
+ .suspend = s3c24xx_nand_suspend,
+ .resume = s3c24xx_nand_resume,
+ .driver = {
+ .name = "s3c2440-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s3c2410_nand_init(void)
+{
+ printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
+
+ platform_driver_register(&s3c2440_nand_driver);
+ return platform_driver_register(&s3c2410_nand_driver);
+}
+
+static void __exit s3c2410_nand_exit(void)
+{
+ platform_driver_unregister(&s3c2440_nand_driver);
+ platform_driver_unregister(&s3c2410_nand_driver);
+}
+
+module_init(s3c2410_nand_init);
+module_exit(s3c2410_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
diff --git a/linux-2.4.x/drivers/mtd/nand/sharpsl.c b/linux-2.4.x/drivers/mtd/nand/sharpsl.c
new file mode 100755
index 0000000..b018c25
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/sharpsl.c
@@ -0,0 +1,287 @@
+/*
+ * drivers/mtd/nand/sharpsl.c
+ *
+ * Copyright (C) 2004 Richard Purdie
+ *
+ * $Id: sharpsl.c,v 1.8 2006/04/18 00:07:45 rpurdie Exp $
+ *
+ * Based on Sharp's NAND driver sharp_sl.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+
+static void __iomem *sharpsl_io_base;
+static int sharpsl_phys_base = 0x0C000000;
+
+/* register offset */
+#define ECCLPLB sharpsl_io_base+0x00 /* line parity 7 - 0 bit */
+#define ECCLPUB sharpsl_io_base+0x04 /* line parity 15 - 8 bit */
+#define ECCCP sharpsl_io_base+0x08 /* column parity 5 - 0 bit */
+#define ECCCNTR sharpsl_io_base+0x0C /* ECC byte counter */
+#define ECCCLRR sharpsl_io_base+0x10 /* clear ECC */
+#define FLASHIO sharpsl_io_base+0x14 /* Flash I/O */
+#define FLASHCTL sharpsl_io_base+0x18 /* Flash Control */
+
+/* Flash control bit */
+#define FLRYBY (1 << 5)
+#define FLCE1 (1 << 4)
+#define FLWP (1 << 3)
+#define FLALE (1 << 2)
+#define FLCLE (1 << 1)
+#define FLCE0 (1 << 0)
+
+
+/*
+ * MTD structure for SharpSL
+ */
+static struct mtd_info *sharpsl_mtd = NULL;
+
+/*
+ * Define partitions for flash device
+ */
+#define DEFAULT_NUM_PARTITIONS 3
+
+static int nr_partitions;
+static struct mtd_partition sharpsl_nand_default_partition_info[] = {
+ {
+ .name = "System Area",
+ .offset = 0,
+ .size = 7 * 1024 * 1024,
+ },
+ {
+ .name = "Root Filesystem",
+ .offset = 7 * 1024 * 1024,
+ .size = 30 * 1024 * 1024,
+ },
+ {
+ .name = "Home Filesystem",
+ .offset = MTDPART_OFS_APPEND ,
+ .size = MTDPART_SIZ_FULL ,
+ },
+};
+
+/*
+ * hardware specific access to control-lines
+ */
+static void
+sharpsl_nand_hwcontrol(struct mtd_info* mtd, int cmd)
+{
+ switch (cmd) {
+ case NAND_CTL_SETCLE:
+ writeb(readb(FLASHCTL) | FLCLE, FLASHCTL);
+ break;
+ case NAND_CTL_CLRCLE:
+ writeb(readb(FLASHCTL) & ~FLCLE, FLASHCTL);
+ break;
+
+ case NAND_CTL_SETALE:
+ writeb(readb(FLASHCTL) | FLALE, FLASHCTL);
+ break;
+ case NAND_CTL_CLRALE:
+ writeb(readb(FLASHCTL) & ~FLALE, FLASHCTL);
+ break;
+
+ case NAND_CTL_SETNCE:
+ writeb(readb(FLASHCTL) & ~(FLCE0|FLCE1), FLASHCTL);
+ break;
+ case NAND_CTL_CLRNCE:
+ writeb(readb(FLASHCTL) | (FLCE0|FLCE1), FLASHCTL);
+ break;
+ }
+}
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr sharpsl_bbt = {
+ .options = 0,
+ .offs = 4,
+ .len = 2,
+ .pattern = scan_ff_pattern
+};
+
+static struct nand_bbt_descr sharpsl_akita_bbt = {
+ .options = 0,
+ .offs = 4,
+ .len = 1,
+ .pattern = scan_ff_pattern
+};
+
+static struct nand_oobinfo akita_oobinfo = {
+ .useecc = MTD_NANDECC_AUTOPLACE,
+ .eccbytes = 24,
+ .eccpos = {
+ 0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11,
+ 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23,
+ 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37},
+ .oobfree = { {0x08, 0x09} }
+};
+
+static int
+sharpsl_nand_dev_ready(struct mtd_info* mtd)
+{
+ return !((readb(FLASHCTL) & FLRYBY) == 0);
+}
+
+static void
+sharpsl_nand_enable_hwecc(struct mtd_info* mtd, int mode)
+{
+ writeb(0 ,ECCCLRR);
+}
+
+static int
+sharpsl_nand_calculate_ecc(struct mtd_info* mtd, const u_char* dat,
+ u_char* ecc_code)
+{
+ ecc_code[0] = ~readb(ECCLPUB);
+ ecc_code[1] = ~readb(ECCLPLB);
+ ecc_code[2] = (~readb(ECCCP) << 2) | 0x03;
+ return readb(ECCCNTR) != 0;
+}
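+
+/*
+ * Note on the helper above: the parity registers are read back inverted
+ * (presumably so that an erased, all-0xff page yields an all-0xff code), and
+ * the function returns non-zero when the byte counter ECCCNTR shows that the
+ * hardware did not see a complete 256-byte block.
+ */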
+
+
+#ifdef CONFIG_MTD_PARTITIONS
+const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+
+/*
+ * Main initialization routine
+ */
+int __init
+sharpsl_nand_init(void)
+{
+ struct nand_chip *this;
+ struct mtd_partition* sharpsl_partition_info;
+ int err = 0;
+
+ /* Allocate memory for MTD device structure and private data */
+ sharpsl_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
+ GFP_KERNEL);
+ if (!sharpsl_mtd) {
+ printk ("Unable to allocate SharpSL NAND MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* map physical address */
+ sharpsl_io_base = ioremap(sharpsl_phys_base, 0x1000);
+ if(!sharpsl_io_base){
+ printk("ioremap to access Sharp SL NAND chip failed\n");
+ kfree(sharpsl_mtd);
+ return -EIO;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&sharpsl_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) sharpsl_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ sharpsl_mtd->priv = this;
+
+ /*
+ * PXA initialize
+ */
+ writeb(readb(FLASHCTL) | FLWP, FLASHCTL);
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = FLASHIO;
+ this->IO_ADDR_W = FLASHIO;
+ /* Set address of hardware control function */
+ this->hwcontrol = sharpsl_nand_hwcontrol;
+ this->dev_ready = sharpsl_nand_dev_ready;
+ /* 15 us command delay time */
+ this->chip_delay = 15;
+ /* set eccmode using hardware ECC */
+ this->eccmode = NAND_ECC_HW3_256;
+ this->badblock_pattern = &sharpsl_bbt;
+ if (machine_is_akita() || machine_is_borzoi()) {
+ this->badblock_pattern = &sharpsl_akita_bbt;
+ this->autooob = &akita_oobinfo;
+ }
+ this->enable_hwecc = sharpsl_nand_enable_hwecc;
+ this->calculate_ecc = sharpsl_nand_calculate_ecc;
+ this->correct_data = nand_correct_data;
+
+ /* Scan to find existence of the device */
+ err=nand_scan(sharpsl_mtd,1);
+ if (err) {
+ iounmap(sharpsl_io_base);
+ kfree(sharpsl_mtd);
+ return err;
+ }
+
+ /* Register the partitions */
+ sharpsl_mtd->name = "sharpsl-nand";
+ nr_partitions = parse_mtd_partitions(sharpsl_mtd, part_probes,
+ &sharpsl_partition_info, 0);
+
+ if (nr_partitions <= 0) {
+ nr_partitions = DEFAULT_NUM_PARTITIONS;
+ sharpsl_partition_info = sharpsl_nand_default_partition_info;
+ if (machine_is_poodle()) {
+ sharpsl_partition_info[1].size=22 * 1024 * 1024;
+ } else if (machine_is_corgi() || machine_is_shepherd()) {
+ sharpsl_partition_info[1].size=25 * 1024 * 1024;
+ } else if (machine_is_husky()) {
+ sharpsl_partition_info[1].size=53 * 1024 * 1024;
+ } else if (machine_is_spitz()) {
+ sharpsl_partition_info[1].size=5 * 1024 * 1024;
+ } else if (machine_is_akita()) {
+ sharpsl_partition_info[1].size=58 * 1024 * 1024;
+ } else if (machine_is_borzoi()) {
+ sharpsl_partition_info[1].size=32 * 1024 * 1024;
+ }
+ }
+
+ if (machine_is_husky() || machine_is_borzoi() || machine_is_akita()) {
+ /* Need to use small eraseblock size for backward compatibility */
+ sharpsl_mtd->flags |= MTD_NO_VIRTBLOCKS;
+ }
+
+ add_mtd_partitions(sharpsl_mtd, sharpsl_partition_info, nr_partitions);
+
+ /* Return happy */
+ return 0;
+}
+module_init(sharpsl_nand_init);
+
+/*
+ * Clean up routine
+ */
+#ifdef MODULE
+static void __exit sharpsl_nand_cleanup(void)
+{
+ struct nand_chip *this = (struct nand_chip *) &sharpsl_mtd[1];
+
+ /* Release resources, unregister device */
+ nand_release(sharpsl_mtd);
+
+ iounmap(sharpsl_io_base);
+
+ /* Free the MTD device structure */
+ kfree(sharpsl_mtd);
+}
+module_exit(sharpsl_nand_cleanup);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+MODULE_DESCRIPTION("Device specific logic for NAND flash on Sharp SL-C7xx Series");
diff --git a/linux-2.4.x/drivers/mtd/nand/spia.c b/linux-2.4.x/drivers/mtd/nand/spia.c
index df2f7bc..32541cb 100644
--- a/linux-2.4.x/drivers/mtd/nand/spia.c
+++ b/linux-2.4.x/drivers/mtd/nand/spia.c
@@ -1,9 +1,14 @@
/*
* drivers/mtd/nand/spia.c
*
- * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
*
- * $Id: spia.c,v 1.12 2001/10/02 15:05:14 dwmw2 Exp $
+ *
+ *  10-29-2001 TG	change to support hardware-specific access
+ *			to control lines (due to change in nand.c)
+ * page_cache added
+ *
+ * $Id: spia.c,v 1.25 2005/11/07 11:14:31 gleixner Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -15,6 +20,8 @@
* a 64Mibit (8MiB x 8 bits) NAND flash device.
*/
+#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
@@ -30,14 +37,14 @@ static struct mtd_info *spia_mtd = NULL;
/*
* Values specific to the SPIA board (used with EP7212 processor)
*/
-#define SPIA_IO_ADDR = 0xd0000000 /* Start of EP7212 IO address space */
-#define SPIA_FIO_ADDR = 0xf0000000 /* Address where flash is mapped */
-#define SPIA_PEDR = 0x0080 /*
+#define SPIA_IO_BASE 0xd0000000 /* Start of EP7212 IO address space */
+#define SPIA_FIO_BASE 0xf0000000 /* Address where flash is mapped */
+#define SPIA_PEDR 0x0080 /*
* IO offset to Port E data register
* where the CLE, ALE and NCE pins
* are wired to.
*/
-#define SPIA_PEDDR = 0x00c0 /*
+#define SPIA_PEDDR 0x00c0 /*
* IO offset to Port E data direction
* register so we can control the IO
* lines.
@@ -52,29 +59,47 @@ static int spia_fio_base = SPIA_FIO_BASE;
static int spia_pedr = SPIA_PEDR;
static int spia_peddr = SPIA_PEDDR;
-MODULE_PARM(spia_io_base, "i");
-MODULE_PARM(spia_fio_base, "i");
-MODULE_PARM(spia_pedr, "i");
-MODULE_PARM(spia_peddr, "i");
-
-__setup("spia_io_base=",spia_io_base);
-__setup("spia_fio_base=",spia_fio_base);
-__setup("spia_pedr=",spia_pedr);
-__setup("spia_peddr=",spia_peddr);
+module_param(spia_io_base, int, 0);
+module_param(spia_fio_base, int, 0);
+module_param(spia_pedr, int, 0);
+module_param(spia_peddr, int, 0);
/*
* Define partitions for flash device
*/
const static struct mtd_partition partition_info[] = {
- { name: "SPIA flash partition 1",
- offset: 0,
- size: 2*1024*1024 },
- { name: "SPIA flash partition 2",
- offset: 2*1024*1024,
- size: 6*1024*1024 }
+ {
+ .name = "SPIA flash partition 1",
+ .offset = 0,
+ .size = 2*1024*1024
+ },
+ {
+ .name = "SPIA flash partition 2",
+ .offset = 2*1024*1024,
+ .size = 6*1024*1024
+ }
};
#define NUM_PARTITIONS 2
+
+/*
+ * hardware specific access to control-lines
+ */
+static void spia_hwcontrol(struct mtd_info *mtd, int cmd){
+
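+	/* Port E data register bits: 0x01 = CLE, 0x02 = ALE, 0x04 = nCE.
+	 * nCE is active low, so SETNCE clears the bit and CLRNCE sets it. */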
+ switch(cmd){
+
+ case NAND_CTL_SETCLE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) |= 0x01; break;
+ case NAND_CTL_CLRCLE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) &= ~0x01; break;
+
+ case NAND_CTL_SETALE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) |= 0x02; break;
+ case NAND_CTL_CLRALE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) &= ~0x02; break;
+
+ case NAND_CTL_SETNCE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) &= ~0x04; break;
+ case NAND_CTL_CLRNCE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) |= 0x04; break;
+ }
+}
+
/*
* Main initialization routine
*/
@@ -107,26 +132,19 @@ int __init spia_init (void)
(*(volatile unsigned char *) (spia_io_base + spia_peddr)) = 0x07;
/* Set address of NAND IO lines */
- this->IO_ADDR = spia_fio_base;
- this->CTRL_ADDR = spia_io_base + spia_pedr;
- this->CLE = 0x01;
- this->ALE = 0x02;
- this->NCE = 0x04;
+ this->IO_ADDR_R = (void __iomem *) spia_fio_base;
+ this->IO_ADDR_W = (void __iomem *) spia_fio_base;
+ /* Set address of hardware control function */
+ this->hwcontrol = spia_hwcontrol;
+ /* 15 us command delay time */
+ this->chip_delay = 15;
/* Scan to find existence of the device */
- if (nand_scan (spia_mtd)) {
+ if (nand_scan (spia_mtd, 1)) {
kfree (spia_mtd);
return -ENXIO;
}
- /* Allocate memory for internal data buffer */
- this->data_buf = kmalloc (sizeof(u_char) * (spia_mtd->oobblock + spia_mtd->oobsize), GFP_KERNEL);
- if (!this->data_buf) {
- printk ("Unable to allocate NAND data buffer for SPIA.\n");
- kfree (spia_mtd);
- return -ENOMEM;
- }
-
/* Register the partitions */
add_mtd_partitions(spia_mtd, partition_info, NUM_PARTITIONS);
@@ -141,13 +159,8 @@ module_init(spia_init);
#ifdef MODULE
static void __exit spia_cleanup (void)
{
- struct nand_chip *this = (struct nand_chip *) &spia_mtd[1];
-
- /* Unregister the device */
- del_mtd_device (spia_mtd);
-
- /* Free internal data buffer */
- kfree (this->data_buf);
+ /* Release resources, unregister device */
+ nand_release (spia_mtd);
/* Free the MTD device structure */
kfree (spia_mtd);
@@ -156,5 +169,5 @@ module_exit(spia_cleanup);
#endif
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Steven J. Hill <sjhill@cotw.com");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com");
MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on SPIA board");
diff --git a/linux-2.4.x/drivers/mtd/nand/toto.c b/linux-2.4.x/drivers/mtd/nand/toto.c
new file mode 100644
index 0000000..7609c43
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/toto.c
@@ -0,0 +1,205 @@
+/*
+ * drivers/mtd/nand/toto.c
+ *
+ * Copyright (c) 2003 Texas Instruments
+ *
+ * Derived from drivers/mtd/autcpu12.c
+ *
+ * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ * TI fido board. It supports 32MiB and 64MiB cards
+ *
+ * $Id: toto.c,v 1.5 2005/11/07 11:14:31 gleixner Exp $
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/arch/hardware.h>
+#include <asm/sizes.h>
+#include <asm/arch/toto.h>
+#include <asm/arch-omap1510/hardware.h>
+#include <asm/arch/gpio.h>
+
+/*
+ * MTD structure for TOTO board
+ */
+static struct mtd_info *toto_mtd = NULL;
+
+static unsigned long toto_io_base = OMAP_FLASH_1_BASE;
+
+#define CONFIG_NAND_WORKAROUND 1
+
+#define NAND_NCE 0x4000
+#define NAND_CLE 0x1000
+#define NAND_ALE 0x0002
+#define NAND_MASK (NAND_CLE | NAND_ALE | NAND_NCE)
+
+#define T_NAND_CTL_CLRALE(iob) gpiosetout(NAND_ALE, 0)
+#define T_NAND_CTL_SETALE(iob) gpiosetout(NAND_ALE, NAND_ALE)
+#ifdef CONFIG_NAND_WORKAROUND /* "some" dev boards busted, blue wired to rts2 :( */
+#define T_NAND_CTL_CLRCLE(iob) gpiosetout(NAND_CLE, 0); rts2setout(2, 2)
+#define T_NAND_CTL_SETCLE(iob) gpiosetout(NAND_CLE, NAND_CLE); rts2setout(2, 0)
+#else
+#define T_NAND_CTL_CLRCLE(iob) gpiosetout(NAND_CLE, 0)
+#define T_NAND_CTL_SETCLE(iob) gpiosetout(NAND_CLE, NAND_CLE)
+#endif
+#define T_NAND_CTL_SETNCE(iob) gpiosetout(NAND_NCE, 0)
+#define T_NAND_CTL_CLRNCE(iob) gpiosetout(NAND_NCE, NAND_NCE)
+
+/*
+ * Define partitions for flash devices
+ */
+
+static struct mtd_partition partition_info64M[] = {
+ { .name = "toto kernel partition 1",
+ .offset = 0,
+ .size = 2 * SZ_1M },
+ { .name = "toto file sys partition 2",
+ .offset = 2 * SZ_1M,
+ .size = 14 * SZ_1M },
+ { .name = "toto user partition 3",
+ .offset = 16 * SZ_1M,
+ .size = 16 * SZ_1M },
+ { .name = "toto devboard extra partition 4",
+ .offset = 32 * SZ_1M,
+ .size = 32 * SZ_1M },
+};
+
+static struct mtd_partition partition_info32M[] = {
+ { .name = "toto kernel partition 1",
+ .offset = 0,
+ .size = 2 * SZ_1M },
+ { .name = "toto file sys partition 2",
+ .offset = 2 * SZ_1M,
+ .size = 14 * SZ_1M },
+ { .name = "toto user partition 3",
+ .offset = 16 * SZ_1M,
+ .size = 16 * SZ_1M },
+};
+
+#define NUM_PARTITIONS32M 3
+#define NUM_PARTITIONS64M 4
+/*
+ * hardware specific access to control-lines
+ */
+
+static void toto_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+
+	udelay(1);	/* hopefully enough time for the preceding write to clear */
+ switch(cmd){
+
+ case NAND_CTL_SETCLE: T_NAND_CTL_SETCLE(cmd); break;
+ case NAND_CTL_CLRCLE: T_NAND_CTL_CLRCLE(cmd); break;
+
+ case NAND_CTL_SETALE: T_NAND_CTL_SETALE(cmd); break;
+ case NAND_CTL_CLRALE: T_NAND_CTL_CLRALE(cmd); break;
+
+ case NAND_CTL_SETNCE: T_NAND_CTL_SETNCE(cmd); break;
+ case NAND_CTL_CLRNCE: T_NAND_CTL_CLRNCE(cmd); break;
+ }
+	udelay(1);	/* allow time for the GPIO state change to take effect before the following memory write */
+}
+
+/*
+ * Main initialization routine
+ */
+int __init toto_init (void)
+{
+ struct nand_chip *this;
+ int err = 0;
+
+ /* Allocate memory for MTD device structure and private data */
+ toto_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
+ GFP_KERNEL);
+ if (!toto_mtd) {
+ printk (KERN_WARNING "Unable to allocate toto NAND MTD device structure.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&toto_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) toto_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ toto_mtd->priv = this;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = toto_io_base;
+ this->IO_ADDR_W = toto_io_base;
+ this->hwcontrol = toto_hwcontrol;
+ this->dev_ready = NULL;
+	/* 30 us command delay time */
+ this->chip_delay = 30;
+ this->eccmode = NAND_ECC_SOFT;
+
+	/* Scan to find existence of the device */
+ if (nand_scan (toto_mtd, 1)) {
+ err = -ENXIO;
+ goto out_mtd;
+ }
+
+ /* Register the partitions */
+ switch(toto_mtd->size){
+ case SZ_64M: add_mtd_partitions(toto_mtd, partition_info64M, NUM_PARTITIONS64M); break;
+ case SZ_32M: add_mtd_partitions(toto_mtd, partition_info32M, NUM_PARTITIONS32M); break;
+ default: {
+		printk (KERN_WARNING "Unsupported NAND device\n");
+ err = -ENXIO;
+ goto out_buf;
+ }
+ }
+
+ gpioreserve(NAND_MASK); /* claim our gpios */
+ archflashwp(0,0); /* open up flash for writing */
+
+ goto out;
+
+out_buf:
+ kfree (this->data_buf);
+out_mtd:
+ kfree (toto_mtd);
+out:
+ return err;
+}
+
+module_init(toto_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit toto_cleanup (void)
+{
+ /* Release resources, unregister device */
+ nand_release (toto_mtd);
+
+ /* Free the MTD device structure */
+ kfree (toto_mtd);
+
+ /* stop flash writes */
+ archflashwp(0,1);
+
+ /* release gpios to system */
+ gpiorelease(NAND_MASK);
+}
+module_exit(toto_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Richard Woodruff <r-woodruff2@ti.com>");
+MODULE_DESCRIPTION("Glue layer for NAND flash on toto board");
diff --git a/linux-2.4.x/drivers/mtd/nand/tx4925ndfmc.c b/linux-2.4.x/drivers/mtd/nand/tx4925ndfmc.c
new file mode 100644
index 0000000..2309d21
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/tx4925ndfmc.c
@@ -0,0 +1,416 @@
+/*
+ * drivers/mtd/nand/tx4925ndfmc.c
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device found on the
+ *   Toshiba RBTX4925 reference board, which uses a SmartMedia card. It supports
+ * 16MiB, 32MiB and 64MiB cards.
+ *
+ * Author: MontaVista Software, Inc. source@mvista.com
+ *
+ * Derived from drivers/mtd/autcpu12.c
+ * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ * $Id: tx4925ndfmc.c,v 1.6 2005/11/07 11:14:31 gleixner Exp $
+ *
+ * Copyright (C) 2001 Toshiba Corporation
+ *
+ * 2003 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/tx4925/tx4925_nand.h>
+
+extern struct nand_oobinfo jffs2_oobinfo;
+
+/*
+ * MTD structure for RBTX4925 board
+ */
+static struct mtd_info *tx4925ndfmc_mtd = NULL;
+
+/*
+ * Define partitions for flash devices
+ */
+
+static struct mtd_partition partition_info16k[] = {
+ { .name = "RBTX4925 flash partition 1",
+ .offset = 0,
+ .size = 8 * 0x00100000 },
+ { .name = "RBTX4925 flash partition 2",
+ .offset = 8 * 0x00100000,
+ .size = 8 * 0x00100000 },
+};
+
+static struct mtd_partition partition_info32k[] = {
+ { .name = "RBTX4925 flash partition 1",
+ .offset = 0,
+ .size = 8 * 0x00100000 },
+ { .name = "RBTX4925 flash partition 2",
+ .offset = 8 * 0x00100000,
+ .size = 24 * 0x00100000 },
+};
+
+static struct mtd_partition partition_info64k[] = {
+ { .name = "User FS",
+ .offset = 0,
+ .size = 16 * 0x00100000 },
+ { .name = "RBTX4925 flash partition 2",
+ .offset = 16 * 0x00100000,
+ .size = 48 * 0x00100000},
+};
+
+static struct mtd_partition partition_info128k[] = {
+ { .name = "Skip bad section",
+ .offset = 0,
+ .size = 16 * 0x00100000 },
+ { .name = "User FS",
+ .offset = 16 * 0x00100000,
+ .size = 112 * 0x00100000 },
+};
+#define NUM_PARTITIONS16K 2
+#define NUM_PARTITIONS32K 2
+#define NUM_PARTITIONS64K 2
+#define NUM_PARTITIONS128K 2
+
+/*
+ * hardware specific access to control-lines
+ */
+static void tx4925ndfmc_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+
+ switch(cmd){
+
+ case NAND_CTL_SETCLE:
+ tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_CLE;
+ break;
+ case NAND_CTL_CLRCLE:
+ tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_CLE;
+ break;
+ case NAND_CTL_SETALE:
+ tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ALE;
+ break;
+ case NAND_CTL_CLRALE:
+ tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ALE;
+ break;
+ case NAND_CTL_SETNCE:
+ tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_CE;
+ break;
+ case NAND_CTL_CLRNCE:
+ tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_CE;
+ break;
+ case NAND_CTL_SETWP:
+ tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_WE;
+ break;
+ case NAND_CTL_CLRWP:
+ tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_WE;
+ break;
+ }
+}
+
+/*
+ * read device ready pin
+ */
+static int tx4925ndfmc_device_ready(struct mtd_info *mtd)
+{
+ int ready;
+ ready = (tx4925_ndfmcptr->sr & TX4925_NDSFR_BUSY) ? 0 : 1;
+ return ready;
+}
+void tx4925ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ /* reset first */
+ tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_MASK;
+ tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
+ tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_ENAB;
+}
+static void tx4925ndfmc_disable_ecc(void)
+{
+ tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
+}
+static void tx4925ndfmc_enable_read_ecc(void)
+{
+ tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
+ tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_READ;
+}
+void tx4925ndfmc_readecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code){
+ int i;
+ u_char *ecc = ecc_code;
+ tx4925ndfmc_enable_read_ecc();
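+	/* NAND_ECC_HW6_512: six hardware ECC bytes per 512-byte sector are
+	 * read back through the NDFMC data register */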
+ for (i = 0;i < 6;i++,ecc++)
+ *ecc = tx4925_read_nfmc(&(tx4925_ndfmcptr->dtr));
+ tx4925ndfmc_disable_ecc();
+}
+void tx4925ndfmc_device_setup(void)
+{
+
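+	/* Board-specific setup: clear bit 3 of the register at 0xbb005000
+	 * (its exact function is not documented in this driver). */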
+ *(unsigned char *)0xbb005000 &= ~0x08;
+
+ /* reset NDFMC */
+ tx4925_ndfmcptr->rstr |= TX4925_NDFRSTR_RST;
+ while (tx4925_ndfmcptr->rstr & TX4925_NDFRSTR_RST);
+
+	/* setup BusSeparate, Hold Time, Strobe Pulse Width */
+ tx4925_ndfmcptr->mcr = TX4925_BSPRT ? TX4925_NDFMCR_BSPRT : 0;
+ tx4925_ndfmcptr->spr = TX4925_HOLD << 4 | TX4925_SPW;
+}
+static u_char tx4925ndfmc_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ return tx4925_read_nfmc(this->IO_ADDR_R);
+}
+
+static void tx4925ndfmc_nand_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ tx4925_write_nfmc(byte, this->IO_ADDR_W);
+}
+
+static void tx4925ndfmc_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ tx4925_write_nfmc(buf[i], this->IO_ADDR_W);
+}
+
+static void tx4925ndfmc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ buf[i] = tx4925_read_nfmc(this->IO_ADDR_R);
+}
+
+static int tx4925ndfmc_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ if (buf[i] != tx4925_read_nfmc(this->IO_ADDR_R))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Send command to NAND device
+ */
+static void tx4925ndfmc_nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ register struct nand_chip *this = mtd->priv;
+
+ /* Begin command latch cycle */
+ this->hwcontrol(mtd, NAND_CTL_SETCLE);
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->oobblock) {
+ /* OOB area */
+ column -= mtd->oobblock;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ this->write_byte(mtd, readcmd);
+ }
+ this->write_byte(mtd, command);
+
+ /* Set ALE and clear CLE to start address cycle */
+ this->hwcontrol(mtd, NAND_CTL_CLRCLE);
+
+ if (column != -1 || page_addr != -1) {
+ this->hwcontrol(mtd, NAND_CTL_SETALE);
+
+ /* Serially input address */
+ if (column != -1)
+ this->write_byte(mtd, column);
+ if (page_addr != -1) {
+ this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
+ this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
+ /* One more address cycle for higher density devices */
+ if (mtd->size & 0x0c000000)
+ this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f));
+ }
+ /* Latch in address */
+ this->hwcontrol(mtd, NAND_CTL_CLRALE);
+ }
+
+ /*
+ * program and erase have their own busy handlers
+ * status and sequential in needs no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ /* Turn off WE */
+ this->hwcontrol (mtd, NAND_CTL_CLRWP);
+ return;
+
+ case NAND_CMD_SEQIN:
+ /* Turn on WE */
+ this->hwcontrol (mtd, NAND_CTL_SETWP);
+ return;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (this->dev_ready)
+ break;
+ this->hwcontrol(mtd, NAND_CTL_SETCLE);
+ this->write_byte(mtd, NAND_CMD_STATUS);
+ this->hwcontrol(mtd, NAND_CTL_CLRCLE);
+ while ( !(this->read_byte(mtd) & 0x40));
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!this->dev_ready) {
+ udelay (this->chip_delay);
+ return;
+ }
+ }
+
+ /* wait until command is processed */
+ while (!this->dev_ready(mtd));
+}
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, char *);
+#endif
+
+/*
+ * Main initialization routine
+ */
+extern int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
+int __init tx4925ndfmc_init (void)
+{
+ struct nand_chip *this;
+ int err = 0;
+
+ /* Allocate memory for MTD device structure and private data */
+ tx4925ndfmc_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
+ GFP_KERNEL);
+ if (!tx4925ndfmc_mtd) {
+ printk ("Unable to allocate RBTX4925 NAND MTD device structure.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ tx4925ndfmc_device_setup();
+
+ /* io is indirect via a register so don't need to ioremap address */
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&tx4925ndfmc_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) tx4925ndfmc_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ tx4925ndfmc_mtd->priv = this;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = (void __iomem *)&(tx4925_ndfmcptr->dtr);
+ this->IO_ADDR_W = (void __iomem *)&(tx4925_ndfmcptr->dtr);
+ this->hwcontrol = tx4925ndfmc_hwcontrol;
+ this->enable_hwecc = tx4925ndfmc_enable_hwecc;
+ this->calculate_ecc = tx4925ndfmc_readecc;
+ this->correct_data = nand_correct_data;
+ this->eccmode = NAND_ECC_HW6_512;
+ this->dev_ready = tx4925ndfmc_device_ready;
+ /* 20 us command delay time */
+ this->chip_delay = 20;
+ this->read_byte = tx4925ndfmc_nand_read_byte;
+ this->write_byte = tx4925ndfmc_nand_write_byte;
+ this->cmdfunc = tx4925ndfmc_nand_command;
+ this->write_buf = tx4925ndfmc_nand_write_buf;
+ this->read_buf = tx4925ndfmc_nand_read_buf;
+ this->verify_buf = tx4925ndfmc_nand_verify_buf;
+
+	/* Scan to find existence of the device */
+ if (nand_scan (tx4925ndfmc_mtd, 1)) {
+ err = -ENXIO;
+ goto out_ior;
+ }
+
+ /* Register the partitions */
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ {
+ int mtd_parts_nb = 0;
+ struct mtd_partition *mtd_parts = 0;
+ mtd_parts_nb = parse_cmdline_partitions(tx4925ndfmc_mtd, &mtd_parts, "tx4925ndfmc");
+ if (mtd_parts_nb > 0)
+ add_mtd_partitions(tx4925ndfmc_mtd, mtd_parts, mtd_parts_nb);
+ else
+ add_mtd_device(tx4925ndfmc_mtd);
+ }
+#else /* ifdef CONFIG_MTD_CMDLINE_PARTS */
+ switch(tx4925ndfmc_mtd->size){
+ case 0x01000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info16k, NUM_PARTITIONS16K); break;
+ case 0x02000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info32k, NUM_PARTITIONS32K); break;
+ case 0x04000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info64k, NUM_PARTITIONS64K); break;
+ case 0x08000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info128k, NUM_PARTITIONS128K); break;
+ default: {
+ printk ("Unsupported SmartMedia device\n");
+ err = -ENXIO;
+ goto out_ior;
+ }
+ }
+#endif /* ifdef CONFIG_MTD_CMDLINE_PARTS */
+ goto out;
+
+out_ior:
+out:
+ return err;
+}
+
+module_init(tx4925ndfmc_init);
+
+/*
+ * Clean up routine
+ */
+#ifdef MODULE
+static void __exit tx4925ndfmc_cleanup (void)
+{
+ /* Release resources, unregister device */
+ nand_release (tx4925ndfmc_mtd);
+
+ /* Free the MTD device structure */
+ kfree (tx4925ndfmc_mtd);
+}
+module_exit(tx4925ndfmc_cleanup);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alice Hennessy <ahennessy@mvista.com>");
+MODULE_DESCRIPTION("Glue layer for SmartMediaCard on Toshiba RBTX4925");
diff --git a/linux-2.4.x/drivers/mtd/nand/tx4938ndfmc.c b/linux-2.4.x/drivers/mtd/nand/tx4938ndfmc.c
new file mode 100644
index 0000000..f012728
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/nand/tx4938ndfmc.c
@@ -0,0 +1,406 @@
+/*
+ * drivers/mtd/nand/tx4938ndfmc.c
+ *
+ * Overview:
+ * This is a device driver for the NAND flash device connected to
+ * TX4938 internal NAND Memory Controller.
+ *	The TX4938 NDFMC is almost the same as the TX4925 NDFMC, but its registers are 64 bits wide.
+ *
+ * Author: source@mvista.com
+ *
+ * Based on spia.c by Steven J. Hill
+ *
+ * $Id: tx4938ndfmc.c,v 1.5 2005/11/07 11:14:31 gleixner Exp $
+ *
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#include <linux/config.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <linux/delay.h>
+#include <asm/tx4938/rbtx4938.h>
+
+extern struct nand_oobinfo jffs2_oobinfo;
+
+/*
+ * MTD structure for TX4938 NDFMC
+ */
+static struct mtd_info *tx4938ndfmc_mtd;
+
+/*
+ * Define partitions for flash device
+ */
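+/* Dummy read of the NDFMC mode control register; presumably flushes the
+   write buffer so earlier register writes complete before the status
+   register is polled. */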
+#define flush_wb() (void)tx4938_ndfmcptr->mcr;
+
+#define NUM_PARTITIONS 3
+#define NUMBER_OF_CIS_BLOCKS 24
+#define SIZE_OF_BLOCK 0x00004000
+#define NUMBER_OF_BLOCK_PER_ZONE 1024
+#define SIZE_OF_ZONE (NUMBER_OF_BLOCK_PER_ZONE * SIZE_OF_BLOCK)
+#ifndef CONFIG_MTD_CMDLINE_PARTS
+/*
+ * You can use the following sample of MTD partitions
+ * on the NAND Flash Memory 32MB or more.
+ *
+ * The following figure shows the image of the sample partition on
+ * the 32MB NAND Flash Memory.
+ *
+ * Block No.
+ * 0 +-----------------------------+ ------
+ * | CIS | ^
+ * 24 +-----------------------------+ |
+ * | kernel image | | Zone 0
+ * | | |
+ * +-----------------------------+ |
+ * 1023 | unused area | v
+ * +-----------------------------+ ------
+ * 1024 | JFFS2 | ^
+ * | | |
+ * | | | Zone 1
+ * | | |
+ * | | |
+ * | | v
+ * 2047 +-----------------------------+ ------
+ *
+ */
+static struct mtd_partition partition_info[NUM_PARTITIONS] = {
+ {
+ .name = "RBTX4938 CIS Area",
+ .offset = 0,
+ .size = (NUMBER_OF_CIS_BLOCKS * SIZE_OF_BLOCK),
+ .mask_flags = MTD_WRITEABLE /* This partition is NOT writable */
+ },
+ {
+ .name = "RBTX4938 kernel image",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 8 * 0x00100000, /* 8MB (Depends on size of kernel image) */
+ .mask_flags = MTD_WRITEABLE /* This partition is NOT writable */
+ },
+ {
+ .name = "Root FS (JFFS2)",
+ .offset = (0 + SIZE_OF_ZONE), /* start address of next zone */
+ .size = MTDPART_SIZ_FULL
+ },
+};
+#endif
+
+static void tx4938ndfmc_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+ switch (cmd) {
+ case NAND_CTL_SETCLE:
+ tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_CLE;
+ break;
+ case NAND_CTL_CLRCLE:
+ tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_CLE;
+ break;
+ case NAND_CTL_SETALE:
+ tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_ALE;
+ break;
+ case NAND_CTL_CLRALE:
+ tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_ALE;
+ break;
+ /* TX4938_NDFMCR_CE bit is 0:high 1:low */
+ case NAND_CTL_SETNCE:
+ tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_CE;
+ break;
+ case NAND_CTL_CLRNCE:
+ tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_CE;
+ break;
+ case NAND_CTL_SETWP:
+ tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_WE;
+ break;
+ case NAND_CTL_CLRWP:
+ tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_WE;
+ break;
+ }
+}
+static int tx4938ndfmc_dev_ready(struct mtd_info *mtd)
+{
+ flush_wb();
+ return !(tx4938_ndfmcptr->sr & TX4938_NDFSR_BUSY);
+}
+static void tx4938ndfmc_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+ u32 mcr = tx4938_ndfmcptr->mcr;
+ mcr &= ~TX4938_NDFMCR_ECC_ALL;
+ tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF;
+ tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_READ;
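+	/* the three ECC bytes are streamed out through the data register;
+	 * note the store order: ecc_code[1], ecc_code[0], ecc_code[2] */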
+ ecc_code[1] = tx4938_ndfmcptr->dtr;
+ ecc_code[0] = tx4938_ndfmcptr->dtr;
+ ecc_code[2] = tx4938_ndfmcptr->dtr;
+ tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF;
+}
+static void tx4938ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ u32 mcr = tx4938_ndfmcptr->mcr;
+ mcr &= ~TX4938_NDFMCR_ECC_ALL;
+ tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_RESET;
+ tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF;
+ tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_ON;
+}
+
+static u_char tx4938ndfmc_nand_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *this = mtd->priv;
+ return tx4938_read_nfmc(this->IO_ADDR_R);
+}
+
+static void tx4938ndfmc_nand_write_byte(struct mtd_info *mtd, u_char byte)
+{
+ struct nand_chip *this = mtd->priv;
+ tx4938_write_nfmc(byte, this->IO_ADDR_W);
+}
+
+static void tx4938ndfmc_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ tx4938_write_nfmc(buf[i], this->IO_ADDR_W);
+}
+
+static void tx4938ndfmc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ buf[i] = tx4938_read_nfmc(this->IO_ADDR_R);
+}
+
+static int tx4938ndfmc_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i=0; i<len; i++)
+ if (buf[i] != tx4938_read_nfmc(this->IO_ADDR_R))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Send command to NAND device
+ */
+static void tx4938ndfmc_nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+ register struct nand_chip *this = mtd->priv;
+
+ /* Begin command latch cycle */
+ this->hwcontrol(mtd, NAND_CTL_SETCLE);
+ /*
+ * Write out the command to the device.
+ */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->oobblock) {
+ /* OOB area */
+ column -= mtd->oobblock;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ this->write_byte(mtd, readcmd);
+ }
+ this->write_byte(mtd, command);
+
+ /* Set ALE and clear CLE to start address cycle */
+ this->hwcontrol(mtd, NAND_CTL_CLRCLE);
+
+ if (column != -1 || page_addr != -1) {
+ this->hwcontrol(mtd, NAND_CTL_SETALE);
+
+ /* Serially input address */
+ if (column != -1)
+ this->write_byte(mtd, column);
+ if (page_addr != -1) {
+ this->write_byte(mtd, (unsigned char) (page_addr & 0xff));
+ this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff));
+ /* One more address cycle for higher density devices */
+ if (mtd->size & 0x0c000000)
+ this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f));
+ }
+ /* Latch in address */
+ this->hwcontrol(mtd, NAND_CTL_CLRALE);
+ }
+
+ /*
+ * program and erase have their own busy handlers
+ * status and sequential in needs no delay
+ */
+ switch (command) {
+
+ case NAND_CMD_PAGEPROG:
+ /* Turn off WE */
+ this->hwcontrol (mtd, NAND_CTL_CLRWP);
+ return;
+
+ case NAND_CMD_SEQIN:
+ /* Turn on WE */
+ this->hwcontrol (mtd, NAND_CTL_SETWP);
+ return;
+
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_STATUS:
+ return;
+
+ case NAND_CMD_RESET:
+ if (this->dev_ready)
+ break;
+ this->hwcontrol(mtd, NAND_CTL_SETCLE);
+ this->write_byte(mtd, NAND_CMD_STATUS);
+ this->hwcontrol(mtd, NAND_CTL_CLRCLE);
+ while ( !(this->read_byte(mtd) & 0x40));
+ return;
+
+ /* This applies to read commands */
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!this->dev_ready) {
+ udelay (this->chip_delay);
+ return;
+ }
+ }
+
+ /* wait until command is processed */
+ while (!this->dev_ready(mtd));
+}
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, char *);
+#endif
+/*
+ * Main initialization routine
+ */
+int __init tx4938ndfmc_init (void)
+{
+ struct nand_chip *this;
+ int bsprt = 0, hold = 0xf, spw = 0xf;
+ int protected = 0;
+
+ if ((*rbtx4938_piosel_ptr & 0x0c) != 0x08) {
+ printk("TX4938 NDFMC: disabled by IOC PIOSEL\n");
+ return -ENODEV;
+ }
+ bsprt = 1;
+ hold = 2;
+ spw = 9 - 1; /* 8 GBUSCLK = 80ns (@ GBUSCLK 100MHz) */
+
+ if ((tx4938_ccfgptr->pcfg &
+ (TX4938_PCFG_ATA_SEL|TX4938_PCFG_ISA_SEL|TX4938_PCFG_NDF_SEL))
+ != TX4938_PCFG_NDF_SEL) {
+ printk("TX4938 NDFMC: disabled by PCFG.\n");
+ return -ENODEV;
+ }
+
+ /* reset NDFMC */
+ tx4938_ndfmcptr->rstr |= TX4938_NDFRSTR_RST;
+ while (tx4938_ndfmcptr->rstr & TX4938_NDFRSTR_RST)
+ ;
+	/* setup BusSeparate, Hold Time, Strobe Pulse Width */
+ tx4938_ndfmcptr->mcr = bsprt ? TX4938_NDFMCR_BSPRT : 0;
+ tx4938_ndfmcptr->spr = hold << 4 | spw;
+
+ /* Allocate memory for MTD device structure and private data */
+ tx4938ndfmc_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip),
+ GFP_KERNEL);
+ if (!tx4938ndfmc_mtd) {
+ printk ("Unable to allocate TX4938 NDFMC MTD device structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Get pointer to private data */
+ this = (struct nand_chip *) (&tx4938ndfmc_mtd[1]);
+
+ /* Initialize structures */
+ memset((char *) tx4938ndfmc_mtd, 0, sizeof(struct mtd_info));
+ memset((char *) this, 0, sizeof(struct nand_chip));
+
+ /* Link the private data with the MTD structure */
+ tx4938ndfmc_mtd->priv = this;
+
+ /* Set address of NAND IO lines */
+ this->IO_ADDR_R = (unsigned long)&tx4938_ndfmcptr->dtr;
+ this->IO_ADDR_W = (unsigned long)&tx4938_ndfmcptr->dtr;
+ this->hwcontrol = tx4938ndfmc_hwcontrol;
+ this->dev_ready = tx4938ndfmc_dev_ready;
+ this->calculate_ecc = tx4938ndfmc_calculate_ecc;
+ this->correct_data = nand_correct_data;
+ this->enable_hwecc = tx4938ndfmc_enable_hwecc;
+ this->eccmode = NAND_ECC_HW3_256;
+ this->chip_delay = 100;
+ this->read_byte = tx4938ndfmc_nand_read_byte;
+ this->write_byte = tx4938ndfmc_nand_write_byte;
+ this->cmdfunc = tx4938ndfmc_nand_command;
+ this->write_buf = tx4938ndfmc_nand_write_buf;
+ this->read_buf = tx4938ndfmc_nand_read_buf;
+ this->verify_buf = tx4938ndfmc_nand_verify_buf;
+
+	/* Scan to find existence of the device */
+ if (nand_scan (tx4938ndfmc_mtd, 1)) {
+ kfree (tx4938ndfmc_mtd);
+ return -ENXIO;
+ }
+
+ if (protected) {
+ printk(KERN_INFO "TX4938 NDFMC: write protected.\n");
+ tx4938ndfmc_mtd->flags &= ~(MTD_WRITEABLE | MTD_ERASEABLE);
+ }
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ {
+ int mtd_parts_nb = 0;
+ struct mtd_partition *mtd_parts = 0;
+ mtd_parts_nb = parse_cmdline_partitions(tx4938ndfmc_mtd, &mtd_parts, "tx4938ndfmc");
+ if (mtd_parts_nb > 0)
+ add_mtd_partitions(tx4938ndfmc_mtd, mtd_parts, mtd_parts_nb);
+ else
+ add_mtd_device(tx4938ndfmc_mtd);
+ }
+#else
+ add_mtd_partitions(tx4938ndfmc_mtd, partition_info, NUM_PARTITIONS );
+#endif
+
+ return 0;
+}
+module_init(tx4938ndfmc_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit tx4938ndfmc_cleanup (void)
+{
+ /* Release resources, unregister device */
+ nand_release (tx4938ndfmc_mtd);
+
+ /* Free the MTD device structure */
+ kfree (tx4938ndfmc_mtd);
+}
+module_exit(tx4938ndfmc_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alice Hennessy <ahennessy@mvista.com>");
+MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on TX4938 NDFMC");
diff --git a/linux-2.4.x/drivers/mtd/nftlcore.c b/linux-2.4.x/drivers/mtd/nftlcore.c
index 2393d58..cf493d0 100644
--- a/linux-2.4.x/drivers/mtd/nftlcore.c
+++ b/linux-2.4.x/drivers/mtd/nftlcore.c
@@ -1,7 +1,7 @@
/* Linux driver for NAND Flash Translation Layer */
/* (c) 1999 Machine Vision Holdings, Inc. */
/* Author: David Woodhouse <dwmw2@infradead.org> */
-/* $Id: nftlcore.c,v 1.85 2001/11/20 11:42:33 dwmw2 Exp $ */
+/* $Id: nftlcore.c,v 1.99 2005/11/29 20:01:25 gleixner Exp $ */
/*
The contents of this file are distributed under the GNU General
@@ -23,14 +23,13 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/blkpg.h>
+#include <linux/hdreg.h>
-#ifdef CONFIG_KMOD
#include <linux/kmod.h>
-#endif
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
#include <linux/mtd/nftl.h>
-#include <linux/mtd/compatmac.h>
+#include <linux/mtd/blktrans.h>
/* maximum number of loops while examining next block, to have a
chance to detect consistency problems (they should never happen
@@ -38,190 +37,103 @@
#define MAX_LOOPS 10000
-/* NFTL block device stuff */
-#define MAJOR_NR NFTL_MAJOR
-#define DEVICE_REQUEST nftl_request
-#define DEVICE_OFF(device)
-
-
-#include <linux/blk.h>
-#include <linux/hdreg.h>
-
-/* Linux-specific block device functions */
-
-/* I _HATE_ the Linux block device setup more than anything else I've ever
- * encountered, except ...
- */
-
-static int nftl_sizes[256];
-static int nftl_blocksizes[256];
-
-/* .. for the Linux partition table handling. */
-struct hd_struct part_table[256];
-
-#if LINUX_VERSION_CODE < 0x20328
-static void dummy_init (struct gendisk *crap)
-{}
-#endif
-
-static struct gendisk nftl_gendisk = {
- major: MAJOR_NR,
- major_name: "nftl",
- minor_shift: NFTL_PARTN_BITS, /* Bits to shift to get real from partition */
- max_p: (1<<NFTL_PARTN_BITS)-1, /* Number of partitions per real */
-#if LINUX_VERSION_CODE < 0x20328
- max_nr: MAX_NFTLS, /* maximum number of real */
- init: dummy_init, /* init function */
-#endif
- part: part_table, /* hd struct */
- sizes: nftl_sizes, /* block sizes */
-};
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,14)
-#define BLK_INC_USE_COUNT MOD_INC_USE_COUNT
-#define BLK_DEC_USE_COUNT MOD_DEC_USE_COUNT
-#else
-#define BLK_INC_USE_COUNT do {} while(0)
-#define BLK_DEC_USE_COUNT do {} while(0)
-#endif
-struct NFTLrecord *NFTLs[MAX_NFTLS];
-
-static void NFTL_setup(struct mtd_info *mtd)
+static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
- int i;
struct NFTLrecord *nftl;
unsigned long temp;
- int firstfree = -1;
- DEBUG(MTD_DEBUG_LEVEL1,"NFTL_setup\n");
+ if (mtd->type != MTD_NANDFLASH)
+ return;
+ /* OK, this is moderately ugly. But probably safe. Alternatives? */
+ if (memcmp(mtd->name, "DiskOnChip", 10))
+ return;
- for (i = 0; i < MAX_NFTLS; i++) {
- if (!NFTLs[i] && firstfree == -1)
- firstfree = i;
- else if (NFTLs[i] && NFTLs[i]->mtd == mtd) {
- /* This is a Spare Media Header for an NFTL we've already found */
- DEBUG(MTD_DEBUG_LEVEL1, "MTD already mounted as NFTL\n");
- return;
- }
- }
- if (firstfree == -1) {
- printk(KERN_WARNING "No more NFTL slot available\n");
+ if (!mtd->block_isbad) {
+ printk(KERN_ERR
+"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
+"Please use the new diskonchip driver under the NAND subsystem.\n");
return;
- }
+ }
+
+ DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name);
nftl = kmalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
+
if (!nftl) {
- printk(KERN_WARNING "Out of memory for NFTL data structures\n");
+ printk(KERN_WARNING "NFTL: out of memory for data structures\n");
return;
}
+ memset(nftl, 0, sizeof(*nftl));
- init_MUTEX(&nftl->mutex);
-
- /* get physical parameters */
- nftl->EraseSize = mtd->erasesize;
- nftl->nb_blocks = mtd->size / mtd->erasesize;
- nftl->mtd = mtd;
+ nftl->mbd.mtd = mtd;
+ nftl->mbd.devnum = -1;
+ nftl->mbd.blksize = 512;
+ nftl->mbd.tr = tr;
+ memcpy(&nftl->oobinfo, &mtd->oobinfo, sizeof(struct nand_oobinfo));
+ nftl->oobinfo.useecc = MTD_NANDECC_PLACEONLY;
if (NFTL_mount(nftl) < 0) {
- printk(KERN_WARNING "Could not mount NFTL device\n");
+ printk(KERN_WARNING "NFTL: could not mount device\n");
kfree(nftl);
return;
}
/* OK, it's a new one. Set up all the data structures. */
-#ifdef PSYCHO_DEBUG
- printk("Found new NFTL nftl%c\n", firstfree + 'a');
-#endif
- /* linux stuff */
- nftl->usecount = 0;
+ /* Calculate geometry */
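+	/* Start from 1024 cylinders and 16 heads; sectors, heads and cylinders
+	 * are then adjusted in turn to approximate mbd.size (the warning below
+	 * fires if no exact match can be found). */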
nftl->cylinders = 1024;
nftl->heads = 16;
temp = nftl->cylinders * nftl->heads;
- nftl->sectors = nftl->nr_sects / temp;
- if (nftl->nr_sects % temp) {
+ nftl->sectors = nftl->mbd.size / temp;
+ if (nftl->mbd.size % temp) {
nftl->sectors++;
temp = nftl->cylinders * nftl->sectors;
- nftl->heads = nftl->nr_sects / temp;
+ nftl->heads = nftl->mbd.size / temp;
- if (nftl->nr_sects % temp) {
+ if (nftl->mbd.size % temp) {
nftl->heads++;
temp = nftl->heads * nftl->sectors;
- nftl->cylinders = nftl->nr_sects / temp;
+ nftl->cylinders = nftl->mbd.size / temp;
}
}
- if (nftl->nr_sects != nftl->heads * nftl->cylinders * nftl->sectors) {
- printk(KERN_WARNING "Cannot calculate an NFTL geometry to "
- "match size of 0x%lx.\n", nftl->nr_sects);
- printk(KERN_WARNING "Using C:%d H:%d S:%d (== 0x%lx sects)\n",
- nftl->cylinders, nftl->heads , nftl->sectors,
- (long)nftl->cylinders * (long)nftl->heads * (long)nftl->sectors );
-
- /* Oh no we don't have nftl->nr_sects = nftl->heads * nftl->cylinders * nftl->sectors; */
+ if (nftl->mbd.size != nftl->heads * nftl->cylinders * nftl->sectors) {
+ /*
+ Oh no we don't have
+ mbd.size == heads * cylinders * sectors
+ */
+ printk(KERN_WARNING "NFTL: cannot calculate a geometry to "
+ "match size of 0x%lx.\n", nftl->mbd.size);
+ printk(KERN_WARNING "NFTL: using C:%d H:%d S:%d "
+ "(== 0x%lx sects)\n",
+ nftl->cylinders, nftl->heads , nftl->sectors,
+ (long)nftl->cylinders * (long)nftl->heads *
+ (long)nftl->sectors );
}
- NFTLs[firstfree] = nftl;
- /* Finally, set up the block device sizes */
- nftl_sizes[firstfree * 16] = nftl->nr_sects;
- //nftl_blocksizes[firstfree*16] = 512;
- part_table[firstfree * 16].nr_sects = nftl->nr_sects;
-
- nftl_gendisk.nr_real++;
-
- /* partition check ... */
-#if LINUX_VERSION_CODE < 0x20328
- resetup_one_dev(&nftl_gendisk, firstfree);
-#else
- grok_partitions(&nftl_gendisk, firstfree, 1<<NFTL_PARTN_BITS, nftl->nr_sects);
-#endif
-}
-static void NFTL_unsetup(int i)
-{
- struct NFTLrecord *nftl = NFTLs[i];
-
- DEBUG(MTD_DEBUG_LEVEL1, "NFTL_unsetup %d\n", i);
-
- NFTLs[i] = NULL;
-
- if (nftl->ReplUnitTable)
+ if (add_mtd_blktrans_dev(&nftl->mbd)) {
kfree(nftl->ReplUnitTable);
- if (nftl->EUNtable)
kfree(nftl->EUNtable);
-
- nftl_gendisk.nr_real--;
- kfree(nftl);
-}
-
-/* Search the MTD device for NFTL partitions */
-static void NFTL_notify_add(struct mtd_info *mtd)
-{
- DEBUG(MTD_DEBUG_LEVEL1, "NFTL_notify_add for %s\n", mtd->name);
-
- if (mtd) {
- if (!mtd->read_oob) {
- /* If this MTD doesn't have out-of-band data,
- then there's no point continuing */
- DEBUG(MTD_DEBUG_LEVEL1, "No OOB data, quitting\n");
- return;
- }
- DEBUG(MTD_DEBUG_LEVEL3, "mtd->read = %p, size = %d, erasesize = %d\n",
- mtd->read, mtd->size, mtd->erasesize);
-
- NFTL_setup(mtd);
+ kfree(nftl);
+ return;
}
+#ifdef PSYCHO_DEBUG
+ printk(KERN_INFO "NFTL: Found new nftl%c\n", nftl->mbd.devnum + 'a');
+#endif
}
-static void NFTL_notify_remove(struct mtd_info *mtd)
+static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
{
- int i;
+ struct NFTLrecord *nftl = (void *)dev;
- for (i = 0; i < MAX_NFTLS; i++) {
- if (NFTLs[i] && NFTLs[i]->mtd == mtd)
- NFTL_unsetup(i);
- }
+ DEBUG(MTD_DEBUG_LEVEL1, "NFTL: remove_dev (i=%d)\n", dev->devnum);
+
+ del_mtd_blktrans_dev(dev);
+ kfree(nftl->ReplUnitTable);
+ kfree(nftl->EUNtable);
+ kfree(nftl);
}
#ifdef CONFIG_NFTL_RW
@@ -262,7 +174,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
if (!silly--) {
printk("Argh! No free blocks found! LastFreeEUN = %d, "
- "FirstEUN = %d\n", nftl->LastFreeEUN,
+ "FirstEUN = %d\n", nftl->LastFreeEUN,
le16_to_cpu(nftl->MediaHdr.FirstPhysicalEUN));
return 0xffff;
}
@@ -294,7 +206,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
"Virtual Unit Chain %d!\n", thisVUC);
return BLOCK_NIL;
}
-
+
/* Scan to find the Erase Unit which holds the actual data for each
512-byte block within the Chain.
*/
@@ -305,13 +217,13 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
targetEUN = thisEUN;
for (block = 0; block < nftl->EraseSize / 512; block ++) {
- MTD_READOOB(nftl->mtd,
+ MTD_READOOB(nftl->mbd.mtd,
(thisEUN * nftl->EraseSize) + (block * 512),
16 , &retlen, (char *)&oob);
if (block == 2) {
foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
if (foldmark == FOLD_MARK_IN_PROGRESS) {
- DEBUG(MTD_DEBUG_LEVEL1,
+ DEBUG(MTD_DEBUG_LEVEL1,
"Write Inhibited on EUN %d\n", thisEUN);
inplace = 0;
} else {
@@ -333,7 +245,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (!BlockFreeFound[block])
BlockMap[block] = thisEUN;
else
- printk(KERN_WARNING
+ printk(KERN_WARNING
"SECTOR_USED found after SECTOR_FREE "
"in Virtual Unit Chain %d for block %d\n",
thisVUC, block);
@@ -342,7 +254,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (!BlockFreeFound[block])
BlockMap[block] = BLOCK_NIL;
else
- printk(KERN_WARNING
+ printk(KERN_WARNING
"SECTOR_DELETED found after SECTOR_FREE "
"in Virtual Unit Chain %d for block %d\n",
thisVUC, block);
@@ -361,14 +273,14 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
thisVUC);
return BLOCK_NIL;
}
-
+
thisEUN = nftl->ReplUnitTable[thisEUN];
}
if (inplace) {
/* We're being asked to be a fold-in-place. Check
that all blocks which actually have data associated
- with them (i.e. BlockMap[block] != BLOCK_NIL) are
+ with them (i.e. BlockMap[block] != BLOCK_NIL) are
either already present or SECTOR_FREE in the target
block. If not, we're going to have to fold out-of-place
anyway.
@@ -381,7 +293,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
"block %d was %x lastEUN, "
"and is in EUN %d (%s) %d\n",
thisVUC, block, BlockLastState[block],
- BlockMap[block],
+ BlockMap[block],
BlockMap[block]== targetEUN ? "==" : "!=",
targetEUN);
inplace = 0;
@@ -398,17 +310,17 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
inplace = 0;
}
}
-
+
if (!inplace) {
DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. "
"Trying out-of-place\n", thisVUC);
/* We need to find a targetEUN to fold into. */
targetEUN = NFTL_findfreeblock(nftl, 1);
if (targetEUN == BLOCK_NIL) {
- /* Ouch. Now we're screwed. We need to do a
+ /* Ouch. Now we're screwed. We need to do a
fold-in-place of another chain to make room
for this one. We need a better way of selecting
- which chain to fold, because makefreeblock will
+ which chain to fold, because makefreeblock will
only ask us to fold the same one again.
*/
printk(KERN_WARNING
@@ -422,7 +334,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
chain by selecting the longer one */
oob.u.c.FoldMark = oob.u.c.FoldMark1 = cpu_to_le16(FOLD_MARK_IN_PROGRESS);
oob.u.c.unused = 0xffffffff;
- MTD_WRITEOOB(nftl->mtd, (nftl->EraseSize * targetEUN) + 2 * 512 + 8,
+ MTD_WRITEOOB(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + 2 * 512 + 8,
8, &retlen, (char *)&oob.u);
}
@@ -445,31 +357,33 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
happen in case of media errors or deleted blocks) */
if (BlockMap[block] == BLOCK_NIL)
continue;
-
- ret = MTD_READECC(nftl->mtd, (nftl->EraseSize * BlockMap[block])
- + (block * 512), 512, &retlen, movebuf, (char *)&oob);
+
+ ret = MTD_READ(nftl->mbd.mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512, &retlen, movebuf);
if (ret < 0) {
- ret = MTD_READECC(nftl->mtd, (nftl->EraseSize * BlockMap[block])
+ ret = MTD_READ(nftl->mbd.mtd, (nftl->EraseSize * BlockMap[block])
+ (block * 512), 512, &retlen,
- movebuf, (char *)&oob);
- if (ret != -EIO)
+ movebuf);
+ if (ret != -EIO)
printk("Error went away on retry.\n");
}
- MTD_WRITEECC(nftl->mtd, (nftl->EraseSize * targetEUN) + (block * 512),
- 512, &retlen, movebuf, (char *)&oob);
+ memset(&oob, 0xff, sizeof(struct nftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+ MTD_WRITEECC(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + (block * 512),
+ 512, &retlen, movebuf, (char *)&oob, &nftl->oobinfo);
}
-
+
/* add the header so that it is now a valid chain */
oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum
= cpu_to_le16(thisVUC);
oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff;
-
- MTD_WRITEOOB(nftl->mtd, (nftl->EraseSize * targetEUN) + 8,
+
+ MTD_WRITEOOB(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + 8,
8, &retlen, (char *)&oob.u);
/* OK. We've moved the whole lot into the new block. Now we have to free the original blocks. */
- /* At this point, we have two different chains for this Virtual Unit, and no way to tell
+ /* At this point, we have two different chains for this Virtual Unit, and no way to tell
them apart. If we crash now, we get confused. However, both contain the same data, so we
shouldn't actually lose data in this case. It's just that when we load up on a medium which
has duplicate chains, we need to free one of the chains because it's not necessary any more.
@@ -477,7 +391,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
thisEUN = nftl->EUNtable[thisVUC];
DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n");
- /* For each block in the old chain (except the targetEUN of course),
+ /* For each block in the old chain (except the targetEUN of course),
free it and make it available for future use */
while (thisEUN <= nftl->lastEUN && thisEUN != targetEUN) {
unsigned int EUNtmp;
@@ -486,7 +400,6 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (NFTL_formatblock(nftl, thisEUN) < 0) {
/* could not erase : mark block as reserved
- * FixMe: Update Bad Unit Table on disk
*/
nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED;
} else {
@@ -496,7 +409,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
}
thisEUN = EUNtmp;
}
-
+
/* Make this the new start of chain for thisVUC */
nftl->ReplUnitTable[targetEUN] = BLOCK_NIL;
nftl->EUNtable[thisVUC] = targetEUN;
@@ -504,9 +417,9 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
return targetEUN;
}
-u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock)
+static u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock)
{
- /* This is the part that needs some cleverness applied.
+ /* This is the part that needs some cleverness applied.
For now, I'm doing the minimum applicable to actually
get the thing to work.
Wear-levelling and other clever stuff needs to be implemented
@@ -553,7 +466,7 @@ u16 NFTL_makefreeblock( struct NFTLrecord *nftl , unsigned pendingblock)
return NFTL_foldchain (nftl, LongestChain, pendingblock);
}
-/* NFTL_findwriteunit: Return the unit number into which we can write
+/* NFTL_findwriteunit: Return the unit number into which we can write
for this block. Make it available if it isn't already
*/
static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
@@ -571,7 +484,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
a free space for the block in question.
*/
- /* This condition catches the 0x[7f]fff cases, as well as
+ /* This condition catches the 0x[7f]fff cases, as well as
being a sanity check for past-end-of-media access
*/
lastEUN = BLOCK_NIL;
@@ -584,9 +497,9 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
lastEUN = writeEUN;
- MTD_READOOB(nftl->mtd, (writeEUN * nftl->EraseSize) + blockofs,
+ MTD_READOOB(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs,
8, &retlen, (char *)&bci);
-
+
DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n",
block , writeEUN, le16_to_cpu(bci.Status));
@@ -601,10 +514,10 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
break;
default:
// Invalid block. Don't use it any more. Must implement.
- break;
+ break;
}
-
- if (!silly--) {
+
+ if (!silly--) {
printk(KERN_WARNING
"Infinite loop in Virtual Unit Chain 0x%x\n",
thisVUC);
@@ -615,7 +528,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
writeEUN = nftl->ReplUnitTable[writeEUN];
}
- /* OK. We didn't find one in the existing chain, or there
+ /* OK. We didn't find one in the existing chain, or there
is no existing chain. */
/* Try to find an already-free block */
@@ -629,12 +542,12 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
/* First remember the start of this chain */
//u16 startEUN = nftl->EUNtable[thisVUC];
-
+
//printk("Write to VirtualUnitChain %d, calling makefreeblock()\n", thisVUC);
writeEUN = NFTL_makefreeblock(nftl, 0xffff);
if (writeEUN == BLOCK_NIL) {
- /* OK, we accept that the above comment is
+ /* OK, we accept that the above comment is
lying - there may have been free blocks
last time we called NFTL_findfreeblock(),
but they are reserved for when we're
@@ -645,21 +558,21 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
}
if (writeEUN == BLOCK_NIL) {
/* Ouch. This should never happen - we should
- always be able to make some room somehow.
- If we get here, we've allocated more storage
+ always be able to make some room somehow.
+ If we get here, we've allocated more storage
space than actual media, or our makefreeblock
routine is missing something.
*/
printk(KERN_WARNING "Cannot make free space.\n");
return BLOCK_NIL;
- }
+ }
//printk("Restarting scan\n");
lastEUN = BLOCK_NIL;
continue;
}
/* We've found a free block. Insert it into the chain. */
-
+
if (lastEUN != BLOCK_NIL) {
thisVUC |= 0x8000; /* It's a replacement block */
} else {
@@ -672,12 +585,12 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
nftl->ReplUnitTable[writeEUN] = BLOCK_NIL;
/* ... and on the flash itself */
- MTD_READOOB(nftl->mtd, writeEUN * nftl->EraseSize + 8, 8,
+ MTD_READOOB(nftl->mbd.mtd, writeEUN * nftl->EraseSize + 8, 8,
&retlen, (char *)&oob.u);
oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
- MTD_WRITEOOB(nftl->mtd, writeEUN * nftl->EraseSize + 8, 8,
+ MTD_WRITEOOB(nftl->mbd.mtd, writeEUN * nftl->EraseSize + 8, 8,
&retlen, (char *)&oob.u);
/* we link the new block to the chain only after the
@@ -687,13 +600,13 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
/* Both in our cache... */
nftl->ReplUnitTable[lastEUN] = writeEUN;
/* ... and on the flash itself */
- MTD_READOOB(nftl->mtd, (lastEUN * nftl->EraseSize) + 8,
+ MTD_READOOB(nftl->mbd.mtd, (lastEUN * nftl->EraseSize) + 8,
8, &retlen, (char *)&oob.u);
oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum
= cpu_to_le16(writeEUN);
- MTD_WRITEOOB(nftl->mtd, (lastEUN * nftl->EraseSize) + 8,
+ MTD_WRITEOOB(nftl->mbd.mtd, (lastEUN * nftl->EraseSize) + 8,
8, &retlen, (char *)&oob.u);
}
@@ -706,12 +619,14 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
return 0xffff;
}
-static int NFTL_writeblock(struct NFTLrecord *nftl, unsigned block, char *buffer)
+static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
{
+ struct NFTLrecord *nftl = (void *)mbd;
u16 writeEUN;
unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
size_t retlen;
- u8 eccbuf[6];
+ struct nftl_oob oob;
writeEUN = NFTL_findwriteunit(nftl, block);
@@ -722,16 +637,20 @@ static int NFTL_writeblock(struct NFTLrecord *nftl, unsigned block, char *buffer
return 1;
}
- MTD_WRITEECC(nftl->mtd, (writeEUN * nftl->EraseSize) + blockofs,
- 512, &retlen, (char *)buffer, (char *)eccbuf);
- /* no need to write SECTOR_USED flags since they are written in mtd_writeecc */
+ memset(&oob, 0xff, sizeof(struct nftl_oob));
+ oob.b.Status = oob.b.Status1 = SECTOR_USED;
+ MTD_WRITEECC(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs,
+ 512, &retlen, (char *)buffer, (char *)&oob, &nftl->oobinfo);
+ /* need to write SECTOR_USED flags since they are not written in mtd_writeecc */
return 0;
}
#endif /* CONFIG_NFTL_RW */
-static int NFTL_readblock(struct NFTLrecord *nftl, unsigned block, char *buffer)
+static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
+ char *buffer)
{
+ struct NFTLrecord *nftl = (void *)mbd;
u16 lastgoodEUN;
u16 thisEUN = nftl->EUNtable[block / (nftl->EraseSize / 512)];
unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
@@ -744,7 +663,7 @@ static int NFTL_readblock(struct NFTLrecord *nftl, unsigned block, char *buffer)
if (thisEUN != BLOCK_NIL) {
while (thisEUN < nftl->nb_blocks) {
- if (MTD_READOOB(nftl->mtd, (thisEUN * nftl->EraseSize) + blockofs,
+ if (MTD_READOOB(nftl->mbd.mtd, (thisEUN * nftl->EraseSize) + blockofs,
8, &retlen, (char *)&bci) < 0)
status = SECTOR_IGNORE;
else
@@ -763,13 +682,13 @@ static int NFTL_readblock(struct NFTLrecord *nftl, unsigned block, char *buffer)
case SECTOR_IGNORE:
break;
default:
- printk("Unknown status for block %d in EUN %d: %x\n",
+ printk("Unknown status for block %ld in EUN %d: %x\n",
block, thisEUN, status);
break;
}
if (!silly--) {
- printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%x\n",
+ printk(KERN_WARNING "Infinite loop in Virtual Unit Chain 0x%lx\n",
block / (nftl->EraseSize / 512));
return 1;
}
@@ -784,265 +703,22 @@ static int NFTL_readblock(struct NFTLrecord *nftl, unsigned block, char *buffer)
} else {
loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
size_t retlen;
- u_char eccbuf[6];
- if (MTD_READECC(nftl->mtd, ptr, 512, &retlen, buffer, eccbuf))
+ if (MTD_READ(nftl->mbd.mtd, ptr, 512, &retlen, buffer))
return -EIO;
}
return 0;
}
-static int nftl_ioctl(struct inode * inode, struct file * file, unsigned int cmd, unsigned long arg)
-{
- struct NFTLrecord *nftl;
- int p;
-
- nftl = NFTLs[MINOR(inode->i_rdev) >> NFTL_PARTN_BITS];
-
- if (!nftl) return -EINVAL;
-
- switch (cmd) {
- case HDIO_GETGEO: {
- struct hd_geometry g;
-
- g.heads = nftl->heads;
- g.sectors = nftl->sectors;
- g.cylinders = nftl->cylinders;
- g.start = part_table[MINOR(inode->i_rdev)].start_sect;
- return copy_to_user((void *)arg, &g, sizeof g) ? -EFAULT : 0;
- }
- case BLKGETSIZE: /* Return device size */
- return put_user(part_table[MINOR(inode->i_rdev)].nr_sects,
- (unsigned long *) arg);
-
-#ifdef BLKGETSIZE64
- case BLKGETSIZE64:
- return put_user((u64)part_table[MINOR(inode->i_rdev)].nr_sects << 9,
- (u64 *)arg);
-#endif
-
- case BLKFLSBUF:
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
- fsync_dev(inode->i_rdev);
- invalidate_buffers(inode->i_rdev);
- if (nftl->mtd->sync)
- nftl->mtd->sync(nftl->mtd);
- return 0;
-
- case BLKRRPART:
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
- if (nftl->usecount > 1) return -EBUSY;
- /*
- * We have to flush all buffers and invalidate caches,
- * or we won't be able to re-use the partitions,
- * if there was a change and we don't want to reboot
- */
- p = (1<<NFTL_PARTN_BITS) - 1;
- while (p-- > 0) {
- kdev_t devp = MKDEV(MAJOR(inode->i_dev), MINOR(inode->i_dev)+p);
- if (part_table[p].nr_sects > 0)
- invalidate_device (devp, 1);
-
- part_table[MINOR(inode->i_dev)+p].start_sect = 0;
- part_table[MINOR(inode->i_dev)+p].nr_sects = 0;
- }
-
-#if LINUX_VERSION_CODE < 0x20328
- resetup_one_dev(&nftl_gendisk, MINOR(inode->i_rdev) >> NFTL_PARTN_BITS);
-#else
- grok_partitions(&nftl_gendisk, MINOR(inode->i_rdev) >> NFTL_PARTN_BITS,
- 1<<NFTL_PARTN_BITS, nftl->nr_sects);
-#endif
- return 0;
-
-#if (LINUX_VERSION_CODE < 0x20303)
- RO_IOCTLS(inode->i_rdev, arg); /* ref. linux/blk.h */
-#else
- case BLKROSET:
- case BLKROGET:
- case BLKSSZGET:
- return blk_ioctl(inode->i_rdev, cmd, arg);
-#endif
-
- default:
- return -EINVAL;
- }
-}
-
-void nftl_request(RQFUNC_ARG)
-{
- unsigned int dev, block, nsect;
- struct NFTLrecord *nftl;
- char *buffer;
- struct request *req;
- int res;
-
- while (1) {
- INIT_REQUEST; /* blk.h */
- req = CURRENT;
-
- /* We can do this because the generic code knows not to
- touch the request at the head of the queue */
- spin_unlock_irq(&io_request_lock);
-
- DEBUG(MTD_DEBUG_LEVEL2, "NFTL_request\n");
- DEBUG(MTD_DEBUG_LEVEL3, "NFTL %s request, from sector 0x%04lx for 0x%04lx sectors\n",
- (req->cmd == READ) ? "Read " : "Write",
- req->sector, req->current_nr_sectors);
-
- dev = MINOR(req->rq_dev);
- block = req->sector;
- nsect = req->current_nr_sectors;
- buffer = req->buffer;
- res = 1; /* succeed */
-
- if (dev >= MAX_NFTLS * (1<<NFTL_PARTN_BITS)) {
- /* there is no such partition */
- printk("nftl: bad minor number: device = %s\n",
- kdevname(req->rq_dev));
- res = 0; /* fail */
- goto repeat;
- }
-
- nftl = NFTLs[dev / (1<<NFTL_PARTN_BITS)];
- DEBUG(MTD_DEBUG_LEVEL3, "Waiting for mutex\n");
- down(&nftl->mutex);
- DEBUG(MTD_DEBUG_LEVEL3, "Got mutex\n");
-
- if (block + nsect > part_table[dev].nr_sects) {
- /* access past the end of device */
- printk("nftl%c%d: bad access: block = %d, count = %d\n",
- (MINOR(req->rq_dev)>>6)+'a', dev & 0xf, block, nsect);
- up(&nftl->mutex);
- res = 0; /* fail */
- goto repeat;
- }
-
- block += part_table[dev].start_sect;
-
- if (req->cmd == READ) {
- DEBUG(MTD_DEBUG_LEVEL2, "NFTL read request of 0x%x sectors @ %x "
- "(req->nr_sectors == %lx)\n", nsect, block, req->nr_sectors);
-
- for ( ; nsect > 0; nsect-- , block++, buffer += 512) {
- /* Read a single sector to req->buffer + (512 * i) */
- if (NFTL_readblock(nftl, block, buffer)) {
- DEBUG(MTD_DEBUG_LEVEL2, "NFTL read request failed\n");
- up(&nftl->mutex);
- res = 0;
- goto repeat;
- }
- }
-
- DEBUG(MTD_DEBUG_LEVEL2,"NFTL read request completed OK\n");
- up(&nftl->mutex);
- goto repeat;
- } else if (req->cmd == WRITE) {
- DEBUG(MTD_DEBUG_LEVEL2, "NFTL write request of 0x%x sectors @ %x "
- "(req->nr_sectors == %lx)\n", nsect, block,
- req->nr_sectors);
-#ifdef CONFIG_NFTL_RW
- for ( ; nsect > 0; nsect-- , block++, buffer += 512) {
- /* Read a single sector to req->buffer + (512 * i) */
- if (NFTL_writeblock(nftl, block, buffer)) {
- DEBUG(MTD_DEBUG_LEVEL1,"NFTL write request failed\n");
- up(&nftl->mutex);
- res = 0;
- goto repeat;
- }
- }
- DEBUG(MTD_DEBUG_LEVEL2,"NFTL write request completed OK\n");
-#else
- res = 0; /* Writes always fail */
-#endif /* CONFIG_NFTL_RW */
- up(&nftl->mutex);
- goto repeat;
- } else {
- DEBUG(MTD_DEBUG_LEVEL0, "NFTL unknown request\n");
- up(&nftl->mutex);
- res = 0;
- goto repeat;
- }
- repeat:
- DEBUG(MTD_DEBUG_LEVEL3, "end_request(%d)\n", res);
- spin_lock_irq(&io_request_lock);
- end_request(res);
- }
-}
-
-static int nftl_open(struct inode *ip, struct file *fp)
-{
- int nftlnum = MINOR(ip->i_rdev) >> NFTL_PARTN_BITS;
- struct NFTLrecord *thisNFTL;
- thisNFTL = NFTLs[nftlnum];
-
- DEBUG(MTD_DEBUG_LEVEL2,"NFTL_open\n");
-
-#ifdef CONFIG_KMOD
- if (!thisNFTL && nftlnum == 0) {
- request_module("docprobe");
- thisNFTL = NFTLs[nftlnum];
- }
-#endif
- if (!thisNFTL) {
- DEBUG(MTD_DEBUG_LEVEL2,"ENODEV: thisNFTL = %d, minor = %d, ip = %p, fp = %p\n",
- nftlnum, ip->i_rdev, ip, fp);
- return -ENODEV;
- }
-
-#ifndef CONFIG_NFTL_RW
- if (fp->f_mode & FMODE_WRITE)
- return -EROFS;
-#endif /* !CONFIG_NFTL_RW */
-
- thisNFTL->usecount++;
- BLK_INC_USE_COUNT;
- if (!get_mtd_device(thisNFTL->mtd, -1)) {
- BLK_DEC_USE_COUNT;
- return -ENXIO;
- }
-
- return 0;
-}
-
-static int nftl_release(struct inode *inode, struct file *fp)
+static int nftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
{
- struct NFTLrecord *thisNFTL;
-
- thisNFTL = NFTLs[MINOR(inode->i_rdev) / 16];
-
- DEBUG(MTD_DEBUG_LEVEL2, "NFTL_release\n");
-
- if (thisNFTL->mtd->sync)
- thisNFTL->mtd->sync(thisNFTL->mtd);
- thisNFTL->usecount--;
- BLK_DEC_USE_COUNT;
+ struct NFTLrecord *nftl = (void *)dev;
- put_mtd_device(thisNFTL->mtd);
+ geo->heads = nftl->heads;
+ geo->sectors = nftl->sectors;
+ geo->cylinders = nftl->cylinders;
return 0;
}
-#if LINUX_VERSION_CODE < 0x20326
-static struct file_operations nftl_fops = {
- read: block_read,
- write: block_write,
- ioctl: nftl_ioctl,
- open: nftl_open,
- release: nftl_release,
- fsync: block_fsync,
-};
-#else
-static struct block_device_operations nftl_fops =
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,14)
- owner: THIS_MODULE,
-#endif
- open: nftl_open,
- release: nftl_release,
- ioctl: nftl_ioctl
-};
-#endif
-
-
/****************************************************************************
*
@@ -1050,49 +726,33 @@ static struct block_device_operations nftl_fops =
*
****************************************************************************/
-static struct mtd_notifier nftl_notifier = {
- add: NFTL_notify_add,
- remove: NFTL_notify_remove
+
+static struct mtd_blktrans_ops nftl_tr = {
+ .name = "nftl",
+ .major = NFTL_MAJOR,
+ .part_bits = NFTL_PARTN_BITS,
+ .getgeo = nftl_getgeo,
+ .readsect = nftl_readblock,
+#ifdef CONFIG_NFTL_RW
+ .writesect = nftl_writeblock,
+#endif
+ .add_mtd = nftl_add_mtd,
+ .remove_dev = nftl_remove_dev,
+ .owner = THIS_MODULE,
};
extern char nftlmountrev[];
-int __init init_nftl(void)
+static int __init init_nftl(void)
{
- int i;
-
-#ifdef PRERELEASE
- printk(KERN_INFO "NFTL driver: nftlcore.c $Revision: 1.85 $, nftlmount.c %s\n", nftlmountrev);
-#endif
+ printk(KERN_INFO "NFTL driver: nftlcore.c $Revision: 1.99 $, nftlmount.c %s\n", nftlmountrev);
- if (register_blkdev(MAJOR_NR, "nftl", &nftl_fops)){
- printk("unable to register NFTL block device on major %d\n", MAJOR_NR);
- return -EBUSY;
- } else {
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &nftl_request);
-
- /* set block size to 1kB each */
- for (i = 0; i < 256; i++) {
- nftl_blocksizes[i] = 1024;
- }
- blksize_size[MAJOR_NR] = nftl_blocksizes;
-
- add_gendisk(&nftl_gendisk);
- }
-
- register_mtd_user(&nftl_notifier);
-
- return 0;
+ return register_mtd_blktrans(&nftl_tr);
}
static void __exit cleanup_nftl(void)
{
- unregister_mtd_user(&nftl_notifier);
- unregister_blkdev(MAJOR_NR, "nftl");
-
- blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
-
- del_gendisk(&nftl_gendisk);
+ deregister_mtd_blktrans(&nftl_tr);
}
module_init(init_nftl);
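For reference: in the rewritten read path above, a logical 512-byte sector is mapped to a Virtual Unit (an index into EUNtable[]) and a byte offset inside the erase unit, exactly as nftl_readblock() does with EUNtable[block / (EraseSize / 512)] and (block * 512) & (EraseSize - 1). The stand-alone C sketch below reproduces that arithmetic; the 16 KiB erase size and the sector number are illustrative assumptions, not values taken from the patch.

#include <stdio.h>

/* Illustrative values; a real device reports its erase size via struct mtd_info. */
#define SECTORSIZE 512UL
#define ERASESIZE  (16UL * 1024)

int main(void)
{
	unsigned long block = 1234;	/* logical 512-byte sector (hypothetical) */
	unsigned long vun = block / (ERASESIZE / SECTORSIZE);	     /* index into EUNtable[] */
	unsigned long ofs = (block * SECTORSIZE) & (ERASESIZE - 1); /* offset inside the erase unit */

	printf("sector %lu -> EUNtable[%lu], offset 0x%lx\n", block, vun, ofs);
	return 0;
}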
diff --git a/linux-2.4.x/drivers/mtd/nftlmount.c b/linux-2.4.x/drivers/mtd/nftlmount.c
index 56b42a5..3b104eb 100644
--- a/linux-2.4.x/drivers/mtd/nftlmount.c
+++ b/linux-2.4.x/drivers/mtd/nftlmount.c
@@ -1,10 +1,10 @@
-/*
+/*
* NFTL mount code with extensive checks
*
- * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
* Copyright (C) 2000 Netgem S.A.
*
- * $Id: nftlmount.c,v 1.25 2001/11/30 16:46:27 dwmw2 Exp $
+ * $Id: nftlmount.c,v 1.41 2005/11/07 11:14:21 gleixner Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,25 +21,17 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#define __NO_VERSION__
#include <linux/kernel.h>
-#include <linux/module.h>
#include <asm/errno.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/miscdevice.h>
-#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
#include <linux/mtd/nftl.h>
-#include <linux/mtd/compatmac.h>
#define SECTORSIZE 512
-char nftlmountrev[]="$Revision: 1.25 $";
+char nftlmountrev[]="$Revision: 1.41 $";
/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
* various device information of the NFTL partition and Bad Unit Table. Update
@@ -49,13 +41,21 @@ char nftlmountrev[]="$Revision: 1.25 $";
static int find_boot_record(struct NFTLrecord *nftl)
{
struct nftl_uci1 h1;
- struct nftl_oob oob;
unsigned int block, boot_record_count = 0;
- int retlen;
+ size_t retlen;
u8 buf[SECTORSIZE];
struct NFTLMediaHeader *mh = &nftl->MediaHdr;
unsigned int i;
+ /* Assume logical EraseSize == physical erasesize for starting the scan.
+ We'll sort it out later if we find a MediaHeader which says otherwise */
+ /* Actually, we won't. The new DiskOnChip driver has already scanned
+ the MediaHeader and adjusted the virtual erasesize it presents in
+ the mtd device accordingly. We could even get rid of
+ nftl->EraseSize if there were any point in doing so. */
+ nftl->EraseSize = nftl->mbd.mtd->erasesize;
+ nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize;
+
nftl->MediaUnit = BLOCK_NIL;
nftl->SpareMediaUnit = BLOCK_NIL;
@@ -65,12 +65,15 @@ static int find_boot_record(struct NFTLrecord *nftl)
/* Check for ANAND header first. Then can whinge if it's found but later
checks fail */
- if ((ret = MTD_READ(nftl->mtd, block * nftl->EraseSize, SECTORSIZE, &retlen, buf))) {
+ ret = MTD_READ(nftl->mbd.mtd, block * nftl->EraseSize, SECTORSIZE, &retlen, buf);
+ /* We ignore ret in case the ECC of the MediaHeader is invalid
+ (which is apparently acceptable) */
+ if (retlen != SECTORSIZE) {
static int warncount = 5;
if (warncount) {
printk(KERN_WARNING "Block read at 0x%x of mtd%d failed: %d\n",
- block * nftl->EraseSize, nftl->mtd->index, ret);
+ block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
if (!--warncount)
printk(KERN_WARNING "Further failures for this block will not be printed\n");
}
@@ -80,17 +83,17 @@ static int find_boot_record(struct NFTLrecord *nftl)
if (retlen < 6 || memcmp(buf, "ANAND", 6)) {
/* ANAND\0 not found. Continue */
#if 0
- printk(KERN_DEBUG "ANAND header not found at 0x%x in mtd%d\n",
- block * nftl->EraseSize, nftl->mtd->index);
-#endif
+ printk(KERN_DEBUG "ANAND header not found at 0x%x in mtd%d\n",
+ block * nftl->EraseSize, nftl->mbd.mtd->index);
+#endif
continue;
}
/* To be safer with BIOS, also use erase mark as discriminant */
- if ((ret = MTD_READOOB(nftl->mtd, block * nftl->EraseSize + SECTORSIZE + 8,
+ if ((ret = MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8,
8, &retlen, (char *)&h1) < 0)) {
printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n",
- block * nftl->EraseSize, nftl->mtd->index, ret);
+ block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
continue;
}
@@ -100,29 +103,28 @@ static int find_boot_record(struct NFTLrecord *nftl)
*/
if (le16_to_cpu(h1.EraseMark | h1.EraseMark1) != ERASE_MARK) {
printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but erase mark not present (0x%04x,0x%04x instead)\n",
- block * nftl->EraseSize, nftl->mtd->index,
+ block * nftl->EraseSize, nftl->mbd.mtd->index,
le16_to_cpu(h1.EraseMark), le16_to_cpu(h1.EraseMark1));
continue;
}
/* Finally reread to check ECC */
- if ((ret = MTD_READECC(nftl->mtd, block * nftl->EraseSize, SECTORSIZE,
- &retlen, buf, (char *)&oob) < 0)) {
+ if ((ret = MTD_READECC(nftl->mbd.mtd, block * nftl->EraseSize, SECTORSIZE,
+ &retlen, buf, (char *)&oob, NULL) < 0)) {
printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
- block * nftl->EraseSize, nftl->mtd->index, ret);
+ block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
continue;
}
/* Paranoia. Check the ANAND header is still there after the ECC read */
if (memcmp(buf, "ANAND", 6)) {
printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but went away on reread!\n",
- block * nftl->EraseSize, nftl->mtd->index);
+ block * nftl->EraseSize, nftl->mbd.mtd->index);
printk(KERN_NOTICE "New data are: %02x %02x %02x %02x %02x %02x\n",
buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
continue;
}
#endif
-
/* OK, we like it. */
if (boot_record_count) {
@@ -132,11 +134,19 @@ static int find_boot_record(struct NFTLrecord *nftl)
printk(KERN_NOTICE "NFTL Media Headers at 0x%x and 0x%x disagree.\n",
nftl->MediaUnit * nftl->EraseSize, block * nftl->EraseSize);
/* if (debug) Print both side by side */
- return -1;
+ if (boot_record_count < 2) {
+ /* We haven't yet seen two real ones */
+ return -1;
+ }
+ continue;
}
if (boot_record_count == 1)
nftl->SpareMediaUnit = block;
+ /* Mark this boot record (NFTL MediaHeader) block as reserved */
+ nftl->ReplUnitTable[block] = BLOCK_RESERVED;
+
+
boot_record_count++;
continue;
}
@@ -145,16 +155,27 @@ static int find_boot_record(struct NFTLrecord *nftl)
memcpy(mh, buf, sizeof(struct NFTLMediaHeader));
/* Do some sanity checks on it */
- if (mh->UnitSizeFactor != 0xff) {
- printk(KERN_NOTICE "Sorry, we don't support UnitSizeFactor "
- "of != 1 yet.\n");
+#if 0
+The new DiskOnChip driver scans the MediaHeader itself, and presents a virtual
+erasesize based on UnitSizeFactor. So the erasesize we read from the mtd
+device is already correct.
+ if (mh->UnitSizeFactor == 0) {
+ printk(KERN_NOTICE "NFTL: UnitSizeFactor 0x00 detected. This violates the spec but we think we know what it means...\n");
+ } else if (mh->UnitSizeFactor < 0xfc) {
+ printk(KERN_NOTICE "Sorry, we don't support UnitSizeFactor 0x%02x\n",
+ mh->UnitSizeFactor);
return -1;
+ } else if (mh->UnitSizeFactor != 0xff) {
+ printk(KERN_NOTICE "WARNING: Support for NFTL with UnitSizeFactor 0x%02x is experimental\n",
+ mh->UnitSizeFactor);
+ nftl->EraseSize = nftl->mbd.mtd->erasesize << (0xff - mh->UnitSizeFactor);
+ nftl->nb_blocks = nftl->mbd.mtd->size / nftl->EraseSize;
}
-
+#endif
nftl->nb_boot_blocks = le16_to_cpu(mh->FirstPhysicalEUN);
if ((nftl->nb_boot_blocks + 2) >= nftl->nb_blocks) {
printk(KERN_NOTICE "NFTL Media Header sanity check failed:\n");
- printk(KERN_NOTICE "nb_boot_blocks (%d) + 2 > nb_blocks (%d)\n",
+ printk(KERN_NOTICE "nb_boot_blocks (%d) + 2 > nb_blocks (%d)\n",
nftl->nb_boot_blocks, nftl->nb_blocks);
return -1;
}
@@ -166,35 +187,70 @@ static int find_boot_record(struct NFTLrecord *nftl)
nftl->numvunits, nftl->nb_blocks, nftl->nb_boot_blocks);
return -1;
}
-
- nftl->nr_sects = nftl->numvunits * (nftl->EraseSize / SECTORSIZE);
-
+
+ nftl->mbd.size = nftl->numvunits * (nftl->EraseSize / SECTORSIZE);
+
/* If we're not using the last sectors in the device for some reason,
reduce nb_blocks accordingly so we forget they're there */
nftl->nb_blocks = le16_to_cpu(mh->NumEraseUnits) + le16_to_cpu(mh->FirstPhysicalEUN);
+ /* XXX: will be suppressed */
+ nftl->lastEUN = nftl->nb_blocks - 1;
+
+ /* memory alloc */
+ nftl->EUNtable = kmalloc(nftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ if (!nftl->EUNtable) {
+ printk(KERN_NOTICE "NFTL: allocation of EUNtable failed\n");
+ return -ENOMEM;
+ }
+
+ nftl->ReplUnitTable = kmalloc(nftl->nb_blocks * sizeof(u16), GFP_KERNEL);
+ if (!nftl->ReplUnitTable) {
+ kfree(nftl->EUNtable);
+ printk(KERN_NOTICE "NFTL: allocation of ReplUnitTable failed\n");
+ return -ENOMEM;
+ }
+
+ /* mark the bios blocks (blocks before NFTL MediaHeader) as reserved */
+ for (i = 0; i < nftl->nb_boot_blocks; i++)
+ nftl->ReplUnitTable[i] = BLOCK_RESERVED;
+ /* mark all remaining blocks as potentially containing data */
+ for (; i < nftl->nb_blocks; i++) {
+ nftl->ReplUnitTable[i] = BLOCK_NOTEXPLORED;
+ }
+
+ /* Mark this boot record (NFTL MediaHeader) block as reserved */
+ nftl->ReplUnitTable[block] = BLOCK_RESERVED;
+
/* read the Bad Erase Unit Table and modify ReplUnitTable[] accordingly */
for (i = 0; i < nftl->nb_blocks; i++) {
+#if 0
+The new DiskOnChip driver already scanned the bad block table. Just query it.
if ((i & (SECTORSIZE - 1)) == 0) {
/* read one sector for every SECTORSIZE of blocks */
- if ((ret = MTD_READECC(nftl->mtd, block * nftl->EraseSize +
- i + SECTORSIZE, SECTORSIZE,
- &retlen, buf, (char *)&oob)) < 0) {
+ if ((ret = MTD_READECC(nftl->mbd.mtd, block * nftl->EraseSize +
+ i + SECTORSIZE, SECTORSIZE, &retlen, buf,
+ (char *)&oob, NULL)) < 0) {
printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
ret);
+ kfree(nftl->ReplUnitTable);
+ kfree(nftl->EUNtable);
return -1;
}
}
/* mark the Bad Erase Unit as RESERVED in ReplUnitTable */
if (buf[i & (SECTORSIZE - 1)] != 0xff)
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
+#endif
+ if (nftl->mbd.mtd->block_isbad(nftl->mbd.mtd, i * nftl->EraseSize))
+ nftl->ReplUnitTable[i] = BLOCK_RESERVED;
}
-
+
nftl->MediaUnit = block;
boot_record_count++;
-
+
} /* foreach (block) */
-
+
return boot_record_count?0:-1;
}
@@ -209,25 +265,21 @@ static int memcmpb(void *a, int c, int n)
}
/* check_free_sector: check if a free sector is actually FREE, i.e. All 0xff in data and oob area */
-static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len,
+static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len,
int check_oob)
{
- int i, retlen;
- u8 buf[SECTORSIZE];
+ int i;
+ size_t retlen;
+ u8 buf[SECTORSIZE + nftl->mbd.mtd->oobsize];
for (i = 0; i < len; i += SECTORSIZE) {
- /* we want to read the sector without ECC check here since a free
- sector does not have ECC syndrome on it yet */
- if (MTD_READ(nftl->mtd, address, SECTORSIZE, &retlen, buf) < 0)
+ if (MTD_READECC(nftl->mbd.mtd, address, SECTORSIZE, &retlen, buf, &buf[SECTORSIZE], &nftl->oobinfo) < 0)
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
if (check_oob) {
- if (MTD_READOOB(nftl->mtd, address, nftl->mtd->oobsize,
- &retlen, buf) < 0)
- return -1;
- if (memcmpb(buf, 0xff, nftl->mtd->oobsize) != 0)
+ if (memcmpb(buf + SECTORSIZE, 0xff, nftl->mbd.mtd->oobsize) != 0)
return -1;
}
address += SECTORSIZE;
@@ -241,18 +293,17 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
*
* Return: 0 when succeed, -1 on error.
*
- * ToDo: 1. Is it necessary to check_free_sector after erasing ??
- *	2. UnitSizeFactor != 0xFF
+ * ToDo: 1. Is it necessary to check_free_sector after erasing ??
*/
int NFTL_formatblock(struct NFTLrecord *nftl, int block)
{
- int retlen;
+ size_t retlen;
unsigned int nb_erases, erase_mark;
struct nftl_uci1 uci;
struct erase_info *instr = &nftl->instr;
/* Read the Unit Control Information #1 for Wear-Leveling */
- if (MTD_READOOB(nftl->mtd, block * nftl->EraseSize + SECTORSIZE + 8,
+ if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8,
8, &retlen, (char *)&uci) < 0)
goto default_uci1;
@@ -267,16 +318,16 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
memset(instr, 0, sizeof(struct erase_info));
/* XXX: use async erase interface, XXX: test return code */
+ instr->mtd = nftl->mbd.mtd;
instr->addr = block * nftl->EraseSize;
instr->len = nftl->EraseSize;
- MTD_ERASE(nftl->mtd, instr);
+ MTD_ERASE(nftl->mbd.mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
- /* could not format, FixMe: We should update the BadUnitTable
- both in memory and on disk */
printk("Error while formatting block %d\n", block);
- return -1;
- } else {
+ goto fail;
+ }
+
/* increase and write Wear-Leveling info */
nb_erases = le32_to_cpu(uci.WearInfo);
nb_erases++;
@@ -289,14 +340,18 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
* FixMe: is this check really necessary ? since we have check the
* return code after the erase operation. */
if (check_free_sectors(nftl, instr->addr, nftl->EraseSize, 1) != 0)
- return -1;
+ goto fail;
uci.WearInfo = le32_to_cpu(nb_erases);
- if (MTD_WRITEOOB(nftl->mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
+ if (MTD_WRITEOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
&retlen, (char *)&uci) < 0)
- return -1;
+ goto fail;
return 0;
- }
+fail:
+ /* could not format, update the bad block table (caller is responsible
+ for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
+ nftl->mbd.mtd->block_markbad(nftl->mbd.mtd, instr->addr);
+ return -1;
}
/* check_sectors_in_chain: Check that each sector of a Virtual Unit Chain is correct.
@@ -312,13 +367,14 @@ static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_b
{
unsigned int block, i, status;
struct nftl_bci bci;
- int sectors_per_block, retlen;
+ int sectors_per_block;
+ size_t retlen;
sectors_per_block = nftl->EraseSize / SECTORSIZE;
block = first_block;
for (;;) {
for (i = 0; i < sectors_per_block; i++) {
- if (MTD_READOOB(nftl->mtd, block * nftl->EraseSize + i * SECTORSIZE,
+ if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + i * SECTORSIZE,
8, &retlen, (char *)&bci) < 0)
status = SECTOR_IGNORE;
else
@@ -329,7 +385,7 @@ static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_b
/* verify that the sector is really free. If not, mark
as ignore */
if (memcmpb(&bci, 0xff, 8) != 0 ||
- check_free_sectors(nftl, block * nftl->EraseSize + i * SECTORSIZE,
+ check_free_sectors(nftl, block * nftl->EraseSize + i * SECTORSIZE,
SECTORSIZE, 0) != 0) {
printk("Incorrect free sector %d in block %d: "
"marking it as ignored\n",
@@ -338,7 +394,7 @@ static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_b
/* sector not free actually : mark it as SECTOR_IGNORE */
bci.Status = SECTOR_IGNORE;
bci.Status1 = SECTOR_IGNORE;
- MTD_WRITEOOB(nftl->mtd,
+ MTD_WRITEOOB(nftl->mbd.mtd,
block * nftl->EraseSize + i * SECTORSIZE,
8, &retlen, (char *)&bci);
}
@@ -401,8 +457,7 @@ static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
printk("Formatting block %d\n", block);
if (NFTL_formatblock(nftl, block) < 0) {
- /* cannot format !!!! Mark it as Bad Unit,
- FixMe: update the BadUnitTable on disk */
+ /* cannot format !!!! Mark it as Bad Unit */
nftl->ReplUnitTable[block] = BLOCK_RESERVED;
} else {
nftl->ReplUnitTable[block] = BLOCK_FREE;
@@ -428,10 +483,10 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
{
struct nftl_uci1 h1;
unsigned int erase_mark;
- int retlen;
+ size_t retlen;
/* check erase mark. */
- if (MTD_READOOB(nftl->mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
+ if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
&retlen, (char *)&h1) < 0)
return -1;
@@ -446,7 +501,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
h1.EraseMark = cpu_to_le16(ERASE_MARK);
h1.EraseMark1 = cpu_to_le16(ERASE_MARK);
h1.WearInfo = cpu_to_le32(0);
- if (MTD_WRITEOOB(nftl->mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
+ if (MTD_WRITEOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
&retlen, (char *)&h1) < 0)
return -1;
} else {
@@ -458,7 +513,7 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
SECTORSIZE, 0) != 0)
return -1;
- if (MTD_READOOB(nftl->mtd, block * nftl->EraseSize + i,
+ if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + i,
16, &retlen, buf) < 0)
return -1;
if (i == SECTORSIZE) {
@@ -486,9 +541,9 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
static int get_fold_mark(struct NFTLrecord *nftl, unsigned int block)
{
struct nftl_uci2 uci;
- int retlen;
+ size_t retlen;
- if (MTD_READOOB(nftl->mtd, block * nftl->EraseSize + 2 * SECTORSIZE + 8,
+ if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + 2 * SECTORSIZE + 8,
8, &retlen, (char *)&uci) < 0)
return 0;
@@ -503,44 +558,14 @@ int NFTL_mount(struct NFTLrecord *s)
int chain_length, do_format_chain;
struct nftl_uci0 h0;
struct nftl_uci1 h1;
- int retlen;
-
- /* XXX: will be suppressed */
- s->lastEUN = s->nb_blocks - 1;
-
- /* memory alloc */
- s->EUNtable = kmalloc(s->nb_blocks * sizeof(u16), GFP_KERNEL);
- s->ReplUnitTable = kmalloc(s->nb_blocks * sizeof(u16), GFP_KERNEL);
- if (!s->EUNtable || !s->ReplUnitTable) {
- fail:
- if (s->EUNtable)
- kfree(s->EUNtable);
- if (s->ReplUnitTable)
- kfree(s->ReplUnitTable);
- return -1;
- }
-
- /* mark all blocks as potentially containing data */
- for (i = 0; i < s->nb_blocks; i++) {
- s->ReplUnitTable[i] = BLOCK_NOTEXPLORED;
- }
+ size_t retlen;
/* search for NFTL MediaHeader and Spare NFTL Media Header */
if (find_boot_record(s) < 0) {
printk("Could not find valid boot record\n");
- goto fail;
+ return -1;
}
- /* mark the bios blocks (blocks before NFTL MediaHeader) as reserved */
- for (i = 0; i < s->nb_boot_blocks; i++)
- s->ReplUnitTable[i] = BLOCK_RESERVED;
-
- /* also mark the boot records (NFTL MediaHeader) blocks as reserved */
- if (s->MediaUnit != BLOCK_NIL)
- s->ReplUnitTable[s->MediaUnit] = BLOCK_RESERVED;
- if (s->SpareMediaUnit != BLOCK_NIL)
- s->ReplUnitTable[s->SpareMediaUnit] = BLOCK_RESERVED;
-
/* init the logical to physical table */
for (i = 0; i < s->nb_blocks; i++) {
s->EUNtable[i] = BLOCK_NIL;
@@ -557,9 +582,9 @@ int NFTL_mount(struct NFTLrecord *s)
for (;;) {
/* read the block header. If error, we format the chain */
- if (MTD_READOOB(s->mtd, block * s->EraseSize + 8, 8,
+ if (MTD_READOOB(s->mbd.mtd, block * s->EraseSize + 8, 8,
&retlen, (char *)&h0) < 0 ||
- MTD_READOOB(s->mtd, block * s->EraseSize + SECTORSIZE + 8, 8,
+ MTD_READOOB(s->mbd.mtd, block * s->EraseSize + SECTORSIZE + 8, 8,
&retlen, (char *)&h1) < 0) {
s->ReplUnitTable[block] = BLOCK_NIL;
do_format_chain = 1;
@@ -614,7 +639,7 @@ int NFTL_mount(struct NFTLrecord *s)
first_logical_block = logical_block;
} else {
if (logical_block != first_logical_block) {
- printk("Block %d: incorrect logical block: %d expected: %d\n",
+ printk("Block %d: incorrect logical block: %d expected: %d\n",
block, logical_block, first_logical_block);
/* the chain is incorrect : we must format it,
but we need to read it completly */
@@ -643,7 +668,7 @@ int NFTL_mount(struct NFTLrecord *s)
s->ReplUnitTable[block] = BLOCK_NIL;
break;
} else if (rep_block >= s->nb_blocks) {
- printk("Block %d: referencing invalid block %d\n",
+ printk("Block %d: referencing invalid block %d\n",
block, rep_block);
do_format_chain = 1;
s->ReplUnitTable[block] = BLOCK_NIL;
@@ -663,7 +688,7 @@ int NFTL_mount(struct NFTLrecord *s)
s->ReplUnitTable[block] = rep_block;
s->EUNtable[first_logical_block] = BLOCK_NIL;
} else {
- printk("Block %d: referencing block %d already in another chain\n",
+ printk("Block %d: referencing block %d already in another chain\n",
block, rep_block);
/* XXX: should handle correctly fold in progress chains */
do_format_chain = 1;
@@ -685,7 +710,7 @@ int NFTL_mount(struct NFTLrecord *s)
} else {
unsigned int first_block1, chain_to_format, chain_length1;
int fold_mark;
-
+
/* valid chain : get foldmark */
fold_mark = get_fold_mark(s, first_block);
if (fold_mark == 0) {
@@ -704,9 +729,9 @@ int NFTL_mount(struct NFTLrecord *s)
if (first_block1 != BLOCK_NIL) {
/* XXX: what to do if same length ? */
chain_length1 = calc_chain_length(s, first_block1);
- printk("Two chains at blocks %d (len=%d) and %d (len=%d)\n",
+ printk("Two chains at blocks %d (len=%d) and %d (len=%d)\n",
first_block1, chain_length1, first_block, chain_length);
-
+
if (chain_length >= chain_length1) {
chain_to_format = first_block1;
s->EUNtable[first_logical_block] = first_block;
@@ -725,7 +750,7 @@ int NFTL_mount(struct NFTLrecord *s)
/* second pass to format unreferenced blocks and init free block count */
s->numfreeEUNs = 0;
- s->LastFreeEUN = BLOCK_NIL;
+ s->LastFreeEUN = le16_to_cpu(s->MediaHdr.FirstPhysicalEUN);
for (block = 0; block < s->nb_blocks; block++) {
if (s->ReplUnitTable[block] == BLOCK_NOTEXPLORED) {
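For reference: the commented-out UnitSizeFactor handling in find_boot_record() above scales the virtual erase size as EraseSize = erasesize << (0xff - UnitSizeFactor), where 0xff means no scaling. The short stand-alone sketch below just evaluates that relation; the 16 KiB physical erase size is an illustrative assumption, not a value from the patch.

#include <stdio.h>

static unsigned long nftl_virtual_erase_size(unsigned long erasesize, unsigned int factor)
{
	/* Mirrors the disabled branch in find_boot_record(): 0xff => unscaled. */
	return erasesize << (0xff - factor);
}

int main(void)
{
	printf("factor 0xff -> %lu\n", nftl_virtual_erase_size(16384, 0xff)); /* 16384 */
	printf("factor 0xfe -> %lu\n", nftl_virtual_erase_size(16384, 0xfe)); /* 32768 */
	printf("factor 0xfd -> %lu\n", nftl_virtual_erase_size(16384, 0xfd)); /* 65536 */
	return 0;
}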
diff --git a/linux-2.4.x/drivers/mtd/redboot.c b/linux-2.4.x/drivers/mtd/redboot.c
index 08d029f..5b58523 100644
--- a/linux-2.4.x/drivers/mtd/redboot.c
+++ b/linux-2.4.x/drivers/mtd/redboot.c
@@ -1,5 +1,5 @@
/*
- * $Id: redboot.c,v 1.6 2001/10/25 09:16:06 dwmw2 Exp $
+ * $Id: redboot.c,v 1.21 2006/03/30 18:34:37 bjd Exp $
*
* Parse RedBoot-style Flash Image System (FIS) tables and
* produce a Linux partition array to match.
@@ -7,20 +7,22 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
struct fis_image_desc {
unsigned char name[16]; // Null terminated name
- unsigned long flash_base; // Address within FLASH of image
- unsigned long mem_base; // Address in memory where it executes
- unsigned long size; // Length of image
- unsigned long entry_point; // Execution entry point
- unsigned long data_length; // Length of actual data
- unsigned char _pad[256-(16+7*sizeof(unsigned long))];
- unsigned long desc_cksum; // Checksum over image descriptor
- unsigned long file_cksum; // Checksum over image data
+ uint32_t flash_base; // Address within FLASH of image
+ uint32_t mem_base; // Address in memory where it executes
+ uint32_t size; // Length of image
+ uint32_t entry_point; // Execution entry point
+ uint32_t data_length; // Length of actual data
+ unsigned char _pad[256-(16+7*sizeof(uint32_t))];
+ uint32_t desc_cksum; // Checksum over image descriptor
+ uint32_t file_cksum; // Checksum over image data
};
struct fis_list {
@@ -28,13 +30,18 @@ struct fis_list {
struct fis_list *next;
};
+static int directory = CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK;
+module_param(directory, int, 0);
+
static inline int redboot_checksum(struct fis_image_desc *img)
{
/* RedBoot doesn't actually write the desc_cksum field yet AFAICT */
return 1;
}
-int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts)
+static int parse_redboot_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ unsigned long fis_origin)
{
int nrparts = 0;
struct fis_image_desc *buf;
@@ -43,31 +50,69 @@ int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **ppa
int ret, i;
size_t retlen;
char *names;
+ char *nullname;
int namelen = 0;
+ int nulllen = 0;
+ int numslots;
+ unsigned long offset;
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ static char nullstring[] = "unallocated";
+#endif
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ buf = vmalloc(master->erasesize);
if (!buf)
return -ENOMEM;
- /* Read the start of the last erase block */
- ret = master->read(master, master->size - master->erasesize,
- PAGE_SIZE, &retlen, (void *)buf);
+ if ( directory < 0 )
+ offset = master->size + directory*master->erasesize;
+ else
+ offset = directory*master->erasesize;
+
+ printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
+ master->name, offset);
+
+ ret = master->read(master, offset,
+ master->erasesize, &retlen, (void *)buf);
if (ret)
goto out;
- if (retlen != PAGE_SIZE) {
+ if (retlen != master->erasesize) {
ret = -EIO;
goto out;
}
- /* RedBoot image could appear in any of the first three slots */
- for (i = 0; i < 3; i++) {
- if (!memcmp(buf[i].name, "RedBoot", 8))
+ numslots = (master->erasesize / sizeof(struct fis_image_desc));
+ for (i = 0; i < numslots; i++) {
+ if (!memcmp(buf[i].name, "FIS directory", 14)) {
+ /* This is apparently the FIS directory entry for the
+ * FIS directory itself. The FIS directory size is
+ * one erase block; if the buf[i].size field is
+ * swab32(erasesize) then we know we are looking at
+ * a byte swapped FIS directory - swap all the entries!
+ * (NOTE: this is 'size' not 'data_length'; size is
+ * the full size of the entry.)
+ */
+ if (swab32(buf[i].size) == master->erasesize) {
+ int j;
+ for (j = 0; j < numslots && buf[j].name[0] != 0xff; ++j) {
+ /* The unsigned long fields were written with the
+ * wrong byte sex, name and pad have no byte sex.
+ */
+ swab32s(&buf[j].flash_base);
+ swab32s(&buf[j].mem_base);
+ swab32s(&buf[j].size);
+ swab32s(&buf[j].entry_point);
+ swab32s(&buf[j].data_length);
+ swab32s(&buf[j].desc_cksum);
+ swab32s(&buf[j].file_cksum);
+ }
+ }
break;
+ }
}
- if (i == 3) {
+ if (i == numslots) {
/* Didn't find it */
printk(KERN_NOTICE "No RedBoot partition table detected in %s\n",
master->name);
@@ -75,11 +120,11 @@ int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **ppa
goto out;
}
- for (i = 0; i < PAGE_SIZE / sizeof(struct fis_image_desc); i++) {
+ for (i = 0; i < numslots; i++) {
struct fis_list *new_fl, **prev;
if (buf[i].name[0] == 0xff)
- break;
+ continue;
if (!redboot_checksum(&buf[i]))
break;
@@ -90,7 +135,11 @@ int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **ppa
goto out;
}
new_fl->img = &buf[i];
- buf[i].flash_base &= master->size-1;
+ if (fis_origin) {
+ buf[i].flash_base -= fis_origin;
+ } else {
+ buf[i].flash_base &= master->size-1;
+ }
/* I'm sure the JFFS2 code has done me permanent damage.
* I now think the following is _normal_
@@ -103,42 +152,69 @@ int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **ppa
nrparts++;
}
- if (fl->img->flash_base)
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (fl->img->flash_base) {
nrparts++;
+ nulllen = sizeof(nullstring);
+ }
for (tmp_fl = fl; tmp_fl->next; tmp_fl = tmp_fl->next) {
- if (tmp_fl->img->flash_base + tmp_fl->img->size + master->erasesize < tmp_fl->next->img->flash_base)
+ if (tmp_fl->img->flash_base + tmp_fl->img->size + master->erasesize <= tmp_fl->next->img->flash_base) {
nrparts++;
+ nulllen = sizeof(nullstring);
+ }
}
- parts = kmalloc(sizeof(*parts)*nrparts + namelen, GFP_KERNEL);
+#endif
+ parts = kmalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL);
if (!parts) {
ret = -ENOMEM;
goto out;
}
- names = (char *)&parts[nrparts];
- memset(parts, 0, sizeof(*parts)*nrparts + namelen);
+
+ memset(parts, 0, sizeof(*parts)*nrparts + nulllen + namelen);
+
+ nullname = (char *)&parts[nrparts];
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if (nulllen > 0) {
+ strcpy(nullname, nullstring);
+ }
+#endif
+ names = nullname + nulllen;
+
i=0;
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
if (fl->img->flash_base) {
- parts[0].name = "unallocated space";
+ parts[0].name = nullname;
parts[0].size = fl->img->flash_base;
parts[0].offset = 0;
+ i++;
}
+#endif
for ( ; i<nrparts; i++) {
parts[i].size = fl->img->size;
parts[i].offset = fl->img->flash_base;
parts[i].name = names;
strcpy(names, fl->img->name);
+#ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY
+ if (!memcmp(names, "RedBoot", 8) ||
+ !memcmp(names, "RedBoot config", 15) ||
+ !memcmp(names, "FIS directory", 14)) {
+ parts[i].mask_flags = MTD_WRITEABLE;
+ }
+#endif
names += strlen(names)+1;
- if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize < fl->next->img->flash_base) {
+#ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED
+ if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) {
i++;
parts[i].offset = parts[i-1].size + parts[i-1].offset;
parts[i].size = fl->next->img->flash_base - parts[i].offset;
- parts[i].name = "unallocated space";
+ parts[i].name = nullname;
}
+#endif
tmp_fl = fl;
fl = fl->next;
kfree(tmp_fl);
@@ -151,11 +227,28 @@ int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **ppa
fl = fl->next;
kfree(old);
}
- kfree(buf);
+ vfree(buf);
return ret;
}
-EXPORT_SYMBOL(parse_redboot_partitions);
+static struct mtd_part_parser redboot_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = parse_redboot_partitions,
+ .name = "RedBoot",
+};
+
+static int __init redboot_parser_init(void)
+{
+ return register_mtd_parser(&redboot_parser);
+}
+
+static void __exit redboot_parser_exit(void)
+{
+ deregister_mtd_parser(&redboot_parser);
+}
+
+module_init(redboot_parser_init);
+module_exit(redboot_parser_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Red Hat, Inc. - David Woodhouse <dwmw2@cambridge.redhat.com>");
diff --git a/linux-2.4.x/drivers/mtd/rfd_ftl.c b/linux-2.4.x/drivers/mtd/rfd_ftl.c
new file mode 100644
index 0000000..0031cf5
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/rfd_ftl.c
@@ -0,0 +1,856 @@
+/*
+ * rfd_ftl.c -- resident flash disk (flash translation layer)
+ *
+ * Copyright (C) 2005 Sean Young <sean@mess.org>
+ *
+ * $Id: rfd_ftl.c,v 1.8 2006/01/15 12:51:44 sean Exp $
+ *
+ * This type of flash translation layer (FTL) is used by the Embedded BIOS
+ * by General Software. It is known as the Resident Flash Disk (RFD), see:
+ *
+ * http://www.gensw.com/pages/prod/bios/rfd.htm
+ *
+ * based on ftl.c
+ */
+
+#include <linux/hdreg.h>
+#include <linux/init.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/mtd.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+
+#include <asm/types.h>
+
+#define const_cpu_to_le16 __constant_cpu_to_le16
+
+static int block_size = 0;
+module_param(block_size, int, 0);
+MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");
+
+#define PREFIX "rfd_ftl: "
+
+/* This major has been assigned by device@lanana.org */
+#ifndef RFD_FTL_MAJOR
+#define RFD_FTL_MAJOR 256
+#endif
+
+/* Maximum number of partitions in an FTL region */
+#define PART_BITS 4
+
+/* An erase unit should start with this value */
+#define RFD_MAGIC 0x9193
+
+/* the second value is 0xffff or 0xffc8; function unknown */
+
+/* the third value is always 0xffff, ignored */
+
+/* next is an array of mapping for each corresponding sector */
+#define HEADER_MAP_OFFSET 3
+#define SECTOR_DELETED 0x0000
+#define SECTOR_ZERO 0xfffe
+#define SECTOR_FREE 0xffff
+
+#define SECTOR_SIZE 512
+
+#define SECTORS_PER_TRACK 63
+
+struct block {
+ enum {
+ BLOCK_OK,
+ BLOCK_ERASING,
+ BLOCK_ERASED,
+ BLOCK_UNUSED,
+ BLOCK_FAILED
+ } state;
+ int free_sectors;
+ int used_sectors;
+ int erases;
+ u_long offset;
+};
+
+struct partition {
+ struct mtd_blktrans_dev mbd;
+
+ u_int block_size; /* size of erase unit */
+ u_int total_blocks; /* number of erase units */
+ u_int header_sectors_per_block; /* header sectors in erase unit */
+ u_int data_sectors_per_block; /* data sectors in erase unit */
+ u_int sector_count; /* sectors in translated disk */
+ u_int header_size; /* bytes in header sector */
+ int reserved_block; /* block next up for reclaim */
+ int current_block; /* block to write to */
+ u16 *header_cache; /* cached header */
+
+ int is_reclaiming;
+ int cylinders;
+ int errors;
+ u_long *sector_map;
+ struct block *blocks;
+};
+
+static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
+
+static int build_block_map(struct partition *part, int block_no)
+{
+ struct block *block = &part->blocks[block_no];
+ int i;
+
+ block->offset = part->block_size * block_no;
+
+ if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
+ block->state = BLOCK_UNUSED;
+ return -ENOENT;
+ }
+
+ block->state = BLOCK_OK;
+
+ for (i=0; i<part->data_sectors_per_block; i++) {
+ u16 entry;
+
+ entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
+
+ if (entry == SECTOR_DELETED)
+ continue;
+
+ if (entry == SECTOR_FREE) {
+ block->free_sectors++;
+ continue;
+ }
+
+ if (entry == SECTOR_ZERO)
+ entry = 0;
+
+ if (entry >= part->sector_count) {
+ printk(KERN_WARNING PREFIX
+ "'%s': unit #%d: entry %d corrupt, "
+ "sector %d out of range\n",
+ part->mbd.mtd->name, block_no, i, entry);
+ continue;
+ }
+
+ if (part->sector_map[entry] != -1) {
+ printk(KERN_WARNING PREFIX
+ "'%s': more than one entry for sector %d\n",
+ part->mbd.mtd->name, entry);
+ part->errors = 1;
+ continue;
+ }
+
+ part->sector_map[entry] = block->offset +
+ (i + part->header_sectors_per_block) * SECTOR_SIZE;
+
+ block->used_sectors++;
+ }
+
+ if (block->free_sectors == part->data_sectors_per_block)
+ part->reserved_block = block_no;
+
+ return 0;
+}
+
+static int scan_header(struct partition *part)
+{
+ int sectors_per_block;
+ int i, rc = -ENOMEM;
+ int blocks_found;
+ size_t retlen;
+
+ sectors_per_block = part->block_size / SECTOR_SIZE;
+ part->total_blocks = part->mbd.mtd->size / part->block_size;
+
+ if (part->total_blocks < 2)
+ return -ENOENT;
+
+ /* each erase block has three bytes header, followed by the map */
+ part->header_sectors_per_block =
+ ((HEADER_MAP_OFFSET + sectors_per_block) *
+ sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
+
+ part->data_sectors_per_block = sectors_per_block -
+ part->header_sectors_per_block;
+
+ part->header_size = (HEADER_MAP_OFFSET +
+ part->data_sectors_per_block) * sizeof(u16);
+
+ part->cylinders = (part->data_sectors_per_block *
+ (part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;
+
+ part->sector_count = part->cylinders * SECTORS_PER_TRACK;
+
+ part->current_block = -1;
+ part->reserved_block = -1;
+ part->is_reclaiming = 0;
+
+ part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
+ if (!part->header_cache)
+ goto err;
+
+ part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
+ GFP_KERNEL);
+ if (!part->blocks)
+ goto err;
+
+ part->sector_map = vmalloc(part->sector_count * sizeof(u_long));
+ if (!part->sector_map) {
+ printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
+ "sector map", part->mbd.mtd->name);
+ goto err;
+ }
+
+ for (i=0; i<part->sector_count; i++)
+ part->sector_map[i] = -1;
+
+ for (i=0, blocks_found=0; i<part->total_blocks; i++) {
+ rc = part->mbd.mtd->read(part->mbd.mtd,
+ i * part->block_size, part->header_size,
+ &retlen, (u_char*)part->header_cache);
+
+ if (!rc && retlen != part->header_size)
+ rc = -EIO;
+
+ if (rc)
+ goto err;
+
+ if (!build_block_map(part, i))
+ blocks_found++;
+ }
+
+ if (blocks_found == 0) {
+ printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
+ part->mbd.mtd->name);
+ rc = -ENOENT;
+ goto err;
+ }
+
+ if (part->reserved_block == -1) {
+ printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
+ part->mbd.mtd->name);
+
+ part->errors = 1;
+ }
+
+ return 0;
+
+err:
+ vfree(part->sector_map);
+ kfree(part->header_cache);
+ kfree(part->blocks);
+
+ return rc;
+}
+
+static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
+{
+ struct partition *part = (struct partition*)dev;
+ u_long addr;
+ size_t retlen;
+ int rc;
+
+ if (sector >= part->sector_count)
+ return -EIO;
+
+ addr = part->sector_map[sector];
+ if (addr != -1) {
+ rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
+ &retlen, (u_char*)buf);
+ if (!rc && retlen != SECTOR_SIZE)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_WARNING PREFIX "error reading '%s' at "
+ "0x%lx\n", part->mbd.mtd->name, addr);
+ return rc;
+ }
+ } else
+ memset(buf, 0, SECTOR_SIZE);
+
+ return 0;
+}
+
+static void erase_callback(struct erase_info *erase)
+{
+ struct partition *part;
+ u16 magic;
+ int i, rc;
+ size_t retlen;
+
+ part = (struct partition*)erase->priv;
+
+ i = erase->addr / part->block_size;
+ if (i >= part->total_blocks || part->blocks[i].offset != erase->addr) {
+ printk(KERN_ERR PREFIX "erase callback for unknown offset %x "
+ "on '%s'\n", erase->addr, part->mbd.mtd->name);
+ return;
+ }
+
+ if (erase->state != MTD_ERASE_DONE) {
+ printk(KERN_WARNING PREFIX "erase failed at 0x%x on '%s', "
+ "state %d\n", erase->addr,
+ part->mbd.mtd->name, erase->state);
+
+ part->blocks[i].state = BLOCK_FAILED;
+ part->blocks[i].free_sectors = 0;
+ part->blocks[i].used_sectors = 0;
+
+ kfree(erase);
+
+ return;
+ }
+
+ magic = const_cpu_to_le16(RFD_MAGIC);
+
+ part->blocks[i].state = BLOCK_ERASED;
+ part->blocks[i].free_sectors = part->data_sectors_per_block;
+ part->blocks[i].used_sectors = 0;
+ part->blocks[i].erases++;
+
+ rc = part->mbd.mtd->write(part->mbd.mtd,
+ part->blocks[i].offset, sizeof(magic), &retlen,
+ (u_char*)&magic);
+
+ if (!rc && retlen != sizeof(magic))
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "'%s': unable to write RFD "
+ "header at 0x%lx\n",
+ part->mbd.mtd->name,
+ part->blocks[i].offset);
+ part->blocks[i].state = BLOCK_FAILED;
+ }
+ else
+ part->blocks[i].state = BLOCK_OK;
+
+ kfree(erase);
+}
+
+static int erase_block(struct partition *part, int block)
+{
+ struct erase_info *erase;
+ int rc = -ENOMEM;
+
+ erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+ if (!erase)
+ goto err;
+
+ erase->mtd = part->mbd.mtd;
+ erase->callback = erase_callback;
+ erase->addr = part->blocks[block].offset;
+ erase->len = part->block_size;
+ erase->priv = (u_long)part;
+
+ part->blocks[block].state = BLOCK_ERASING;
+ part->blocks[block].free_sectors = 0;
+
+ rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' "
+ "failed\n", erase->addr, erase->len,
+ part->mbd.mtd->name);
+ kfree(erase);
+ }
+
+err:
+ return rc;
+}
+
+static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
+{
+ void *sector_data;
+ u16 *map;
+ size_t retlen;
+ int i, rc = -ENOMEM;
+
+ part->is_reclaiming = 1;
+
+ sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
+ if (!sector_data)
+ goto err3;
+
+ map = kmalloc(part->header_size, GFP_KERNEL);
+ if (!map)
+ goto err2;
+
+ rc = part->mbd.mtd->read(part->mbd.mtd,
+ part->blocks[block_no].offset, part->header_size,
+ &retlen, (u_char*)map);
+
+ if (!rc && retlen != part->header_size)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error reading '%s' at "
+ "0x%lx\n", part->mbd.mtd->name,
+ part->blocks[block_no].offset);
+
+ goto err;
+ }
+
+ for (i=0; i<part->data_sectors_per_block; i++) {
+ u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
+ u_long addr;
+
+
+ if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
+ continue;
+
+ if (entry == SECTOR_ZERO)
+ entry = 0;
+
+ /* already warned about and ignored in build_block_map() */
+ if (entry >= part->sector_count)
+ continue;
+
+ addr = part->blocks[block_no].offset +
+ (i + part->header_sectors_per_block) * SECTOR_SIZE;
+
+ if (*old_sector == addr) {
+ *old_sector = -1;
+ if (!part->blocks[block_no].used_sectors--) {
+ rc = erase_block(part, block_no);
+ break;
+ }
+ continue;
+ }
+ rc = part->mbd.mtd->read(part->mbd.mtd, addr,
+ SECTOR_SIZE, &retlen, sector_data);
+
+ if (!rc && retlen != SECTOR_SIZE)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "'%s': Unable to "
+ "read sector for relocation\n",
+ part->mbd.mtd->name);
+
+ goto err;
+ }
+
+ rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
+ entry, sector_data);
+
+ if (rc)
+ goto err;
+ }
+
+err:
+ kfree(map);
+err2:
+ kfree(sector_data);
+err3:
+ part->is_reclaiming = 0;
+
+ return rc;
+}
+
+static int reclaim_block(struct partition *part, u_long *old_sector)
+{
+ int block, best_block, score, old_sector_block;
+ int rc;
+
+ /* we have a race if sync doesn't exist */
+ if (part->mbd.mtd->sync)
+ part->mbd.mtd->sync(part->mbd.mtd);
+
+ score = 0x7fffffff; /* MAX_INT */
+ best_block = -1;
+ if (*old_sector != -1)
+ old_sector_block = *old_sector / part->block_size;
+ else
+ old_sector_block = -1;
+
+ for (block=0; block<part->total_blocks; block++) {
+ int this_score;
+
+ if (block == part->reserved_block)
+ continue;
+
+ /*
+ * Postpone reclaiming if there is a free sector: reclaiming
+ * later, once more sectors have been deleted, is more
+ * efficient (less data to move).
+ */
+ if (part->blocks[block].free_sectors)
+ return 0;
+
+ this_score = part->blocks[block].used_sectors;
+
+ if (block == old_sector_block)
+ this_score--;
+ else {
+ /* no point in moving a full block */
+ if (part->blocks[block].used_sectors ==
+ part->data_sectors_per_block)
+ continue;
+ }
+
+ this_score += part->blocks[block].erases;
+
+ if (this_score < score) {
+ best_block = block;
+ score = this_score;
+ }
+ }
+
+ if (best_block == -1)
+ return -ENOSPC;
+
+ part->current_block = -1;
+ part->reserved_block = best_block;
+
+ pr_debug("reclaim_block: reclaiming block #%d with %d used "
+ "%d free sectors\n", best_block,
+ part->blocks[best_block].used_sectors,
+ part->blocks[best_block].free_sectors);
+
+ if (part->blocks[best_block].used_sectors)
+ rc = move_block_contents(part, best_block, old_sector);
+ else
+ rc = erase_block(part, best_block);
+
+ return rc;
+}
+
+/*
+ * IMPROVE: It would be best to choose the block with the most deleted sectors,
+ * because if we fill that one up first it'll have the most chance of having
+ * the least live sectors at reclaim.
+ */
+static int find_free_block(struct partition *part)
+{
+ int block, stop;
+
+ block = part->current_block == -1 ?
+ jiffies % part->total_blocks : part->current_block;
+ stop = block;
+
+ do {
+ if (part->blocks[block].free_sectors &&
+ block != part->reserved_block)
+ return block;
+
+ if (part->blocks[block].state == BLOCK_UNUSED)
+ erase_block(part, block);
+
+ if (++block >= part->total_blocks)
+ block = 0;
+
+ } while (block != stop);
+
+ return -1;
+}
+
+static int find_writable_block(struct partition *part, u_long *old_sector)
+{
+ int rc, block;
+ size_t retlen;
+
+ block = find_free_block(part);
+
+ if (block == -1) {
+ if (!part->is_reclaiming) {
+ rc = reclaim_block(part, old_sector);
+ if (rc)
+ goto err;
+
+ block = find_free_block(part);
+ }
+
+ if (block == -1) {
+ rc = -ENOSPC;
+ goto err;
+ }
+ }
+
+ rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
+ part->header_size, &retlen, (u_char*)part->header_cache);
+
+ if (!rc && retlen != part->header_size)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "'%s': unable to read header at "
+ "0x%lx\n", part->mbd.mtd->name,
+ part->blocks[block].offset);
+ goto err;
+ }
+
+ part->current_block = block;
+
+err:
+ return rc;
+}
+
+static int mark_sector_deleted(struct partition *part, u_long old_addr)
+{
+ int block, offset, rc;
+ u_long addr;
+ size_t retlen;
+ u16 del = const_cpu_to_le16(SECTOR_DELETED);
+
+ block = old_addr / part->block_size;
+ offset = (old_addr % part->block_size) / SECTOR_SIZE -
+ part->header_sectors_per_block;
+
+ addr = part->blocks[block].offset +
+ (HEADER_MAP_OFFSET + offset) * sizeof(u16);
+ rc = part->mbd.mtd->write(part->mbd.mtd, addr,
+ sizeof(del), &retlen, (u_char*)&del);
+
+ if (!rc && retlen != sizeof(del))
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error writing '%s' at "
+ "0x%lx\n", part->mbd.mtd->name, addr);
+ if (rc)
+ goto err;
+ }
+ if (block == part->current_block)
+ part->header_cache[offset + HEADER_MAP_OFFSET] = del;
+
+ part->blocks[block].used_sectors--;
+
+ if (!part->blocks[block].used_sectors &&
+ !part->blocks[block].free_sectors)
+ rc = erase_block(part, block);
+
+err:
+ return rc;
+}
+
+static int find_free_sector(const struct partition *part, const struct block *block)
+{
+ int i, stop;
+
+ i = stop = part->data_sectors_per_block - block->free_sectors;
+
+ do {
+ if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
+ == SECTOR_FREE)
+ return i;
+
+ if (++i == part->data_sectors_per_block)
+ i = 0;
+ }
+ while(i != stop);
+
+ return -1;
+}
+
+static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
+{
+ struct partition *part = (struct partition*)dev;
+ struct block *block;
+ u_long addr;
+ int i;
+ int rc;
+ size_t retlen;
+ u16 entry;
+
+ if (part->current_block == -1 ||
+ !part->blocks[part->current_block].free_sectors) {
+
+ rc = find_writable_block(part, old_addr);
+ if (rc)
+ goto err;
+ }
+
+ block = &part->blocks[part->current_block];
+
+ i = find_free_sector(part, block);
+
+ if (i < 0) {
+ rc = -ENOSPC;
+ goto err;
+ }
+
+ addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
+ block->offset;
+ rc = part->mbd.mtd->write(part->mbd.mtd,
+ addr, SECTOR_SIZE, &retlen, (u_char*)buf);
+
+ if (!rc && retlen != SECTOR_SIZE)
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
+ part->mbd.mtd->name, addr);
+ if (rc)
+ goto err;
+ }
+
+ part->sector_map[sector] = addr;
+
+ entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);
+
+ part->header_cache[i + HEADER_MAP_OFFSET] = entry;
+
+ addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
+ rc = part->mbd.mtd->write(part->mbd.mtd, addr,
+ sizeof(entry), &retlen, (u_char*)&entry);
+
+ if (!rc && retlen != sizeof(entry))
+ rc = -EIO;
+
+ if (rc) {
+ printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
+ part->mbd.mtd->name, addr);
+ if (rc)
+ goto err;
+ }
+ block->used_sectors++;
+ block->free_sectors--;
+
+err:
+ return rc;
+}
+
+static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
+{
+ struct partition *part = (struct partition*)dev;
+ u_long old_addr;
+ int i;
+ int rc = 0;
+
+ pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);
+
+ if (part->reserved_block == -1) {
+ rc = -EACCES;
+ goto err;
+ }
+
+ if (sector >= part->sector_count) {
+ rc = -EIO;
+ goto err;
+ }
+
+ old_addr = part->sector_map[sector];
+
+ for (i=0; i<SECTOR_SIZE; i++) {
+ if (!buf[i])
+ continue;
+
+ rc = do_writesect(dev, sector, buf, &old_addr);
+ if (rc)
+ goto err;
+ break;
+ }
+
+ if (i == SECTOR_SIZE)
+ part->sector_map[sector] = -1;
+
+ if (old_addr != -1)
+ rc = mark_sector_deleted(part, old_addr);
+
+err:
+ return rc;
+}
+
+static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
+{
+ struct partition *part = (struct partition*)dev;
+
+ geo->heads = 1;
+ geo->sectors = SECTORS_PER_TRACK;
+ geo->cylinders = part->cylinders;
+
+ return 0;
+}
+
+static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
+{
+ struct partition *part;
+
+ if (mtd->type != MTD_NORFLASH)
+ return;
+
+ part = kcalloc(1, sizeof(struct partition), GFP_KERNEL);
+ if (!part)
+ return;
+
+ part->mbd.mtd = mtd;
+
+ if (block_size)
+ part->block_size = block_size;
+ else {
+ if (!mtd->erasesize) {
+ printk(KERN_WARNING PREFIX "please provide block_size");
+ return;
+ }
+ else
+ part->block_size = mtd->erasesize;
+ }
+
+ if (scan_header(part) == 0) {
+ part->mbd.size = part->sector_count;
+ part->mbd.blksize = SECTOR_SIZE;
+ part->mbd.tr = tr;
+ part->mbd.devnum = -1;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ part->mbd.readonly = 1;
+ else if (part->errors) {
+ printk(KERN_WARNING PREFIX "'%s': errors found, "
+ "setting read-only\n", mtd->name);
+ part->mbd.readonly = 1;
+ }
+
+ printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
+ mtd->name, mtd->type, mtd->flags);
+
+ if (!add_mtd_blktrans_dev((void*)part))
+ return;
+ }
+
+ kfree(part);
+}
+
+static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
+{
+ struct partition *part = (struct partition*)dev;
+ int i;
+
+ for (i=0; i<part->total_blocks; i++) {
+ pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
+ part->mbd.mtd->name, i, part->blocks[i].erases);
+ }
+
+ del_mtd_blktrans_dev(dev);
+ vfree(part->sector_map);
+ kfree(part->header_cache);
+ kfree(part->blocks);
+ kfree(part);
+}
+
+struct mtd_blktrans_ops rfd_ftl_tr = {
+ .name = "rfd",
+ .major = RFD_FTL_MAJOR,
+ .part_bits = PART_BITS,
+ .readsect = rfd_ftl_readsect,
+ .writesect = rfd_ftl_writesect,
+ .getgeo = rfd_ftl_getgeo,
+ .add_mtd = rfd_ftl_add_mtd,
+ .remove_dev = rfd_ftl_remove_dev,
+ .owner = THIS_MODULE,
+};
+
+static int __init init_rfd_ftl(void)
+{
+ return register_mtd_blktrans(&rfd_ftl_tr);
+}
+
+static void __exit cleanup_rfd_ftl(void)
+{
+ deregister_mtd_blktrans(&rfd_ftl_tr);
+}
+
+module_init(init_rfd_ftl);
+module_exit(cleanup_rfd_ftl);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
+ "used by General Software's Embedded BIOS");
+
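For reference: scan_header() above carves each RFD erase unit into header sectors (the 0x9193 magic word, two reserved u16s, then one u16 map entry per data sector) followed by the data sectors themselves. The stand-alone sketch below reproduces that layout arithmetic; the 64 KiB erase unit size is an illustrative assumption, not a value from the patch.

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SIZE	  512u
#define HEADER_MAP_OFFSET 3u	/* magic + two reserved u16s precede the map */

int main(void)
{
	unsigned int block_size = 64 * 1024;	/* hypothetical erase unit size */
	unsigned int sectors_per_block = block_size / SECTOR_SIZE;

	/* One u16 map entry per sector plus the three leading u16s, rounded up
	 * to whole 512-byte sectors -- mirrors scan_header(). */
	unsigned int header_sectors =
		((HEADER_MAP_OFFSET + sectors_per_block) * sizeof(uint16_t)
		 + SECTOR_SIZE - 1) / SECTOR_SIZE;
	unsigned int data_sectors = sectors_per_block - header_sectors;

	printf("%u sectors/unit: %u header, %u data\n",
	       sectors_per_block, header_sectors, data_sectors);
	return 0;
}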
diff --git a/linux-2.4.x/drivers/mtd/ssfdc.c b/linux-2.4.x/drivers/mtd/ssfdc.c
new file mode 100755
index 0000000..4e082cc
--- /dev/null
+++ b/linux-2.4.x/drivers/mtd/ssfdc.c
@@ -0,0 +1,1132 @@
+/*
+ * drivers/mtd/ssfdc.c
+ *
+ * Copyright (C) 2003 Simon Haynes (simon@baydel.con)
+ * Baydel Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * version 2.1 as published by the Free Software Foundation.
+ *
+ * This module provides a translation layer, via mtd, for smart
+ * media card access. It essentially enables the possibility
+ * of using cards on hardware which does not have a hardware translation
+ * layer and interchanging them with hardware that does, i.e. PC card readers.
+ *
+ * I had to write this module for a specific task and in a short timeframe;
+ * for this reason I have imposed some restrictions to make the job easier.
+ *
+ * To build and compile the driver I added the following lines
+ * to mtd/Config.in
+ *
+ * dep_tristate ' SSFDC support' CONFIG_SSFDC $CONFIG_MTD
+ *
+ * to /mtd/Makefile
+ *
+ * obj-$(CONFIG_SSFDC) += ssfdc.o
+ *
+ * and compiled the kernel via the usual methods.
+ *
+ * I am sure that there are many problems I don't know about but here are
+ * some that I know of
+ *
+ * Currently the driver uses MAJOR number 44 which I think is FTL or NFTL
+ * I did this because I wanted a static number and I didn't know
+ * how to go about getting a new one. This needs addressing
+ * The dev nodes required are like standard. I only use minor 0
+ * (/dev/ssfdca), and minor 1 (/dev/ssfdca1).
+ * You should be able to run fdisk on /dev/ssfdca and the first partition
+ * is /dev/ssfdca1. There is no working code in the module for changing the
+ * SMC and rebuilding the maps so the card should not be changed once the
+ * module is loaded. At present I only look for 1 partition. But this is a
+ * small commented hack.
+ *
+ * There is no support for cards which do not have a 512 byte page size with 16
+ * bytes of oob and an erase size of 16K.
+ * There are no checks for this at present. In addition the MTD reported size
+ * must be 16M or a multiple.
+ *
+ * Code to handle multiple partitions or multiple cards is incomplete.
+ * The data buffer and oob buffer need to be allocated on a per-partition basis.
+ * As I am only concerned with one partition I will do this if I ever need to.
+ * The cached physical address variable also needs this attention.
+ *
+ * Recently I have started to work on media changes. Some of this is specific
+ * to my hardware and you will see references to pt_ssfdc_smc and smc_status.
+ * This code is incomplete and does not work. I have commented it out for the
+ * moment, but it should give an indication of what I think is required. Maybe
+ * there is something in mtd that can help.
+ *
+ * 17th August 2004 MHB
+ *
+ * After updating from CVS I noticed some single bit data corruption. I believe
+ * that this was down to the fact that I was using mtd->read instead of mtd->read_ecc,
+ * and that mtd->read was applying its own error correction from the wrong ecc bytes.
+ * I have now corrected this.
+ *
+ * During this time I noticed that in ssfdc_allocate_new() I only seem to look for
+ * blocks in 1 zone, so this limits the partition size to 16MB on top of all the
+ * other SMC size restrictions.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/major.h>
+#include <linux/ioctl.h>
+#include <linux/hdreg.h>
+#include <linux/list.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+
+
+#if (LINUX_VERSION_CODE >= 0x20100)
+#include <linux/vmalloc.h>
+#endif
+#if (LINUX_VERSION_CODE >= 0x20303)
+#include <linux/blkpg.h>
+#endif
+
+#include <asm/semaphore.h>
+
+#define SSFDC_FORMAT 1
+
+#define PDEBUG(fmt, args...)
+
+#define BLK_INC_USE_COUNT MOD_INC_USE_COUNT
+#define BLK_DEC_USE_COUNT MOD_DEC_USE_COUNT
+
+#if (LINUX_VERSION_CODE < 0x20320)
+#define BLK_DEFAULT_QUEUE(n) blk_dev[n].request_fn
+#define blk_init_queue(q, req) q = (req)
+#define blk_cleanup_queue(q) q = NULL
+#define request_arg_t void
+#else
+#define request_arg_t request_queue_t *q
+#endif
+
+#define TRUE 1
+#define FALSE 0
+
+#define SSFDC_MAJOR 44
+
+#define MAJOR_NR SSFDC_MAJOR
+#define DEVICE_NAME "ssfdc"
+#define DEVICE_REQUEST do_ssfdc_request
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#include <linux/blk.h>
+
+#include "/home/simon/ebony/dbwhatu/dbwhatu/smccontrol.h"
+
+
+
+#define ZONE_SIZE (16 * 1024 * 1024)
+#define SMC_BLOCK_SIZE (16 * 1024)
+#define SECTOR_SIZE 512
+#define SECTORS_PER_ZONE (ZONE_SIZE / SECTOR_SIZE)
+#define BLOCKS_PER_ZONE (ZONE_SIZE / SMC_BLOCK_SIZE)
+#define SECTORS_PER_BLOCK (SMC_BLOCK_SIZE / SECTOR_SIZE)
+#define OOB_SIZE 16
+
+
+#define MAX_DEVICES 4
+#define MAX_PARTITIONS 8
+#define PARTITION_BITS 3
+#define MAX_ZONES 8
+
+
+int ssfdc_major = SSFDC_MAJOR;
+unsigned int ssfdc_cached = 0xFFFFFFFF;
+static unsigned char ssfdc_scratch[16384];
+static unsigned char ssfdc_buffer[16];
+static unsigned char ssfdc_ffoob_buf[OOB_SIZE * SECTORS_PER_BLOCK];
+static unsigned char ssfdc_oob_buf[OOB_SIZE * SECTORS_PER_BLOCK];
+
+
+static struct nand_oobinfo ssfdc_ffoob_info = {
+ .useecc = 0,
+};
+
+
+typedef struct minor_t {
+ atomic_t open;
+ int cached;
+ unsigned char * pt_data;
+ unsigned char * pt_oob;
+} minor_t;
+
+
+
+typedef struct partition_t {
+ int type;
+ struct mtd_info *mtd;
+ int count;
+ unsigned int *zone;
+ unsigned int zoneCount;
+ minor_t minor[MAX_PARTITIONS];
+ unsigned int last_written[MAX_ZONES];
+} partition_t;
+
+partition_t SMCParts[MAX_DEVICES];
+
+
+static unsigned char ssfdc_ecc[] = {14, 13, 15, 9, 8, 10};
+
+static struct hd_struct ssfdc_hd[MAX_DEVICES * MAX_PARTITIONS];
+static int ssfdc_sizes[MAX_DEVICES * MAX_PARTITIONS];
+static int ssfdc_blocksizes[MAX_DEVICES * MAX_PARTITIONS];
+smc_control * pt_ssfdc_smc;
+
+
+static struct gendisk ssfdc_gendisk = {
+ major: SSFDC_MAJOR,
+ major_name: "ssfdc",
+ minor_shift: PARTITION_BITS,
+ max_p: MAX_PARTITIONS,
+ part: ssfdc_hd,
+ sizes: ssfdc_sizes,
+};
+
+
+static int ssfdc_ioctl(struct inode *inode, struct file *file, u_int cmd, u_long arg);
+static int ssfdc_open(struct inode *inode, struct file *file);
+static int ssfdc_close(struct inode *inode, struct file *file);
+static int ssfdc_write(partition_t *part, caddr_t buffer, u_long sector, u_long nblocks);
+static int ssfdc_read(partition_t *part, caddr_t buffer, u_long sector, u_long nblocks);
+static int ssfdc_physical(partition_t * pt_smcpart, int zone, int block);
+static int ssfdc_erase(partition_t *pt_smcpart, unsigned int offset);
+static int ssfdc_read_partitions(partition_t * pt_smcpart);
+static void ssfdc_notify_add(struct mtd_info *mtd);
+static void ssfdc_notify_remove(struct mtd_info *mtd);
+static void ssfdc_tables(partition_t * pt_smcpart);
+static int ssfdc_sector_blank(partition_t * pt_smcpart, int sc);
+static int ssfdc_allocate_new(partition_t * pt_smcpart, int zone);
+int ssfdc_parity(int number);
+static void ssfdc_erase_callback(struct erase_info *erase);
+
+
+
+static DECLARE_WAIT_QUEUE_HEAD(ssfdc_wq);
+
+
+static struct mtd_notifier ssfdc_notifier = {
+ add: ssfdc_notify_add,
+ remove: ssfdc_notify_remove,
+};
+
+
+
+static struct block_device_operations ssfdc_fops = {
+ open: ssfdc_open,
+ release: ssfdc_close,
+ ioctl: ssfdc_ioctl,
+};
+
+static struct semaphore ssfdc_semaphore;
+
+static void ssfdc_notify_add(struct mtd_info *mtd) {
+
+
+
+
+ if(mtd->index >= 1) return; // Hack to limit SSFDC to 1 partition
+
+ if( ((mtd->size % ZONE_SIZE) != 0) && (mtd->size < (ZONE_SIZE * MAX_ZONES)) ){
+		PDEBUG("ssfdc_notify_add : mtd partition %d is not a multiple of 16M, not SSFDC\n", mtd->index);
+ }
+ else {
+ memset((void *)&SMCParts[mtd->index].type, 0, sizeof(partition_t));
+ SMCParts[mtd->index].mtd = mtd;
+ SMCParts[mtd->index].count = mtd->index;
+ SMCParts[mtd->index].type = 1;
+ SMCParts[mtd->index].zoneCount = mtd->size / ZONE_SIZE;
+ SMCParts[mtd->index].zone = kmalloc(SMCParts[mtd->index].zoneCount * 8192, GFP_KERNEL);
+
+
+ if(!SMCParts[mtd->index].zone) {
+ printk(KERN_NOTICE "ssfdc_notify_add : mtd partition %d, failed to allocate mapping table\n", mtd->index);
+ SMCParts[mtd->index].type = 0;
+ }
+ else {
+ memset((void *)SMCParts[mtd->index].zone, 0xFF, SMCParts[mtd->index].zoneCount * 8192);
+ }
+
+ ssfdc_read_partitions((partition_t *)&SMCParts[mtd->index].type);
+ }
+ return;
+
+}
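
The header comment above notes that the driver assumes 512-byte pages, 16 bytes of OOB, 16 KiB erase blocks and a total size that is a multiple of 16 MiB, but never actually checks any of it. A minimal sketch of such a check, assuming the 2.4 MTD field names oobblock/oobsize/erasesize (hypothetical helper, not part of the patch):

static int ssfdc_geometry_ok(struct mtd_info *mtd)
{
	/* Geometry the rest of the driver silently relies on */
	return mtd->oobblock  == SECTOR_SIZE    &&	/* 512-byte pages      */
	       mtd->oobsize   == OOB_SIZE       &&	/* 16 bytes of OOB     */
	       mtd->erasesize == SMC_BLOCK_SIZE &&	/* 16 KiB erase blocks */
	       (mtd->size % ZONE_SIZE) == 0;		/* multiple of 16 MiB  */
}
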
+static int ssfdc_read_partitions(partition_t * pt_smcpart) {
+
+ int whole, i, j, size;
+
+//=printk("ssfdc_read_partitions : start\n");
+
+ for(i=0; i<MAX_PARTITIONS; i++)
+ if ((atomic_read(&pt_smcpart->minor[i].open) > 1)) {
+//=printk("ssfdc_read_partitions : part %d busy\n", i);
+
+ return -EBUSY;
+ }
+
+
+//=printk("ssfdc_read_partitions : tables start\n");
+ ssfdc_tables(pt_smcpart);
+//=printk("ssfdc_read_partitions : tables end\n");
+
+ whole = pt_smcpart->count << PARTITION_BITS;
+
+
+ j = MAX_PARTITIONS - 1;
+ while (j-- > 0) {
+ if (ssfdc_hd[whole+j].nr_sects > 0) {
+ kdev_t rdev = MKDEV(SSFDC_MAJOR, whole+j);
+ invalidate_device(rdev, 1);
+ }
+ ssfdc_hd[whole+j].start_sect = 0;
+ ssfdc_hd[whole+j].nr_sects = 0;
+ }
+
+
+ size = (((pt_smcpart->mtd->size / 16384) * 1000) / 1024) * 32;
+ size /= (0x8 * 0x20);
+ size = size * (0x8 * 0x20);
+
+//=printk("ssfdc_read_partitions : register start\n");
+
+ register_disk(&ssfdc_gendisk, whole >> PARTITION_BITS, MAX_PARTITIONS,
+ &ssfdc_fops, size);
+
+//=printk("ssfdc_read_partitions : register end\n");
+
+
+ return 0;
+}
+
+
+static void ssfdc_notify_remove(struct mtd_info *mtd) {
+int i, j, whole;
+
+ i=mtd->index;
+ whole = i << PARTITION_BITS;
+ if(SMCParts[i].mtd == mtd) {
+ if(SMCParts[i].zone)kfree(SMCParts[i].zone);
+ memset((void *)&SMCParts[i].type, 0, sizeof(partition_t));
+ for (j = 0; j < MAX_PARTITIONS; j++) {
+ if (ssfdc_hd[whole+j].nr_sects > 0) {
+ ssfdc_hd[whole+j].start_sect = 0;
+ ssfdc_hd[whole+j].nr_sects=0;
+ }
+ }
+ return;
+ }
+ return;
+}
+
+
+
+static int ssfdc_ioctl(struct inode *inode, struct file *file,
+ u_int cmd, u_long arg) {
+
+ int minor = MINOR(inode->i_rdev);
+ int ret = -EINVAL;
+ partition_t * pt_smcpart = (partition_t *)&SMCParts[(minor & ~(MAX_PARTITIONS -1)) >> PARTITION_BITS].type;
+ struct hd_geometry geo;
+ int size;
+/*
+ unsigned char smc_status;
+
+ smc_status = in_8((void *)&pt_ssfdc_smc->smc_status);
+ if(!(smc_status & SMC_PRESENT)) {
+ printk("ssfdc : media not present\n");
+ ret = 1;
+ goto ssfdc_ioctl_error;
+ }
+
+ if(smc_status & SMC_CHANGED) {
+ out_8((void *)&pt_ssfdc_smc->smc_status, smc_status);
+ if(minor & ((1<< PARTITION_BITS) - 1)) return -ENOTTY;
+ ssfdc_read_partitions(pt_smcpart);
+ printk("ssfdc : media change\n");
+ }
+*/
+ switch(cmd) {
+
+ case HDIO_GETGEO:
+ memset(&geo, 0, sizeof(geo));
+ size = (((pt_smcpart->mtd->size / 16384) * 1000) / 1024) * 32;
+ size /= (0x8 * 0x20);
+ geo.heads = 0x8;
+ geo.sectors = 0x20;
+ geo.cylinders = size;
+ geo.start = ssfdc_hd[minor].start_sect;
+// printk(KERN_WARNING "ssfdc : HDIO_GETGEO heads %d, sectors %d, cylinders %d, start %lu\n",
+// geo.heads, geo.sectors, geo.cylinders, geo.start);
+ copy_to_user((void *)arg, &geo, sizeof(geo));
+ ret = 0;
+ break;
+
+ case BLKGETSIZE64:
+ case BLKGETSIZE:
+ size = (((pt_smcpart->mtd->size / 16384) * 1000) / 1024) * 32;
+ //=printk(KERN_WARNING "ssfdc : BLKGETSIZE %d, minor %d\n", size, minor);
+ ret = copy_to_user((unsigned long *)arg, &size, sizeof(size));
+ break;
+ case BLKSSZGET:
+ size = 512;
+ ret = copy_to_user((unsigned long *)arg, &size, sizeof(size));
+ break;
+
+ case BLKRRPART:
+ if(minor & ((1<< PARTITION_BITS) - 1)) return -ENOTTY;
+ ssfdc_read_partitions(pt_smcpart);
+ ret=0;
+ break;
+ case BLKFLSBUF:
+ printk(KERN_WARNING "ssfdc : block ioctl 0x%x\n", cmd);
+ break;
+
+ default:
+ printk(KERN_WARNING "ssfdc: unknown ioctl 0x%x\n", cmd);
+ }
+
+//ssfdc_ioctl_error:
+ return(ret);
+
+}
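
For reference, the capacity/geometry arithmetic used by HDIO_GETGEO and BLKGETSIZE above works out as follows for a hypothetical 16 MiB card (the same formula also appears in ssfdc_read_partitions()):

/*
 * mtd->size = 16 MiB = 16777216 bytes
 * size      = ((16777216 / 16384) * 1000 / 1024) * 32
 *           = (1024 * 1000 / 1024) * 32
 *           = 32000 sectors of 512 bytes (~15.6 MiB exported)
 * geometry  = 8 heads * 32 sectors/track -> 32000 / 256 = 125 cylinders
 */
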
+static int ssfdc_open(struct inode *inode, struct file *file)
+{
+ int minor = MINOR(inode->i_rdev);
+ partition_t *pt_smcpart;
+ int index;
+
+ if (minor >= MAX_MTD_DEVICES)
+ return -ENODEV;
+
+ index = (minor & ~(MAX_PARTITIONS -1)) >> PARTITION_BITS;
+
+
+ if(SMCParts[index].type != SSFDC_FORMAT)
+ return -ENXIO;
+
+ pt_smcpart = &SMCParts[index];
+
+
+ if(!pt_smcpart->zone)
+ return -ENXIO;
+
+
+ BLK_INC_USE_COUNT;
+
+ if (!get_mtd_device(pt_smcpart->mtd, -1)) {
+ BLK_DEC_USE_COUNT;
+ return -ENXIO;
+ }
+
+ if ((file->f_mode & 2) && !(pt_smcpart->mtd->flags & MTD_CLEAR_BITS) ) {
+ put_mtd_device(pt_smcpart->mtd);
+ BLK_DEC_USE_COUNT;
+ return -EROFS;
+ }
+
+
+ atomic_inc(&pt_smcpart->minor[minor & ~(MAX_PARTITIONS -1)].open);
+
+ PDEBUG("ssfdc_open : device %d\n", minor);
+
+ return(0);
+}
+
+static void ssfdc_tables(partition_t * pt_smcpart) {
+
+ int * logical, * physical;
+ int offset = 0;
+ int zone, block;
+ int i, retlen;
+ int block_address, parity;
+ int h, l;
+
+ for(zone=0; zone<pt_smcpart->zoneCount; zone++) {
+ logical = pt_smcpart->zone + (2048 * zone);
+ memset((void *)logical, 0xFF, 1024 * sizeof(int));
+ physical = pt_smcpart->zone + (2048 * zone) + 1024;
+ memset((void *)physical, 0xFF, 1024 * sizeof(int));
+
+ for(block=0; block < 1024; block++) {
+ offset = (zone * ZONE_SIZE) + (block * SMC_BLOCK_SIZE);
+ pt_smcpart->mtd->read_oob(pt_smcpart->mtd, offset, sizeof(ssfdc_buffer), &retlen, ssfdc_buffer);
+ if(retlen != sizeof(ssfdc_buffer)) {
+ printk(KERN_WARNING "ssfdc_tables : failed to read OOB\n");
+ pt_smcpart->type = 0;
+ return;
+ }
+
+ l = (ssfdc_buffer[7] & 0xFF);
+ h = (ssfdc_buffer[6] & 0xFF);
+ block_address = l + (h << 8L);
+
+ if((block_address & ~0x7FF) != 0x1000) {
+ continue;
+ }
+
+ parity = block_address & 0x01;
+
+ block_address &= 0x7FF;
+ block_address >>= 1;
+
+
+ if(ssfdc_parity(block_address) != parity) {
+ printk(KERN_WARNING "ssfdc_tables : parity error offset 0x%x, block 0x%x, parity 0x%x\nOOB : "
+ , offset, block_address, parity);
+ for(i=0; i<16; i++) {
+ printk("0x%02x ", (unsigned char)ssfdc_buffer[i]);
+ }
+ printk("\n");
+ pt_smcpart->type = 0;
+ return;
+ }
+
+
+ /* Ok we have a valid block number so insert it */
+ *(logical + block_address) = (offset/SMC_BLOCK_SIZE);
+ PDEBUG("ssfdc_tables : logical 0x%x + 0x%x = 0x%x\n",
+ (unsigned int)logical, block_address, (offset/SMC_BLOCK_SIZE));
+ *(physical + block) = block_address;
+ PDEBUG("ssfdc_tables : physical 0x%x + 0x%x = 0x%x\n", (unsigned int)physical, block, block_address);
+
+
+ }
+ }
+ return;
+}
+int ssfdc_parity(int number) {
+ int i;
+ int parity = 1; // the 0x1000 bit
+
+ for(i=0; i<10; i++) {
+ parity += ((number >> i) & 1);
+ }
+ PDEBUG("ssfdc_parity : number 0x%x, parity 0x%x\n", number, parity);
+ return(parity % 2);
+}
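
The block-address field that ssfdc_tables() decodes and ssfdc_write() builds lives in OOB bytes 6/7 (and is repeated in bytes 11/12): bit 12 is a marker, bits 10..1 carry the logical block number and bit 0 is the parity computed above. A minimal round-trip sketch of that encoding (hypothetical helpers, for illustration only):

static int smc_encode_lba(int block)			/* block: 0..1023 */
{
	return 0x1000 | (block << 1) | ssfdc_parity(block);
}

static int smc_decode_lba(int field, int *block)	/* field: (oob[6] << 8) | oob[7] */
{
	if ((field & ~0x7FF) != 0x1000)			/* marker bits must match */
		return -1;
	*block = (field & 0x7FF) >> 1;
	if (ssfdc_parity(*block) != (field & 0x01))	/* parity mismatch */
		return -1;
	return 0;
}
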
+static int ssfdc_physical(partition_t * pt_smcpart, int zone, int block) {
+
+ unsigned int * logical;
+
+ logical = pt_smcpart->zone + (zone * 2048);
+
+ logical += block;
+
+ if(*logical == 0xFFFFFFFF) {
+ PDEBUG("ssfdc_physical : physical for zone %d, block %d invalid\n", zone, block);
+ return(-1);
+ }
+
+ PDEBUG("ssfdc_physical : physical for zone %d, block %d, 0x%x\n", zone, block, (*logical * SMC_BLOCK_SIZE));
+ return(*logical * SMC_BLOCK_SIZE);
+}
+
+static int ssfdc_close(struct inode *inode, struct file *file)
+{
+ int minor = MINOR(inode->i_rdev);
+ partition_t *pt_smcpart;
+ int index = (minor & ~(MAX_PARTITIONS -1)) >> PARTITION_BITS;
+
+ if (minor >= MAX_MTD_DEVICES)
+ return -ENODEV;
+
+ if(SMCParts[index].type != SSFDC_FORMAT)
+ return -ENXIO;
+
+ pt_smcpart = &SMCParts[index];
+ atomic_dec(&pt_smcpart->minor[minor & ~(MAX_PARTITIONS -1)].open);
+ put_mtd_device(pt_smcpart->mtd);
+ BLK_DEC_USE_COUNT;
+
+ return(0);
+}
+
+
+static void do_ssfdc_request(request_arg_t)
+{
+ int ret, minor;
+ partition_t *pt_smcpart;
+ int index;
+ do {
+
+ INIT_REQUEST;
+
+
+
+ minor = MINOR(CURRENT->rq_dev);
+ index = (minor & ~(MAX_PARTITIONS -1)) >> PARTITION_BITS;
+
+ pt_smcpart = &SMCParts[index];
+ if (pt_smcpart->type == SSFDC_FORMAT) {
+ ret = 0;
+ switch (CURRENT->cmd) {
+ case READ:
+ ret = ssfdc_read(pt_smcpart, CURRENT->buffer,
+ CURRENT->sector + ssfdc_hd[minor].start_sect,
+ CURRENT->current_nr_sectors);
+ break;
+
+ case WRITE:
+ ret = ssfdc_write(pt_smcpart, CURRENT->buffer,
+ CURRENT->sector + ssfdc_hd[minor].start_sect,
+ CURRENT->current_nr_sectors);
+ break;
+
+ default:
+ panic("do_ssfdc_request : unknown block command!\n");
+ }
+
+ } else {
+ ret = 1;
+ PDEBUG("not ssfdc partition type\n");
+ }
+
+ if (!ret) {
+ CURRENT->sector += CURRENT->current_nr_sectors;
+ }
+
+ end_request((ret == 0) ? 1 : 0);
+ } while (1);
+}
+
+static int ssfdc_write(partition_t *pt_smcpart, caddr_t buffer,
+ u_long sector, u_long nblocks)
+{
+ int zone, block, offset;
+ int sectors_written = 0;
+ int physical;
+ int * pt_logical;
+ int * pt_physical;
+ int new = -1;
+ int size;
+ int retlen;
+ int i;
+ int sc;
+ int ptr_done = 0;
+ unsigned char * ptr = (unsigned char *)buffer;
+ unsigned char ecc_code[6], ecc_calc[6];
+ int do_erase;
+// unsigned char smc_status;
+
+
+
+ offset = (sector % SECTORS_PER_ZONE) % SECTORS_PER_BLOCK ;
+
+ PDEBUG("write device %d, sector %d, count %d\n",
+ pt_smcpart->count, sector, nblocks);
+/*
+ smc_status = in_8((void *)&pt_ssfdc_smc->smc_status);
+ if(!(smc_status & SMC_PRESENT)) {
+ printk("ssfdc : media not present\n");
+ return -ENXIO;
+ }
+
+ if(smc_status & SMC_CHANGED) {
+ out_8((void *)&pt_ssfdc_smc->smc_status, smc_status);
+ ssfdc_read_partitions(pt_smcpart);
+ printk("ssfdc : media change\n");
+ }
+*/
+ while(sectors_written < nblocks) {
+
+ new = -1;
+ do_erase = FALSE;
+
+ zone = (sector + sectors_written) / SECTORS_PER_ZONE;
+ block = ((sector + sectors_written) % SECTORS_PER_ZONE) / SECTORS_PER_BLOCK ;
+ offset = ((sector + sectors_written) % SECTORS_PER_ZONE) % SECTORS_PER_BLOCK ;
+
+ pt_logical = pt_smcpart->zone + (zone * 2048);
+ pt_physical = pt_smcpart->zone + (zone * 2048) + 1024;
+
+ size = ((SECTORS_PER_BLOCK - offset) < (nblocks - sectors_written)) ?
+ (SECTORS_PER_BLOCK - offset) : (nblocks - sectors_written);
+ size *= SECTOR_SIZE;
+
+ PDEBUG("write device %d, sector %d, count %d, zone %d, block %d, offset %d, done %d, size %d, address 0x%x\n",
+ pt_smcpart->count, sector, nblocks, zone, block, offset, sectors_written, size, (unsigned int)ptr);
+
+ physical = ssfdc_physical(pt_smcpart, zone, block);
+
+
+ if(physical >= 0) {
+ if(ssfdc_cached != physical) {
+ pt_smcpart->mtd->read_ecc(pt_smcpart->mtd, physical, SMC_BLOCK_SIZE, &retlen, ssfdc_scratch,
+ ssfdc_oob_buf, &ssfdc_ffoob_info);
+ if(retlen != SMC_BLOCK_SIZE) {
+ printk(KERN_WARNING "ssfdc_write : failed to read physical\n");
+ return -ENXIO;
+ }
+
+ for(sc=0; sc<SECTORS_PER_BLOCK; sc++) {
+ pt_smcpart->mtd->read_oob(pt_smcpart->mtd, physical + (sc * SECTOR_SIZE), sizeof(ssfdc_buffer), &retlen, ssfdc_buffer);
+ if(retlen != sizeof(ssfdc_buffer)) {
+ printk(KERN_WARNING "ssfdc_write : failed to read physical oob\n");
+ return -ENXIO;
+ }
+
+ nand_calculate_ecc (pt_smcpart->mtd, &ssfdc_scratch[sc * SECTOR_SIZE], &ecc_calc[0]);
+ nand_calculate_ecc (pt_smcpart->mtd, &ssfdc_scratch[(sc * SECTOR_SIZE) + 256], &ecc_calc[3]);
+ for(i=0; i<6; i++) ecc_code[i] = ssfdc_buffer[ssfdc_ecc[i]];
+ nand_correct_data(pt_smcpart->mtd, &ssfdc_scratch[sc * SECTOR_SIZE], &ecc_code[0], &ecc_calc[0]);
+ nand_correct_data(pt_smcpart->mtd, &ssfdc_scratch[(sc * SECTOR_SIZE) + 256], &ecc_code[3], &ecc_calc[3]);
+ }
+
+ }
+
+ for(sc=0; sc<SECTORS_PER_BLOCK; sc++) {
+ if(offset > sc) {
+ PDEBUG("offset %d, sector %d\n", offset, sc);
+ continue;
+ }
+ pt_smcpart->mtd->read_oob(pt_smcpart->mtd, physical + (sc * SECTOR_SIZE), sizeof(ssfdc_buffer), &retlen, ssfdc_buffer);
+ if(retlen != sizeof(ssfdc_buffer)) {
+ printk(KERN_WARNING "ssfdc_write : failed to read physical oob\n");
+ return -ENXIO;
+ }
+
+ nand_calculate_ecc (pt_smcpart->mtd, &ssfdc_scratch[sc * SECTOR_SIZE], &ecc_calc[0]);
+ nand_calculate_ecc (pt_smcpart->mtd, &ssfdc_scratch[(sc * SECTOR_SIZE) + 256], &ecc_calc[3]);
+ for(i=0; i<6; i++) ecc_code[i] = ssfdc_buffer[ssfdc_ecc[i]];
+ nand_correct_data(pt_smcpart->mtd, &ssfdc_scratch[sc * SECTOR_SIZE], &ecc_code[0], &ecc_calc[0]);
+ nand_correct_data(pt_smcpart->mtd, &ssfdc_scratch[(sc * SECTOR_SIZE) + 256], &ecc_code[3], &ecc_calc[3]);
+
+ /* find out if the block is being used */
+
+
+ if(ssfdc_sector_blank(pt_smcpart, sc)) {
+ PDEBUG("ssfdc_write : zone %d, block %d, sector %d, lbn %d, blank, physical 0x%x\n",
+ zone, block, sc, sector, physical);
+ memcpy(&ssfdc_scratch[(sc * SECTOR_SIZE)], ptr+ptr_done, SECTOR_SIZE);
+ nand_calculate_ecc (pt_smcpart->mtd, (ptr + ptr_done), &ecc_calc[0]);
+ nand_calculate_ecc (pt_smcpart->mtd, (ptr + ptr_done + 256), &ecc_calc[3]);
+ for(i=0; i<6; i++) ssfdc_buffer[ssfdc_ecc[i]] = ecc_calc[i];
+ i = (block << 1) | 0x1000;
+ i |= ssfdc_parity(block);
+ ssfdc_buffer[7] = ssfdc_buffer[12] = i & 0xFF;
+ ssfdc_buffer[6] = ssfdc_buffer[11] = (i & 0xFF00) >> 0x08;
+
+ pt_smcpart->mtd->write_ecc(pt_smcpart->mtd, physical + (sc * SECTOR_SIZE), SECTOR_SIZE, &retlen,
+ ptr + ptr_done, ssfdc_buffer, &ssfdc_ffoob_info);
+ if(retlen != SECTOR_SIZE) {
+ printk(KERN_WARNING "ssfdc_write : failed to write physical 0x%x, sector 0x%x, blank, retlen %d\n"
+ , physical, sc, retlen);
+ return -ENXIO;
+ }
+
+ ptr_done += SECTOR_SIZE;
+ if(ptr_done >= size) break;
+ }
+ else {
+ new = ssfdc_allocate_new(pt_smcpart, zone);
+ /* erase the old block */
+ *(pt_physical + ((physical % ZONE_SIZE) / SMC_BLOCK_SIZE)) = 0xFFFFFFFF;
+
+ PDEBUG("ssfdc_write : physical 0x%x + 0x%x = 0x%x\n",
+ (unsigned int)pt_physical, ((physical % ZONE_SIZE) / SMC_BLOCK_SIZE), 0xFFFFFFFF);
+ do_erase = TRUE;
+ PDEBUG("ssfdc_write : zone %d, block %d, sector %d, lbn %d, written, physical 0x%x, new 0x%x\n",
+ zone, block, sc, sector, physical, new);
+ break;
+ }
+ }
+ }
+ else {
+ ssfdc_cached = 0xFFFFFFFF;
+ memset(ssfdc_scratch, 0xFF, sizeof(ssfdc_scratch));
+ new = ssfdc_allocate_new(pt_smcpart, zone);
+ PDEBUG("ssfdc_write : zone %d, block %d, lbn %d, physical 0x%x, unallocated, new 0x%x\n",
+ zone, block, sector, physical, new);
+ }
+
+
+
+ if(new != -1) {
+
+
+ memcpy(&ssfdc_scratch[(offset * SECTOR_SIZE)], ptr, size);
+ PDEBUG("ssfdc_write : new 0x%x, offset 0x%x, size 0x%x, block 0x%x\n", new, offset, size, block);
+ for(sc=0; sc<SECTORS_PER_BLOCK; sc++) {
+ memset(ssfdc_buffer, 0xFF, OOB_SIZE);
+ nand_calculate_ecc (pt_smcpart->mtd, &ssfdc_scratch[sc * SECTOR_SIZE], &ecc_calc[0]);
+ nand_calculate_ecc (pt_smcpart->mtd, &ssfdc_scratch[(sc * SECTOR_SIZE) + 256], &ecc_calc[3]);
+ for(i=0; i<6; i++) ssfdc_buffer[ssfdc_ecc[i]] = ecc_calc[i];
+ i = (block << 1) | 0x1000;
+ i |= ssfdc_parity(block);
+ ssfdc_buffer[7] = ssfdc_buffer[12] = i & 0xFF;
+ ssfdc_buffer[6] = ssfdc_buffer[11] = (i & 0xFF00) >> 0x08;
+ memcpy(&ssfdc_oob_buf[sc * OOB_SIZE], ssfdc_buffer, OOB_SIZE);
+ }
+
+
+ pt_smcpart->mtd->write_ecc(pt_smcpart->mtd, new, SMC_BLOCK_SIZE, &retlen, ssfdc_scratch,
+ ssfdc_oob_buf, &ssfdc_ffoob_info);
+ if(retlen != SMC_BLOCK_SIZE) {
+ printk(KERN_WARNING "ssfdc_write : failed to write block, physical 0x%x, returned 0x%x\n", new, retlen);
+ return -ENXIO;
+ }
+ /* change the mapping table to reflect the new block placement */
+
+ *(pt_logical + block) = (new % ZONE_SIZE) / SMC_BLOCK_SIZE;
+ PDEBUG("ssfdc_write : logical 0x%x + 0x%x = 0x%x\n",
+ (unsigned int)pt_logical, block, (new % ZONE_SIZE) / SMC_BLOCK_SIZE);
+
+ *(pt_physical + ((new % ZONE_SIZE) / SMC_BLOCK_SIZE)) = block;
+ PDEBUG("ssfdc_write : physical 0x%x + 0x%x = 0x%x\n",
+ (unsigned int)pt_physical, ((new % ZONE_SIZE) / SMC_BLOCK_SIZE), block);
+
+
+ ssfdc_cached = new;
+ }
+
+
+ ptr += size;
+ ptr_done = 0;
+ sectors_written += (size / SECTOR_SIZE);
+ if(do_erase) ssfdc_erase(pt_smcpart, physical);
+
+ }
+
+
+
+
+ return(0);
+}
+static int ssfdc_sector_blank(partition_t * pt_smcpart, int sc) {
+int b;
+
+ for(b=0; b<SECTOR_SIZE; b++) {
+ if(ssfdc_scratch[b + (sc * SECTOR_SIZE)] != 0xFF) return(0);
+ }
+ for(b=0; b<OOB_SIZE; b++) {
+ if((b==6) || (b==7) || (b==11) || (b==12)) continue; // Block address fields
+ if(ssfdc_buffer[b] != 0xFF) return(0);
+ }
+ return(1);
+}
+static int ssfdc_allocate_new(partition_t * pt_smcpart, int zone) {
+
+ int new = pt_smcpart->last_written[zone] + 1;
+ int * pt_physical;
+ int physical;
+ int block;
+ int retlen;
+ unsigned char oob[16];
+
+
+ if(new >= BLOCKS_PER_ZONE) new = 0;
+
+
+ while (new != pt_smcpart->last_written[zone]) {
+ block = new % BLOCKS_PER_ZONE;
+ pt_physical = pt_smcpart->zone + (zone * 2048) + 1024 + block;
+ physical = (zone * ZONE_SIZE) + (block * SMC_BLOCK_SIZE);
+
+ PDEBUG("ssfdc_allocate_new : zone %d, block %d, address 0x%08x, data 0x%08x\n",
+ zone, block, (unsigned int)pt_physical, *pt_physical);
+ if(*pt_physical == 0xFFFFFFFF) {
+ PDEBUG("ssfdc_allocate_new : physical 0x%x = 0x%x\n", (unsigned int)pt_physical, *pt_physical);
+ memset(oob, 0, OOB_SIZE);
+ pt_smcpart->mtd->read_oob(pt_smcpart->mtd, physical, OOB_SIZE, &retlen, oob);
+ if((oob[5] == 0xFF) && (retlen == OOB_SIZE)) { // If not a bad block
+ pt_smcpart->last_written[zone] = new;
+ return((new * SMC_BLOCK_SIZE) + (zone * ZONE_SIZE));
+ }
+ else {
+ PDEBUG("ssfdc_allocate_new : new 0x%x, physical 0x%x, block status 0x%x, oob length 0x%x\n", new, physical, oob[5], retlen);
+ }
+ }
+ new++;
+ if(new >= BLOCKS_PER_ZONE) new = 0;
+ }
+
+	panic("ssfdc_allocate_new : can't find free block\n");
+
+}
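
ssfdc_allocate_new() returns a byte offset on the card; the write path then converts that back into an in-zone block index and updates both halves of the per-zone mapping table (1024 logical-to-physical entries followed by 1024 physical-to-logical entries). A condensed sketch of that bookkeeping, mirroring the arithmetic in ssfdc_write() (illustrative helper only):

static void ssfdc_remap_block(partition_t *pt_smcpart, int zone, int block)
{
	int *pt_logical  = (int *)pt_smcpart->zone + (zone * 2048);	/* logical  -> physical */
	int *pt_physical = pt_logical + 1024;				/* physical -> logical  */
	int  new_off     = ssfdc_allocate_new(pt_smcpart, zone);	/* byte offset on card  */
	int  new_block   = (new_off % ZONE_SIZE) / SMC_BLOCK_SIZE;	/* block index in zone  */

	*(pt_logical  + block)     = new_block;
	*(pt_physical + new_block) = block;
}
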
+
+
+
+static int ssfdc_read(partition_t *pt_smcpart, caddr_t buffer,
+ u_long sector, u_long nblocks)
+{
+ int zone, block, offset;
+ int sectors_read = 0;
+ int physical;
+ int size;
+ int retlen;
+ int i;
+ int sc;
+ unsigned char * ptr = (unsigned char *)buffer;
+ unsigned char ecc_code[6], ecc_calc[6];
+/*
+ unsigned char smc_status;
+
+ smc_status = in_8((void *)&pt_ssfdc_smc->smc_status);
+ if(!(smc_status & SMC_PRESENT)) {
+ printk("ssfdc : media not present\n");
+ return -ENXIO;
+ }
+
+
+
+ if(smc_status & SMC_CHANGED) {
+ out_8((void *)&pt_ssfdc_smc->smc_status, smc_status);
+ ssfdc_read_partitions(pt_smcpart);
+ printk("ssfdc : media change\n");
+ }
+*/
+ while(sectors_read < nblocks) {
+
+ zone = (sector + sectors_read) / SECTORS_PER_ZONE;
+ block = ((sector + sectors_read) % SECTORS_PER_ZONE) / SECTORS_PER_BLOCK ;
+ offset = ((sector + sectors_read) % SECTORS_PER_ZONE) % SECTORS_PER_BLOCK ;
+
+
+ if(offset) {
+ size = ((SECTORS_PER_BLOCK - offset) < (nblocks - sectors_read)) ?
+ (SECTORS_PER_BLOCK - offset) : (nblocks - sectors_read);
+ }
+ else {
+ size = (SECTORS_PER_BLOCK < (nblocks - sectors_read)) ? SECTORS_PER_BLOCK : nblocks - sectors_read;
+ }
+ size *= SECTOR_SIZE;
+
+ PDEBUG("ssfdc_read : device %d, sector %d, count %d, zone %d, block %d, offset %d, done %d, size %d, address 0x%x\n",
+ pt_smcpart->count, sector, nblocks, zone, block, offset, sectors_read, size, (unsigned int)ptr);
+
+
+ physical = ssfdc_physical(pt_smcpart, zone, block);
+ if(physical >= 0) {
+ if(ssfdc_cached != physical) {
+ pt_smcpart->mtd->read_ecc(pt_smcpart->mtd, physical, SMC_BLOCK_SIZE, &retlen, ssfdc_scratch,
+ ssfdc_oob_buf, &ssfdc_ffoob_info);
+ if(retlen != SMC_BLOCK_SIZE) {
+ printk(KERN_WARNING "ssfdc_read : failed to read physical\n");
+ return -ENXIO;
+ }
+ for(sc=0; sc<SECTORS_PER_BLOCK; sc++) {
+ pt_smcpart->mtd->read_oob(pt_smcpart->mtd, physical + (sc * SECTOR_SIZE), sizeof(ssfdc_buffer), &retlen, ssfdc_buffer);
+ if(retlen != sizeof(ssfdc_buffer)) {
+ printk(KERN_WARNING "ssfdc_read : failed to read physical oob\n");
+ return -ENXIO;
+ }
+ nand_calculate_ecc (pt_smcpart->mtd, &ssfdc_scratch[sc * SECTOR_SIZE], &ecc_calc[0]);
+ nand_calculate_ecc (pt_smcpart->mtd, &ssfdc_scratch[(sc * SECTOR_SIZE) + 256], &ecc_calc[3]);
+ for(i=0; i<3; i++) ecc_code[i] = ssfdc_buffer[ssfdc_ecc[i]];
+ for(i=3; i<6; i++) ecc_code[i] = ssfdc_buffer[ssfdc_ecc[i]];
+ nand_correct_data(pt_smcpart->mtd, &ssfdc_scratch[sc * SECTOR_SIZE], &ecc_code[0], &ecc_calc[0]);
+ nand_correct_data(pt_smcpart->mtd, &ssfdc_scratch[(sc * SECTOR_SIZE) + 256], &ecc_code[3], &ecc_calc[3]);
+ }
+
+ /* Get the ecc bytes and check that they are ok */
+
+
+ }
+ ssfdc_cached = physical;
+
+
+ }
+ else {
+ memset(ssfdc_scratch, 0xFF, sizeof(ssfdc_scratch));
+ ssfdc_cached = 0xFFFFFFFF;
+ }
+
+
+ memcpy(ptr, &ssfdc_scratch[(offset * SECTOR_SIZE)], size);
+ ptr += size;
+ sectors_read += (size / SECTOR_SIZE);
+ }
+
+
+
+ return(0);
+}
+
+static void ssfdc_erase_callback(struct erase_info *erase) {
+
+ PDEBUG("ssfdc_erase_callback : wake erase\n");
+ up(&ssfdc_semaphore);
+ PDEBUG("ssfdc_erase_callback : woken erase\n");
+}
+
+static int ssfdc_erase(partition_t *pt_smcpart, unsigned int offset)
+{
+ int ret = 0;
+ struct erase_info *erase;
+ unsigned char * junk;
+ unsigned char * oob;
+ int retlen;
+ int b, sc;
+
+
+ PDEBUG("ssfdc_erase : offset 0x%08x\n", offset);
+
+ erase=kmalloc(sizeof(struct erase_info), GFP_KERNEL);
+ junk=kmalloc(pt_smcpart->mtd->erasesize + 16, GFP_KERNEL);
+ oob = junk + pt_smcpart->mtd->erasesize;
+
+ if (!erase)
+ return -ENOMEM;
+ if (!junk)
+ return -ENOMEM;
+
+ erase->addr = offset;
+ erase->len = pt_smcpart->mtd->erasesize;
+ erase->callback = ssfdc_erase_callback;
+ ret = pt_smcpart->mtd->erase(pt_smcpart->mtd, erase);
+ if(ret) {
+ printk(KERN_WARNING "ssfdc_erase : failed status 0x%x\n", ret);
+ goto end;
+
+ }
+
+ down(&ssfdc_semaphore);
+
+ pt_smcpart->mtd->read_ecc(pt_smcpart->mtd, offset, SMC_BLOCK_SIZE, &retlen, junk,
+ ssfdc_oob_buf, &ssfdc_ffoob_info);
+ if(retlen != SMC_BLOCK_SIZE) {
+ printk(KERN_WARNING "ssfdc_erase : offset 0x%x, read returned length %d\n", offset, retlen);
+ goto end;
+ }
+
+
+ for(sc=0; sc < SECTORS_PER_BLOCK; sc++) {
+ for(b=0; b<SECTOR_SIZE; b++) {
+ if(*(junk + (b + (sc * SECTOR_SIZE))) != 0xFF) {
+ printk(KERN_WARNING "ssfdc_erase : offset 0x%x, sector 0x%x, byte 0x%x, data 0x%02x, expected 0xff\n"
+ , offset, sc, b, *(junk + (b + (sc * SECTOR_SIZE))));
+ goto end;
+ }
+ }
+ pt_smcpart->mtd->read_oob(pt_smcpart->mtd, offset + (sc * SECTOR_SIZE), OOB_SIZE, &retlen, oob);
+ if(retlen != OOB_SIZE) {
+ printk(KERN_WARNING "ssfdc_erase : offset 0x%x, read oob returned length %d\n", offset, retlen);
+ goto end;
+ }
+ for(b=0; b<OOB_SIZE; b++) {
+ if(*(oob+b) != 0xFF) {
+ printk(KERN_WARNING "ssfdc_erase : offset 0x%x, byte 0x%x, oob got 0x%02x, expected 0xff\n",
+ offset, b, *(oob+b));
+ goto end;
+ }
+ }
+ }
+
+end:
+
+ kfree(erase);
+ kfree(junk);
+
+ return ret;
+} /* erase_xfer */
+
+
+
+
+
+int init_ssfdc(void)
+{
+ int result, i;
+
+// unsigned char smc_status;
+// #define B01159_FIO_PBASE 0x0000000148000000 /* Physical Base address of SMC control chip */
+
+ printk(KERN_INFO "SSFDC block device translation layer V1.0\n");
+/*
+ pt_ssfdc_smc = ioremap64(B01159_FIO_PBASE, 1024);
+ if(!pt_ssfdc_smc){
+ printk("ssfdc : failed to map SMC control device\n");
+ return(-EFAULT);
+ }
+
+ smc_status = in_8((void *)&pt_ssfdc_smc->smc_status);
+*/
+ memset(ssfdc_ffoob_buf, 0xFF, sizeof(ssfdc_ffoob_buf));
+
+ for (i = 0; i < MAX_DEVICES*MAX_PARTITIONS; i++) {
+ ssfdc_hd[i].nr_sects = 0;
+ ssfdc_hd[i].start_sect = 0;
+ ssfdc_blocksizes[i] = 4096;
+ }
+ blksize_size[SSFDC_MAJOR] = ssfdc_blocksizes;
+ ssfdc_gendisk.major = SSFDC_MAJOR;
+
+
+ memset(ssfdc_scratch, 0xFF, sizeof(ssfdc_scratch));
+
+ result = register_blkdev(ssfdc_major, "ssfdc", &ssfdc_fops);
+ if(result != 0) {
+ printk(KERN_WARNING "ssfdc : failed to get a major number\n");
+ return(result);
+ }
+// if(ssfdc_major == 0) ssfdc_major = result;
+
+ blk_init_queue(BLK_DEFAULT_QUEUE(ssfdc_major), &do_ssfdc_request);
+
+ add_gendisk(&ssfdc_gendisk);
+
+
+
+ register_mtd_user(&ssfdc_notifier);
+
+
+ init_MUTEX_LOCKED(&ssfdc_semaphore);
+
+
+
+ return 0;
+}
+
+static void __exit cleanup_ssfdc(void)
+{
+ int i;
+
+ for(i=0; i<MAX_DEVICES; i++) {
+ if(SMCParts[i].zone)kfree(SMCParts[i].zone);
+ }
+
+
+ unregister_mtd_user(&ssfdc_notifier);
+ unregister_blkdev(ssfdc_major, "ssfdc");
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(ssfdc_major));
+
+
+
+ blksize_size[SSFDC_MAJOR] = NULL;
+ del_gendisk(&ssfdc_gendisk);
+
+}
+
+module_init(init_ssfdc);
+module_exit(cleanup_ssfdc);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Haynes <simon@baydel.com>");
+MODULE_DESCRIPTION("SSFDC translation layer support for MTD");
+
+
+
+
diff --git a/linux-2.4.x/fs/Config.in b/linux-2.4.x/fs/Config.in
index e9891ec..5d3e621 100644
--- a/linux-2.4.x/fs/Config.in
+++ b/linux-2.4.x/fs/Config.in
@@ -45,6 +45,18 @@ fi
dep_tristate 'Journalling Flash File System v2 (JFFS2) support' CONFIG_JFFS2_FS $CONFIG_MTD
if [ "$CONFIG_JFFS2_FS" = "y" -o "$CONFIG_JFFS2_FS" = "m" ] ; then
int 'JFFS2 debugging verbosity (0 = quiet, 2 = noisy)' CONFIG_JFFS2_FS_DEBUG 0
+ bool 'JFFS2 write-buffering support' CONFIG_JFFS2_FS_WRITEBUFFER
+ bool 'JFFS2 ZLIB compression support (recommended)' CONFIG_JFFS2_ZLIB
+ bool 'JFFS2 RTIME compression support (recommended)' CONFIG_JFFS2_RTIME
+ bool 'JFFS2 RUBIN compression support' CONFIG_JFFS2_RUBIN
+ bool 'JFFS2 LZO compression support' CONFIG_JFFS2_LZO
+ bool 'JFFS2 LZARI compression support' CONFIG_JFFS2_LZARI
+ choice 'JFFS2 default compression mode' \
+ "none CONFIG_JFFS2_CMODE_NONE \
+ priority CONFIG_JFFS2_CMODE_PRIORITY \
+ size CONFIG_JFFS2_CMODE_SIZE" priority
+
+ bool 'JFFS2 proc interface support' CONFIG_JFFS2_PROC
fi
tristate 'Compressed ROM file system support' CONFIG_CRAMFS
bool 'Virtual memory file system support (former shm fs)' CONFIG_TMPFS
diff --git a/linux-2.4.x/fs/jffs2/Makefile b/linux-2.4.x/fs/jffs2/Makefile
index a63c353..41f2c97 100644
--- a/linux-2.4.x/fs/jffs2/Makefile
+++ b/linux-2.4.x/fs/jffs2/Makefile
@@ -1,25 +1,42 @@
#
-# Makefile for the linux Journalling Flash FileSystem (JFFS) routines.
+# fs/jffs2/Makefile.24
#
-# $Id: Makefile,v 1.25.2.1 2002/10/11 09:04:44 dwmw2 Exp $
-#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
-#
-# Note 2! The CFLAGS definitions are now in the main makefile...
+# $Id: Makefile,v 1.43 2003/10/06 12:54:49 dwmw2 Exp $
+ifdef OUT_OF_TREE_BUILD
+include $(mtd)/defconfig
-COMPR_OBJS := compr.o compr_rubin.o compr_rtime.o pushpull.o \
- compr_zlib.o
-JFFS2_OBJS := crc32.o dir.o file.o ioctl.o nodelist.o malloc.o \
- read.o nodemgmt.o readinode.o super.o write.o scan.o gc.o \
- symlink.o build.o erase.o background.o
+# This must be first in the include path, so it goes in $(CC) rather
+# than $(EXTRA_CFLAGS)
-O_TARGET := jffs2.o
+CC += -I$(mtd)/../../include
+EXTRA_CFLAGS := -g -Werror
+
+ifndef CONFIG_MTD
+EXTRA_CFLAGS += -DMTD_OUT_OF_TREE
+endif
+
+ifdef NONAND
+EXTRA_CFLAGS += -DNONAND
+endif
+endif
+
+obj-$(CONFIG_JFFS2_FS) += jffs2.o
-obj-y := $(COMPR_OBJS) $(JFFS2_OBJS)
-obj-m := $(O_TARGET)
+JFFS2_OBJS := compr.o dir.o file.o ioctl.o nodelist.o malloc.o
+JFFS2_OBJS += read.o nodemgmt.o readinode.o write.o scan.o gc.o
+JFFS2_OBJS += symlink-v24.o build.o erase.o background.o fs.o writev.o
+
+LINUX_OBJS += super-v24.o crc32.o rbtree.o
+
+WBUF_OBJS-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o
+
+COMPR_OBJS-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o
+COMPR_OBJS-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
+COMPR_OBJS-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
+
+obj-y := $(COMPR_OBJS-y) $(JFFS2_OBJS) $(LINUX_OBJS) $(WBUF_OBJS-y)
+O_TARGET := jffs2.o
include $(TOPDIR)/Rules.make
diff --git a/linux-2.4.x/fs/jffs2/Makefile.common b/linux-2.4.x/fs/jffs2/Makefile.common
new file mode 100644
index 0000000..c19ea16
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/Makefile.common
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux Journalling Flash File System v2 (JFFS2)
+#
+# $Id: Makefile.common,v 1.12 2005/11/18 07:27:45 forrest Exp $
+#
+
+obj-$(CONFIG_JFFS2_FS) += jffs2.o
+
+jffs2-y := compr.o dir.o file.o ioctl.o nodelist.o malloc.o
+jffs2-y += read.o nodemgmt.o readinode.o write.o scan.o gc.o
+jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o
+jffs2-y += super.o debug.o wear_leveling.o
+
+jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o
+jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o
+jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
+jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
+jffs2-$(CONFIG_JFFS2_SUMMARY) += summary.o
diff --git a/linux-2.4.x/fs/jffs2/background.c b/linux-2.4.x/fs/jffs2/background.c
index b0f01c2..c1bb8a1 100644
--- a/linux-2.4.x/fs/jffs2/background.c
+++ b/linux-2.4.x/fs/jffs2/background.c
@@ -1,61 +1,32 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: background.c,v 1.16 2001/10/08 09:22:38 dwmw2 Exp $
+ * $Id: background.c,v 1.59 2005/11/07 11:14:38 gleixner Exp $
*
*/
-#define __KERNEL_SYSCALLS__
-
#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/unistd.h>
#include <linux/jffs2.h>
#include <linux/mtd/mtd.h>
-#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/sched.h>
#include "nodelist.h"
static int jffs2_garbage_collect_thread(void *);
-static int thread_should_wake(struct jffs2_sb_info *c);
void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
{
- spin_lock_bh(&c->erase_completion_lock);
- if (c->gc_task && thread_should_wake(c))
+ spin_lock(&c->erase_completion_lock);
+ if (c->gc_task && jffs2_thread_should_wake(c))
send_sig(SIGHUP, c->gc_task, 1);
- spin_unlock_bh(&c->erase_completion_lock);
+ spin_unlock(&c->erase_completion_lock);
}
/* This must only ever be called when no GC thread is currently running */
@@ -67,7 +38,7 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
if (c->gc_task)
BUG();
- init_MUTEX_LOCKED(&c->gc_thread_start);
+ init_completion(&c->gc_thread_start);
init_completion(&c->gc_thread_exit);
pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES);
@@ -78,106 +49,96 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
} else {
/* Wait for it... */
D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid));
- down(&c->gc_thread_start);
+ wait_for_completion(&c->gc_thread_start);
}
-
+
return ret;
}
void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
{
- spin_lock_bh(&c->erase_completion_lock);
+ int wait = 0;
+ spin_lock(&c->erase_completion_lock);
if (c->gc_task) {
D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid));
send_sig(SIGKILL, c->gc_task, 1);
+ wait = 1;
}
- spin_unlock_bh(&c->erase_completion_lock);
- wait_for_completion(&c->gc_thread_exit);
+ spin_unlock(&c->erase_completion_lock);
+ if (wait)
+ wait_for_completion(&c->gc_thread_exit);
}
static int jffs2_garbage_collect_thread(void *_c)
{
struct jffs2_sb_info *c = _c;
- daemonize();
- current->tty = NULL;
- c->gc_task = current;
- up(&c->gc_thread_start);
+ daemonize("jffs2_gcd_mtd%d", c->mtd->index);
+ allow_signal(SIGKILL);
+ allow_signal(SIGSTOP);
+ allow_signal(SIGCONT);
- sprintf(current->comm, "jffs2_gcd_mtd%d", c->mtd->index);
+ c->gc_task = current;
+ complete(&c->gc_thread_start);
- /* FIXME in the 2.2 backport */
- current->nice = 10;
+ set_user_nice(current, 10);
for (;;) {
- spin_lock_irq(&current->sigmask_lock);
- siginitsetinv (&current->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT));
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
+ allow_signal(SIGHUP);
- if (!thread_should_wake(c)) {
- set_current_state (TASK_INTERRUPTIBLE);
+ if (!jffs2_thread_should_wake(c)) {
+ set_current_state (TASK_INTERRUPTIBLE);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
- /* Yes, there's a race here; we checked thread_should_wake() before
- setting current->state to TASK_INTERRUPTIBLE. But it doesn't
+ /* Yes, there's a race here; we checked jffs2_thread_should_wake()
+ before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
matter - We don't care if we miss a wakeup, because the GC thread
is only an optimisation anyway. */
schedule();
}
-
- if (current->need_resched)
- schedule();
- /* Put_super will send a SIGKILL and then wait on the sem.
- */
- while (signal_pending(current)) {
- siginfo_t info;
- unsigned long signr;
-
- spin_lock_irq(&current->sigmask_lock);
- signr = dequeue_signal(&current->blocked, &info);
- spin_unlock_irq(&current->sigmask_lock);
-
- switch(signr) {
- case SIGSTOP:
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
- set_current_state(TASK_STOPPED);
- schedule();
- break;
-
- case SIGKILL:
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
- spin_lock_bh(&c->erase_completion_lock);
- c->gc_task = NULL;
- spin_unlock_bh(&c->erase_completion_lock);
- complete_and_exit(&c->gc_thread_exit, 0);
+ if (try_to_freeze())
+ continue;
+
+ cond_resched();
+
+ /* Put_super will send a SIGKILL and then wait on the sem.
+ */
+ while (signal_pending(current)) {
+ siginfo_t info;
+ unsigned long signr;
+
+ signr = dequeue_signal_lock(current, &current->blocked, &info);
+
+ switch(signr) {
+ case SIGSTOP:
+ D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n"));
+ set_current_state(TASK_STOPPED);
+ schedule();
+ break;
+
+ case SIGKILL:
+ D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n"));
+ goto die;
case SIGHUP:
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n"));
break;
default:
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr));
-
- }
- }
+ }
+ }
/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
- spin_lock_irq(&current->sigmask_lock);
- siginitsetinv (&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT));
- recalc_sigpending(current);
- spin_unlock_irq(&current->sigmask_lock);
+ disallow_signal(SIGHUP);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n"));
- jffs2_garbage_collect_pass(c);
+ if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
+ printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n");
+ goto die;
+ }
}
-}
-
-static int thread_should_wake(struct jffs2_sb_info *c)
-{
- D1(printk(KERN_DEBUG "thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x\n",
- c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size));
- if (c->nr_free_blocks + c->nr_erasing_blocks < JFFS2_RESERVED_BLOCKS_GCTRIGGER &&
- c->dirty_size > c->sector_size)
- return 1;
- else
- return 0;
+ die:
+ spin_lock(&c->erase_completion_lock);
+ c->gc_task = NULL;
+ spin_unlock(&c->erase_completion_lock);
+ complete_and_exit(&c->gc_thread_exit, 0);
}
diff --git a/linux-2.4.x/fs/jffs2/build.c b/linux-2.4.x/fs/jffs2/build.c
index 57a3c09..e77ddfd 100644
--- a/linux-2.4.x/fs/jffs2/build.c
+++ b/linux-2.4.x/fs/jffs2/build.c
@@ -1,265 +1,366 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: build.c,v 1.16.2.2 2002/03/12 15:36:43 dwmw2 Exp $
+ * $Id: build.c,v 1.87 2005/11/18 07:27:45 forrest Exp $
*
*/
#include <linux/kernel.h>
-#include <linux/jffs2.h>
+#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
#include "nodelist.h"
-int jffs2_build_inode_pass1(struct jffs2_sb_info *, struct jffs2_inode_cache *);
-int jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *);
+static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
+ struct jffs2_inode_cache *, struct jffs2_full_dirent **);
+
+static inline struct jffs2_inode_cache *
+first_inode_chain(int *i, struct jffs2_sb_info *c)
+{
+ for (; *i < INOCACHE_HASHSIZE; (*i)++) {
+ if (c->inocache_list[*i])
+ return c->inocache_list[*i];
+ }
+ return NULL;
+}
+
+static inline struct jffs2_inode_cache *
+next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
+{
+ /* More in this chain? */
+ if (ic->next)
+ return ic->next;
+ (*i)++;
+ return first_inode_chain(i, c);
+}
+
+#define for_each_inode(i, c, ic) \
+ for (i = 0, ic = first_inode_chain(&i, (c)); \
+ ic; \
+ ic = next_inode(&i, ic, (c)))
+
+
+static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+ struct jffs2_inode_cache *ic)
+{
+ struct jffs2_full_dirent *fd;
+
+ dbg_fsbuild("building directory inode #%u\n", ic->ino);
+
+ /* For each child, increase nlink */
+ for(fd = ic->scan_dents; fd; fd = fd->next) {
+ struct jffs2_inode_cache *child_ic;
+ if (!fd->ino)
+ continue;
+
+ /* we can get high latency here with huge directories */
+ child_ic = jffs2_get_ino_cache(c, fd->ino);
+ if (!child_ic) {
+ dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
+ fd->name, fd->ino, ic->ino);
+ jffs2_mark_node_obsolete(c, fd->raw);
+ continue;
+ }
-#define for_each_inode(i, c, ic) for (i=0; i<INOCACHE_HASHSIZE; i++) for (ic=c->inocache_list[i]; ic; ic=ic->next)
+ if (child_ic->nlink++ && fd->type == DT_DIR) {
+ JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
+ fd->name, fd->ino, ic->ino);
+ /* TODO: What do we do about it? */
+ }
+ dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
+ /* Can't free scan_dents so far. We might need them in pass 2 */
+ }
+}
/* Scan plan:
- Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
- Scan directory tree from top down, setting nlink in inocaches
- Scan inocaches for inodes with nlink==0
*/
-int jffs2_build_filesystem(struct jffs2_sb_info *c)
+static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
int ret;
int i;
struct jffs2_inode_cache *ic;
+ struct jffs2_full_dirent *fd;
+ struct jffs2_full_dirent *dead_fds = NULL;
+
+ dbg_fsbuild("build FS data structures\n");
/* First, scan the medium and build all the inode caches with
lists of physical nodes */
- c->flags |= JFFS2_SB_FLAG_MOUNTING;
+ c->flags |= JFFS2_SB_FLAG_SCANNING;
ret = jffs2_scan_medium(c);
- c->flags &= ~JFFS2_SB_FLAG_MOUNTING;
-
+ c->flags &= ~JFFS2_SB_FLAG_SCANNING;
if (ret)
- return ret;
+ goto exit;
+
+ dbg_fsbuild("scanned flash completely\n");
+ jffs2_dbg_dump_block_lists_nolock(c);
- D1(printk(KERN_DEBUG "Scanned flash completely\n"));
- /* Now build the data map for each inode, marking obsoleted nodes
- as such, and also increase nlink of any children. */
+ dbg_fsbuild("pass 1 starting\n");
+ c->flags |= JFFS2_SB_FLAG_BUILDING;
+ /* Now scan the directory tree, increasing nlink according to every dirent found. */
for_each_inode(i, c, ic) {
- D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino));
- ret = jffs2_build_inode_pass1(c, ic);
- if (ret) {
- D1(printk(KERN_WARNING "Eep. jffs2_build_inode_pass1 for ino %d returned %d\n", ic->ino, ret));
- return ret;
+ if (ic->scan_dents) {
+ jffs2_build_inode_pass1(c, ic);
+ cond_resched();
}
}
- D1(printk(KERN_DEBUG "Pass 1 complete\n"));
+
+ dbg_fsbuild("pass 1 complete\n");
/* Next, scan for inodes with nlink == 0 and remove them. If
they were directories, then decrement the nlink of their
children too, and repeat the scan. As that's going to be
a fairly uncommon occurrence, it's not so evil to do it this
way. Recursion bad. */
- do {
- D1(printk(KERN_DEBUG "Pass 2 (re)starting\n"));
- ret = 0;
- for_each_inode(i, c, ic) {
- D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
- if (ic->nlink)
- continue;
-
- ret = jffs2_build_remove_unlinked_inode(c, ic);
- if (ret)
- break;
- /* -EAGAIN means the inode's nlink was zero, so we deleted it,
- and furthermore that it had children and their nlink has now
- gone to zero too. So we have to restart the scan. */
- }
- } while(ret == -EAGAIN);
-
- D1(printk(KERN_DEBUG "Pass 2 complete\n"));
-
- /* Finally, we can scan again and free the dirent nodes and scan_info structs */
+ dbg_fsbuild("pass 2 starting\n");
+
for_each_inode(i, c, ic) {
- struct jffs2_scan_info *scan = ic->scan;
- struct jffs2_full_dirent *fd;
- D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes));
- if (!scan) {
- if (ic->nlink) {
- D1(printk(KERN_WARNING "Why no scan struct for ino #%u which has nlink %d?\n", ic->ino, ic->nlink));
- }
+ if (ic->nlink)
continue;
- }
- ic->scan = NULL;
- while(scan->dents) {
- fd = scan->dents;
- scan->dents = fd->next;
- jffs2_free_full_dirent(fd);
- }
- kfree(scan);
+
+ jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
+ cond_resched();
}
- D1(printk(KERN_DEBUG "Pass 3 complete\n"));
- return ret;
-}
-
-int jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
-{
- struct jffs2_tmp_dnode_info *tn;
- struct jffs2_full_dirent *fd;
- struct jffs2_node_frag *fraglist = NULL;
- struct jffs2_tmp_dnode_info *metadata = NULL;
+ dbg_fsbuild("pass 2a starting\n");
- D1(printk(KERN_DEBUG "jffs2_build_inode building inode #%u\n", ic->ino));
- if (ic->ino > c->highest_ino)
- c->highest_ino = ic->ino;
+ while (dead_fds) {
+ fd = dead_fds;
+ dead_fds = fd->next;
- if (!ic->scan->tmpnodes && ic->ino != 1) {
- D1(printk(KERN_DEBUG "jffs2_build_inode: ino #%u has no data nodes!\n", ic->ino));
- }
- /* Build the list to make sure any obsolete nodes are marked as such */
- while(ic->scan->tmpnodes) {
- tn = ic->scan->tmpnodes;
- ic->scan->tmpnodes = tn->next;
-
- if (metadata && tn->version > metadata->version) {
- D1(printk(KERN_DEBUG "jffs2_build_inode_pass1 ignoring old metadata at 0x%08x\n",
- metadata->fn->raw->flash_offset &~3));
-
- jffs2_free_full_dnode(metadata->fn);
- jffs2_free_tmp_dnode_info(metadata);
- metadata = NULL;
- }
-
- if (tn->fn->size) {
- jffs2_add_full_dnode_to_fraglist (c, &fraglist, tn->fn);
- jffs2_free_tmp_dnode_info(tn);
- } else {
- if (!metadata) {
- metadata = tn;
- } else {
- D1(printk(KERN_DEBUG "jffs2_build_inode_pass1 ignoring new metadata at 0x%08x\n",
- tn->fn->raw->flash_offset &~3));
-
- jffs2_free_full_dnode(tn->fn);
- jffs2_free_tmp_dnode_info(tn);
- }
- }
- }
-
- /* OK. Now clear up */
- if (metadata) {
- jffs2_free_full_dnode(metadata->fn);
- jffs2_free_tmp_dnode_info(metadata);
+ ic = jffs2_get_ino_cache(c, fd->ino);
+
+ if (ic)
+ jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
+ jffs2_free_full_dirent(fd);
}
- metadata = NULL;
-
- while (fraglist) {
- struct jffs2_node_frag *frag;
- frag = fraglist;
- fraglist = fraglist->next;
-
- if (frag->node && !(--frag->node->frags)) {
- jffs2_free_full_dnode(frag->node);
+
+ dbg_fsbuild("pass 2a complete\n");
+ dbg_fsbuild("freeing temporary data structures\n");
+
+ /* Finally, we can scan again and free the dirent structs */
+ for_each_inode(i, c, ic) {
+ while(ic->scan_dents) {
+ fd = ic->scan_dents;
+ ic->scan_dents = fd->next;
+ jffs2_free_full_dirent(fd);
}
- jffs2_free_node_frag(frag);
+ ic->scan_dents = NULL;
+ cond_resched();
}
+ c->flags &= ~JFFS2_SB_FLAG_BUILDING;
- /* Now for each child, increase nlink */
- for(fd=ic->scan->dents; fd; fd = fd->next) {
- struct jffs2_inode_cache *child_ic;
- if (!fd->ino)
- continue;
+ dbg_fsbuild("FS build complete\n");
- child_ic = jffs2_get_ino_cache(c, fd->ino);
- if (!child_ic) {
- printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
- fd->name, fd->ino, ic->ino);
- continue;
- }
+ /* Rotate the lists by some number to ensure wear levelling */
+ jffs2_rotate_lists(c);
- if (child_ic->nlink++ && fd->type == DT_DIR) {
- printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
- if (fd->ino == 1 && ic->ino == 1) {
- printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
- printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
+ ret = 0;
+
+exit:
+ if (ret) {
+ for_each_inode(i, c, ic) {
+ while(ic->scan_dents) {
+ fd = ic->scan_dents;
+ ic->scan_dents = fd->next;
+ jffs2_free_full_dirent(fd);
}
- /* What do we do about it? */
}
- D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
- /* Can't free them. We might need them in pass 2 */
}
- return 0;
+
+ return ret;
}
-int jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
+ struct jffs2_inode_cache *ic,
+ struct jffs2_full_dirent **dead_fds)
{
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *fd;
- int ret = 0;
- if(!ic->scan) {
- D1(printk(KERN_DEBUG "ino #%u was already removed\n", ic->ino));
- return 0;
- }
+ dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);
- D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino));
-
- for (raw = ic->nodes; raw != (void *)ic; raw = raw->next_in_ino) {
- D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", raw->flash_offset&~3));
+ raw = ic->nodes;
+ while (raw != (void *)ic) {
+ struct jffs2_raw_node_ref *next = raw->next_in_ino;
+ dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
jffs2_mark_node_obsolete(c, raw);
+ raw = next;
}
- if (ic->scan->dents) {
- printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino);
-
- while(ic->scan->dents) {
+ if (ic->scan_dents) {
+ int whinged = 0;
+ dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);
+
+ while(ic->scan_dents) {
struct jffs2_inode_cache *child_ic;
- fd = ic->scan->dents;
- ic->scan->dents = fd->next;
+ fd = ic->scan_dents;
+ ic->scan_dents = fd->next;
+
+ if (!fd->ino) {
+ /* It's a deletion dirent. Ignore it */
+ dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
+ jffs2_free_full_dirent(fd);
+ continue;
+ }
+ if (!whinged)
+ whinged = 1;
+
+ dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);
- D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n",
- fd->name, fd->ino));
-
child_ic = jffs2_get_ino_cache(c, fd->ino);
if (!child_ic) {
- printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino);
+ dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
+ fd->name, fd->ino);
+ jffs2_free_full_dirent(fd);
continue;
}
- jffs2_free_full_dirent(fd);
+
+ /* Reduce nlink of the child. If it's now zero, stick it on the
+ dead_fds list to be cleaned up later. Else just free the fd */
+
child_ic->nlink--;
+
+ if (!child_ic->nlink) {
+ dbg_fsbuild("inode #%u (\"%s\") has now got zero nlink, adding to dead_fds list.\n",
+ fd->ino, fd->name);
+ fd->next = *dead_fds;
+ *dead_fds = fd;
+ } else {
+ dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
+ fd->ino, fd->name, child_ic->nlink);
+ jffs2_free_full_dirent(fd);
+ }
}
- ret = -EAGAIN;
}
- kfree(ic->scan);
- ic->scan = NULL;
- // jffs2_del_ino_cache(c, ic);
- // jffs2_free_inode_cache(ic);
- return ret;
+
+ /*
+ We don't delete the inocache from the hash list and free it yet.
+ The erase code will do that, when all the nodes are completely gone.
+ */
+}
+
+static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
+{
+ uint32_t size;
+
+ /* Deletion should almost _always_ be allowed. We're fairly
+ buggered once we stop allowing people to delete stuff
+ because there's not enough free space... */
+ c->resv_blocks_deletion = 2;
+
+ /* Be conservative about how much space we need before we allow writes.
+ On top of that which is required for deletia, require an extra 2%
+ of the medium to be available, for overhead caused by nodes being
+ split across blocks, etc. */
+
+ size = c->flash_size / 50; /* 2% of flash size */
+ size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
+ size += c->sector_size - 1; /* ... and round up */
+
+ c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
+
+ /* When do we let the GC thread run in the background */
+
+ c->resv_blocks_gctrigger = c->resv_blocks_write + 1;
+
+ /* When do we allow garbage collection to merge nodes to make
+ long-term progress at the expense of short-term space exhaustion? */
+ c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;
+
+ /* When do we allow garbage collection to eat from bad blocks rather
+ than actually making progress? */
+ c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;
+
+ /* If there's less than this amount of dirty space, don't bother
+ trying to GC to make more space. It'll be a fruitless task */
+ c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
+
+ dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
+ c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
+ dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n",
+ c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
+ dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n",
+ c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024);
+ dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n",
+ c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024);
+ dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n",
+ c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024);
+ dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n",
+ c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024);
+ dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n",
+ c->nospc_dirty_size);
+}
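/*
 * The reservation arithmetic from jffs2_calc_trigger_levels() above, worked
 * through in userspace for a hypothetical 16 MiB flash with 64 KiB
 * eraseblocks.  The device geometry is invented for illustration only; the
 * formulas are the ones in the function above.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t flash_size  = 16 * 1024 * 1024;	/* hypothetical 16 MiB	*/
	uint32_t sector_size = 64 * 1024;		/* hypothetical 64 KiB	*/
	uint32_t nr_blocks   = flash_size / sector_size;	/* 256		*/
	uint32_t size, resv_deletion, resv_write, gctrigger, nospc_dirty;

	resv_deletion = 2;

	size  = flash_size / 50;	/* 2% of flash size		*/
	size += nr_blocks * 100;	/* plus 100 bytes per block	*/
	size += sector_size - 1;	/* round up to whole blocks	*/
	resv_write = resv_deletion + size / sector_size;

	gctrigger   = resv_write + 1;
	nospc_dirty = sector_size + flash_size / 100;

	printf("resv_blocks_deletion  = %u\n", resv_deletion);		/* 2	   */
	printf("resv_blocks_write     = %u\n", resv_write);		/* 8	   */
	printf("resv_blocks_gctrigger = %u\n", gctrigger);		/* 9	   */
	printf("nospc_dirty_size      = %u bytes\n", nospc_dirty);	/* 233308  */
	return 0;
}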
+
+static void jffs2_init_hash_tables(struct jffs2_sb_info *c)
+{
+ int i;
+
+ for (i=0; i<HASH_SIZE; i++) {
+ INIT_LIST_HEAD(&(c->used_blocks[i].chain));
+ INIT_LIST_HEAD(&(c->free_blocks[i].chain));
+ }
+ c->used_blocks_current_index = HASH_SIZE;
+ c->free_blocks_current_index = HASH_SIZE;
+ return;
+}
+
+int jffs2_do_mount_fs(struct jffs2_sb_info *c)
+{
+ int ret;
+
+ c->free_size = c->flash_size;
+ c->nr_blocks = c->flash_size / c->sector_size;
+
+ ret = jffs2_alloc_eraseblocks(c);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&c->clean_list);
+ INIT_LIST_HEAD(&c->very_dirty_list);
+ INIT_LIST_HEAD(&c->dirty_list);
+ INIT_LIST_HEAD(&c->erasable_list);
+ INIT_LIST_HEAD(&c->erasing_list);
+ INIT_LIST_HEAD(&c->erase_pending_list);
+ INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
+ INIT_LIST_HEAD(&c->erase_complete_list);
+ INIT_LIST_HEAD(&c->free_list);
+ INIT_LIST_HEAD(&c->bad_list);
+ INIT_LIST_HEAD(&c->bad_used_list);
+ c->highest_ino = 1;
+ c->summary = NULL;
+
+ jffs2_init_hash_tables(c);
+
+ ret = jffs2_sum_init(c);
+ if (ret) {
+ jffs2_free_eraseblocks(c);
+ return ret;
+
+ }
+
+ if (jffs2_build_filesystem(c)) {
+ dbg_fsbuild("build_fs failed\n");
+ jffs2_free_ino_caches(c);
+ jffs2_free_raw_node_refs(c);
+ jffs2_free_eraseblocks(c);
+ return -EIO;
+ }
+
+ jffs2_calc_trigger_levels(c);
+
+ return 0;
}
diff --git a/linux-2.4.x/fs/jffs2/compr.c b/linux-2.4.x/fs/jffs2/compr.c
index 258be64..e7944e6 100644
--- a/linux-2.4.x/fs/jffs2/compr.c
+++ b/linux-2.4.x/fs/jffs2/compr.c
@@ -1,151 +1,457 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
- *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
* Created by Arjan van de Ven <arjanv@redhat.com>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
- *
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ * University of Szeged, Hungary
*
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: compr.c,v 1.17 2001/09/23 09:56:46 dwmw2 Exp $
+ * $Id: compr.c,v 1.46 2005/11/07 11:14:38 gleixner Exp $
*
*/
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/jffs2.h>
+#include "compr.h"
+
+static DEFINE_SPINLOCK(jffs2_compressor_list_lock);
+
+/* Available compressors are on this list */
+static LIST_HEAD(jffs2_compressor_list);
-int zlib_compress(unsigned char *data_in, unsigned char *cpage_out, __u32 *sourcelen, __u32 *dstlen);
-void zlib_decompress(unsigned char *data_in, unsigned char *cpage_out, __u32 srclen, __u32 destlen);
-int rtime_compress(unsigned char *data_in, unsigned char *cpage_out, __u32 *sourcelen, __u32 *dstlen);
-void rtime_decompress(unsigned char *data_in, unsigned char *cpage_out, __u32 srclen, __u32 destlen);
-int rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out, __u32 *sourcelen, __u32 *dstlen);
-void rubinmips_decompress(unsigned char *data_in, unsigned char *cpage_out, __u32 srclen, __u32 destlen);
-int dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out, __u32 *sourcelen, __u32 *dstlen);
-void dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out, __u32 srclen, __u32 destlen);
+/* Actual compression mode */
+static int jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
+/* Statistics for blocks stored without compression */
+static uint32_t none_stat_compr_blocks=0,none_stat_decompr_blocks=0,none_stat_compr_size=0;
/* jffs2_compress:
* @data: Pointer to uncompressed data
- * @cdata: Pointer to buffer for compressed data
+ * @cdata: Pointer to returned pointer to buffer for compressed data
* @datalen: On entry, holds the amount of data available for compression.
* On exit, expected to hold the amount of data actually compressed.
* @cdatalen: On entry, holds the amount of space available for compressed
* data. On exit, expected to hold the actual size of the compressed
* data.
*
- * Returns: Byte to be stored with data indicating compression type used.
- * Zero is used to show that the data could not be compressed - the
+ * Returns: Lower byte to be stored with data indicating compression type used.
+ * Zero is used to show that the data could not be compressed - the
* compressed version was actually larger than the original.
+ * Upper byte will be used later. (soon)
*
* If the cdata buffer isn't large enough to hold all the uncompressed data,
- * jffs2_compress should compress as much as will fit, and should set
+ * jffs2_compress should compress as much as will fit, and should set
* *datalen accordingly to show the amount of data which was compressed.
*/
-unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 *datalen, __u32 *cdatalen)
+uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ unsigned char *data_in, unsigned char **cpage_out,
+ uint32_t *datalen, uint32_t *cdatalen)
{
- int ret;
+ int ret = JFFS2_COMPR_NONE;
+ int compr_ret;
+ struct jffs2_compressor *this, *best=NULL;
+ unsigned char *output_buf = NULL, *tmp_buf;
+ uint32_t orig_slen, orig_dlen;
+ uint32_t best_slen=0, best_dlen=0;
- ret = zlib_compress(data_in, cpage_out, datalen, cdatalen);
- if (!ret) {
- return JFFS2_COMPR_ZLIB;
- }
-#if 0 /* Disabled 23/9/1. With zlib it hardly ever gets a look in */
- ret = dynrubin_compress(data_in, cpage_out, datalen, cdatalen);
- if (!ret) {
- return JFFS2_COMPR_DYNRUBIN;
- }
-#endif
-#if 0 /* Disabled 26/2/1. Obsoleted by dynrubin */
- ret = rubinmips_compress(data_in, cpage_out, datalen, cdatalen);
- if (!ret) {
- return JFFS2_COMPR_RUBINMIPS;
- }
-#endif
- /* rtime does manage to recompress already-compressed data */
- ret = rtime_compress(data_in, cpage_out, datalen, cdatalen);
- if (!ret) {
- return JFFS2_COMPR_RTIME;
- }
-#if 0
- /* We don't need to copy. Let the caller special-case the COMPR_NONE case. */
- /* If we get here, no compression is going to work */
- /* But we might want to use the fragmentation part -- Arjan */
- memcpy(cpage_out,data_in,min(*datalen,*cdatalen));
- if (*datalen > *cdatalen)
- *datalen = *cdatalen;
-#endif
- return JFFS2_COMPR_NONE; /* We failed to compress */
+ switch (jffs2_compression_mode) {
+ case JFFS2_COMPR_MODE_NONE:
+ break;
+ case JFFS2_COMPR_MODE_PRIORITY:
+ output_buf = kmalloc(*cdatalen,GFP_KERNEL);
+ if (!output_buf) {
+ printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n");
+ goto out;
+ }
+ orig_slen = *datalen;
+ orig_dlen = *cdatalen;
+ spin_lock(&jffs2_compressor_list_lock);
+ list_for_each_entry(this, &jffs2_compressor_list, list) {
+ /* Skip decompress-only backwards-compatibility and disabled modules */
+ if ((!this->compress)||(this->disabled))
+ continue;
+ this->usecount++;
+ spin_unlock(&jffs2_compressor_list_lock);
+ *datalen = orig_slen;
+ *cdatalen = orig_dlen;
+ compr_ret = this->compress(data_in, output_buf, datalen, cdatalen, NULL);
+ spin_lock(&jffs2_compressor_list_lock);
+ this->usecount--;
+ if (!compr_ret) {
+ ret = this->compr;
+ this->stat_compr_blocks++;
+ this->stat_compr_orig_size += *datalen;
+ this->stat_compr_new_size += *cdatalen;
+ break;
+ }
+ }
+ spin_unlock(&jffs2_compressor_list_lock);
+ if (ret == JFFS2_COMPR_NONE) kfree(output_buf);
+ break;
+ case JFFS2_COMPR_MODE_SIZE:
+ orig_slen = *datalen;
+ orig_dlen = *cdatalen;
+ spin_lock(&jffs2_compressor_list_lock);
+ list_for_each_entry(this, &jffs2_compressor_list, list) {
+ /* Skip decompress-only backwards-compatibility and disabled modules */
+ if ((!this->compress)||(this->disabled))
+ continue;
+ /* Allocating memory for output buffer if necessary */
+ if ((this->compr_buf_size<orig_dlen)&&(this->compr_buf)) {
+ spin_unlock(&jffs2_compressor_list_lock);
+ kfree(this->compr_buf);
+ spin_lock(&jffs2_compressor_list_lock);
+ this->compr_buf_size=0;
+ this->compr_buf=NULL;
+ }
+ if (!this->compr_buf) {
+ spin_unlock(&jffs2_compressor_list_lock);
+ tmp_buf = kmalloc(orig_dlen,GFP_KERNEL);
+ spin_lock(&jffs2_compressor_list_lock);
+ if (!tmp_buf) {
+ printk(KERN_WARNING "JFFS2: No memory for compressor allocation. (%d bytes)\n",orig_dlen);
+ continue;
+ }
+ else {
+ this->compr_buf = tmp_buf;
+ this->compr_buf_size = orig_dlen;
+ }
+ }
+ this->usecount++;
+ spin_unlock(&jffs2_compressor_list_lock);
+ *datalen = orig_slen;
+ *cdatalen = orig_dlen;
+ compr_ret = this->compress(data_in, this->compr_buf, datalen, cdatalen, NULL);
+ spin_lock(&jffs2_compressor_list_lock);
+ this->usecount--;
+ if (!compr_ret) {
+ if ((!best_dlen)||(best_dlen>*cdatalen)) {
+ best_dlen = *cdatalen;
+ best_slen = *datalen;
+ best = this;
+ }
+ }
+ }
+ if (best_dlen) {
+ *cdatalen = best_dlen;
+ *datalen = best_slen;
+ output_buf = best->compr_buf;
+ best->compr_buf = NULL;
+ best->compr_buf_size = 0;
+ best->stat_compr_blocks++;
+ best->stat_compr_orig_size += best_slen;
+ best->stat_compr_new_size += best_dlen;
+ ret = best->compr;
+ }
+ spin_unlock(&jffs2_compressor_list_lock);
+ break;
+ default:
+ printk(KERN_ERR "JFFS2: unknown compression mode.\n");
+ }
+ out:
+ if (ret == JFFS2_COMPR_NONE) {
+ *cpage_out = data_in;
+ *datalen = *cdatalen;
+ none_stat_compr_blocks++;
+ none_stat_compr_size += *datalen;
+ }
+ else {
+ *cpage_out = output_buf;
+ }
+ return ret;
}
-
-int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in,
- unsigned char *data_out, __u32 cdatalen, __u32 datalen)
+int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ uint16_t comprtype, unsigned char *cdata_in,
+ unsigned char *data_out, uint32_t cdatalen, uint32_t datalen)
{
- switch (comprtype) {
+ struct jffs2_compressor *this;
+ int ret;
+
+ /* Older code had a bug where it would write non-zero 'usercompr'
+ fields. Deal with it. */
+ if ((comprtype & 0xff) <= JFFS2_COMPR_ZLIB)
+ comprtype &= 0xff;
+
+ switch (comprtype & 0xff) {
case JFFS2_COMPR_NONE:
/* This should be special-cased elsewhere, but we might as well deal with it */
memcpy(data_out, cdata_in, datalen);
+ none_stat_decompr_blocks++;
break;
-
case JFFS2_COMPR_ZERO:
memset(data_out, 0, datalen);
break;
+ default:
+ spin_lock(&jffs2_compressor_list_lock);
+ list_for_each_entry(this, &jffs2_compressor_list, list) {
+ if (comprtype == this->compr) {
+ this->usecount++;
+ spin_unlock(&jffs2_compressor_list_lock);
+ ret = this->decompress(cdata_in, data_out, cdatalen, datalen, NULL);
+ spin_lock(&jffs2_compressor_list_lock);
+ if (ret) {
+ printk(KERN_WARNING "Decompressor \"%s\" returned %d\n", this->name, ret);
+ }
+ else {
+ this->stat_decompr_blocks++;
+ }
+ this->usecount--;
+ spin_unlock(&jffs2_compressor_list_lock);
+ return ret;
+ }
+ }
+ printk(KERN_WARNING "JFFS2 compression type 0x%02x not available.\n", comprtype);
+ spin_unlock(&jffs2_compressor_list_lock);
+ return -EIO;
+ }
+ return 0;
+}
- case JFFS2_COMPR_ZLIB:
- zlib_decompress(cdata_in, data_out, cdatalen, datalen);
- break;
+int jffs2_register_compressor(struct jffs2_compressor *comp)
+{
+ struct jffs2_compressor *this;
- case JFFS2_COMPR_RTIME:
- rtime_decompress(cdata_in, data_out, cdatalen, datalen);
- break;
+ if (!comp->name) {
+ printk(KERN_WARNING "NULL compressor name at registering JFFS2 compressor. Failed.\n");
+ return -1;
+ }
+ comp->compr_buf_size=0;
+ comp->compr_buf=NULL;
+ comp->usecount=0;
+ comp->stat_compr_orig_size=0;
+ comp->stat_compr_new_size=0;
+ comp->stat_compr_blocks=0;
+ comp->stat_decompr_blocks=0;
+ D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name));
+
+ spin_lock(&jffs2_compressor_list_lock);
+
+ list_for_each_entry(this, &jffs2_compressor_list, list) {
+ if (this->priority < comp->priority) {
+ list_add(&comp->list, this->list.prev);
+ goto out;
+ }
+ }
+ list_add_tail(&comp->list, &jffs2_compressor_list);
+out:
+ D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
+ printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
+ })
+
+ spin_unlock(&jffs2_compressor_list_lock);
+
+ return 0;
+}
+
+int jffs2_unregister_compressor(struct jffs2_compressor *comp)
+{
+ D2(struct jffs2_compressor *this;)
+
+ D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name));
+
+ spin_lock(&jffs2_compressor_list_lock);
+
+ if (comp->usecount) {
+ spin_unlock(&jffs2_compressor_list_lock);
+ printk(KERN_WARNING "JFFS2: Compressor modul is in use. Unregister failed.\n");
+ return -1;
+ }
+ list_del(&comp->list);
+
+ D2(list_for_each_entry(this, &jffs2_compressor_list, list) {
+ printk(KERN_DEBUG "Compressor \"%s\", prio %d\n", this->name, this->priority);
+ })
+ spin_unlock(&jffs2_compressor_list_lock);
+ return 0;
+}
+
+#ifdef CONFIG_JFFS2_PROC
+
+#define JFFS2_STAT_BUF_SIZE 16000
+
+char *jffs2_list_compressors(void)
+{
+ struct jffs2_compressor *this;
+ char *buf, *act_buf;
+
+ act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL);
+ list_for_each_entry(this, &jffs2_compressor_list, list) {
+ act_buf += sprintf(act_buf, "%10s priority:%d ", this->name, this->priority);
+ if ((this->disabled)||(!this->compress))
+ act_buf += sprintf(act_buf,"disabled");
+ else
+ act_buf += sprintf(act_buf,"enabled");
+ act_buf += sprintf(act_buf,"\n");
+ }
+ return buf;
+}
+
+char *jffs2_stats(void)
+{
+ struct jffs2_compressor *this;
+ char *buf, *act_buf;
+
+ act_buf = buf = kmalloc(JFFS2_STAT_BUF_SIZE,GFP_KERNEL);
+
+ act_buf += sprintf(act_buf,"JFFS2 compressor statistics:\n");
+ act_buf += sprintf(act_buf,"%10s ","none");
+ act_buf += sprintf(act_buf,"compr: %d blocks (%d) decompr: %d blocks\n", none_stat_compr_blocks,
+ none_stat_compr_size, none_stat_decompr_blocks);
+ spin_lock(&jffs2_compressor_list_lock);
+ list_for_each_entry(this, &jffs2_compressor_list, list) {
+ act_buf += sprintf(act_buf,"%10s ",this->name);
+ if ((this->disabled)||(!this->compress))
+ act_buf += sprintf(act_buf,"- ");
+ else
+ act_buf += sprintf(act_buf,"+ ");
+ act_buf += sprintf(act_buf,"compr: %d blocks (%d/%d) decompr: %d blocks ", this->stat_compr_blocks,
+ this->stat_compr_new_size, this->stat_compr_orig_size,
+ this->stat_decompr_blocks);
+ act_buf += sprintf(act_buf,"\n");
+ }
+ spin_unlock(&jffs2_compressor_list_lock);
+
+ return buf;
+}
+
+char *jffs2_get_compression_mode_name(void)
+{
+ switch (jffs2_compression_mode) {
+ case JFFS2_COMPR_MODE_NONE:
+ return "none";
+ case JFFS2_COMPR_MODE_PRIORITY:
+ return "priority";
+ case JFFS2_COMPR_MODE_SIZE:
+ return "size";
+ }
+ return "unkown";
+}
+
+int jffs2_set_compression_mode_name(const char *name)
+{
+ if (!strcmp("none",name)) {
+ jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+ return 0;
+ }
+ if (!strcmp("priority",name)) {
+ jffs2_compression_mode = JFFS2_COMPR_MODE_PRIORITY;
+ return 0;
+ }
+ if (!strcmp("size",name)) {
+ jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
+ return 0;
+ }
+ return 1;
+}
+
+static int jffs2_compressor_Xable(const char *name, int disabled)
+{
+ struct jffs2_compressor *this;
+ spin_lock(&jffs2_compressor_list_lock);
+ list_for_each_entry(this, &jffs2_compressor_list, list) {
+ if (!strcmp(this->name, name)) {
+ this->disabled = disabled;
+ spin_unlock(&jffs2_compressor_list_lock);
+ return 0;
+ }
+ }
+ spin_unlock(&jffs2_compressor_list_lock);
+ printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);
+ return 1;
+}
+
+int jffs2_enable_compressor_name(const char *name)
+{
+ return jffs2_compressor_Xable(name, 0);
+}
+
+int jffs2_disable_compressor_name(const char *name)
+{
+ return jffs2_compressor_Xable(name, 1);
+}
+
+int jffs2_set_compressor_priority(const char *name, int priority)
+{
+ struct jffs2_compressor *this,*comp;
+ spin_lock(&jffs2_compressor_list_lock);
+ list_for_each_entry(this, &jffs2_compressor_list, list) {
+ if (!strcmp(this->name, name)) {
+ this->priority = priority;
+ comp = this;
+ goto reinsert;
+ }
+ }
+ spin_unlock(&jffs2_compressor_list_lock);
+ printk(KERN_WARNING "JFFS2: compressor %s not found.\n",name);
+ return 1;
+reinsert:
+ /* The list is sorted by priority, so if we change a
+ compressor's priority we have to reinsert it at the
+ right place */
+ list_del(&comp->list);
+ list_for_each_entry(this, &jffs2_compressor_list, list) {
+ if (this->priority < comp->priority) {
+ list_add(&comp->list, this->list.prev);
+ spin_unlock(&jffs2_compressor_list_lock);
+ return 0;
+ }
+ }
+ list_add_tail(&comp->list, &jffs2_compressor_list);
+ spin_unlock(&jffs2_compressor_list_lock);
+ return 0;
+}
- case JFFS2_COMPR_RUBINMIPS:
-#if 0 /* Disabled 23/9/1 */
- rubinmips_decompress(cdata_in, data_out, cdatalen, datalen);
-#else
- printk(KERN_WARNING "JFFS2: Rubinmips compression encountered but support not compiled in!\n");
#endif
- break;
- case JFFS2_COMPR_DYNRUBIN:
-#if 1 /* Phase this one out */
- dynrubin_decompress(cdata_in, data_out, cdatalen, datalen);
+
+void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
+{
+ if (orig != comprbuf)
+ kfree(comprbuf);
+}
+
+int jffs2_compressors_init(void)
+{
+/* Registering compressors */
+#ifdef CONFIG_JFFS2_ZLIB
+ jffs2_zlib_init();
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+ jffs2_rtime_init();
+#endif
+#ifdef CONFIG_JFFS2_RUBIN
+ jffs2_rubinmips_init();
+ jffs2_dynrubin_init();
+#endif
+/* Setting default compression mode */
+#ifdef CONFIG_JFFS2_CMODE_NONE
+ jffs2_compression_mode = JFFS2_COMPR_MODE_NONE;
+ D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");)
+#else
+#ifdef CONFIG_JFFS2_CMODE_SIZE
+ jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE;
+ D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");)
#else
- printk(KERN_WARNING "JFFS2: Dynrubin compression encountered but support not compiled in!\n");
+ D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");)
#endif
- break;
+#endif
+ return 0;
+}
- default:
- printk(KERN_NOTICE "Unknown JFFS2 compression type 0x%02x\n", comprtype);
- return -EIO;
- }
- return 0;
+int jffs2_compressors_exit(void)
+{
+/* Unregistering compressors */
+#ifdef CONFIG_JFFS2_RUBIN
+ jffs2_dynrubin_exit();
+ jffs2_rubinmips_exit();
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+ jffs2_rtime_exit();
+#endif
+#ifdef CONFIG_JFFS2_ZLIB
+ jffs2_zlib_exit();
+#endif
+ return 0;
}
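/*
 * A userspace sketch of the two selection policies implemented in
 * jffs2_compress() above: JFFS2_COMPR_MODE_PRIORITY takes the first
 * compressor (in priority order) that succeeds, JFFS2_COMPR_MODE_SIZE tries
 * them all and keeps the smallest result.  The stub "compressors" and their
 * output sizes are invented for illustration; the real code walks
 * jffs2_compressor_list under jffs2_compressor_list_lock.
 */
#include <stdio.h>

struct stub_compr {
	const char *name;
	int priority;
	int ok;			/* would compression succeed here?	*/
	unsigned int out_len;	/* hypothetical compressed size		*/
};

/* kept sorted by descending priority, as jffs2_register_compressor does */
static struct stub_compr list[] = {
	{ "zlib",  60, 1, 1800 },
	{ "rtime", 50, 1, 2600 },
	{ "rubin", 20, 0, 0    },
};

int main(void)
{
	unsigned int n = sizeof(list) / sizeof(list[0]), i;
	const struct stub_compr *best = NULL;

	/* MODE_PRIORITY: first success wins */
	for (i = 0; i < n; i++)
		if (list[i].ok) {
			printf("priority mode picks %s (%u bytes)\n",
			       list[i].name, list[i].out_len);
			break;
		}

	/* MODE_SIZE: smallest successful output wins */
	for (i = 0; i < n; i++)
		if (list[i].ok && (!best || list[i].out_len < best->out_len))
			best = &list[i];
	if (best)
		printf("size mode picks %s (%u bytes)\n", best->name, best->out_len);
	return 0;
}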
diff --git a/linux-2.4.x/fs/jffs2/compr.h b/linux-2.4.x/fs/jffs2/compr.h
new file mode 100644
index 0000000..a77e830
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/compr.h
@@ -0,0 +1,107 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ * University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in the
+ * jffs2 directory.
+ *
+ * $Id: compr.h,v 1.9 2005/11/07 11:14:38 gleixner Exp $
+ *
+ */
+
+#ifndef __JFFS2_COMPR_H__
+#define __JFFS2_COMPR_H__
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/jffs2.h>
+#include <linux/jffs2_fs_i.h>
+#include <linux/jffs2_fs_sb.h>
+#include "nodelist.h"
+
+#define JFFS2_RUBINMIPS_PRIORITY 10
+#define JFFS2_DYNRUBIN_PRIORITY 20
+#define JFFS2_LZARI_PRIORITY 30
+#define JFFS2_LZO_PRIORITY 40
+#define JFFS2_RTIME_PRIORITY 50
+#define JFFS2_ZLIB_PRIORITY 60
+
+#define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */
+#define JFFS2_DYNRUBIN_DISABLED /* for decompression */
+
+#define JFFS2_COMPR_MODE_NONE 0
+#define JFFS2_COMPR_MODE_PRIORITY 1
+#define JFFS2_COMPR_MODE_SIZE 2
+
+struct jffs2_compressor {
+ struct list_head list;
+ int priority; /* used by priority compr. mode */
+ char *name;
+ char compr; /* JFFS2_COMPR_XXX */
+ int (*compress)(unsigned char *data_in, unsigned char *cpage_out,
+ uint32_t *srclen, uint32_t *destlen, void *model);
+ int (*decompress)(unsigned char *cdata_in, unsigned char *data_out,
+ uint32_t cdatalen, uint32_t datalen, void *model);
+ int usecount;
+ int disabled; /* if set, the compressor won't compress */
+ unsigned char *compr_buf; /* used by size compr. mode */
+ uint32_t compr_buf_size; /* used by size compr. mode */
+ uint32_t stat_compr_orig_size;
+ uint32_t stat_compr_new_size;
+ uint32_t stat_compr_blocks;
+ uint32_t stat_decompr_blocks;
+};
+
+int jffs2_register_compressor(struct jffs2_compressor *comp);
+int jffs2_unregister_compressor(struct jffs2_compressor *comp);
+
+int jffs2_compressors_init(void);
+int jffs2_compressors_exit(void);
+
+uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ unsigned char *data_in, unsigned char **cpage_out,
+ uint32_t *datalen, uint32_t *cdatalen);
+
+int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ uint16_t comprtype, unsigned char *cdata_in,
+ unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
+
+void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig);
+
+#ifdef CONFIG_JFFS2_PROC
+int jffs2_enable_compressor_name(const char *name);
+int jffs2_disable_compressor_name(const char *name);
+int jffs2_set_compression_mode_name(const char *mode_name);
+char *jffs2_get_compression_mode_name(void);
+int jffs2_set_compressor_priority(const char *mode_name, int priority);
+char *jffs2_list_compressors(void);
+char *jffs2_stats(void);
+#endif
+
+/* Compressor modules */
+/* These functions will be called by jffs2_compressors_init/exit */
+
+#ifdef CONFIG_JFFS2_RUBIN
+int jffs2_rubinmips_init(void);
+void jffs2_rubinmips_exit(void);
+int jffs2_dynrubin_init(void);
+void jffs2_dynrubin_exit(void);
+#endif
+#ifdef CONFIG_JFFS2_RTIME
+int jffs2_rtime_init(void);
+void jffs2_rtime_exit(void);
+#endif
+#ifdef CONFIG_JFFS2_ZLIB
+int jffs2_zlib_init(void);
+void jffs2_zlib_exit(void);
+#endif
+
+#endif /* __JFFS2_COMPR_H__ */
diff --git a/linux-2.4.x/fs/jffs2/compr_rtime.c b/linux-2.4.x/fs/jffs2/compr_rtime.c
index 1a71020..42ee158 100644
--- a/linux-2.4.x/fs/jffs2/compr_rtime.c
+++ b/linux-2.4.x/fs/jffs2/compr_rtime.c
@@ -1,43 +1,19 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
* Created by Arjan van de Ven <arjanv@redhat.com>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: compr_rtime.c,v 1.5 2001/03/15 15:38:23 dwmw2 Exp $
+ * $Id: compr_rtime.c,v 1.16 2005/11/07 11:14:38 gleixner Exp $
*
*
* Very simple lz77-ish encoder.
*
* Theory of operation: Both encoder and decoder have a list of "last
- * occurances" for every possible source-value; after sending the
+ * occurrences" for every possible source-value; after sending the
* first source-byte, the second byte indicates the "run" length of
* matches
*
@@ -48,29 +24,33 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/string.h>
+#include <linux/string.h>
+#include <linux/jffs2.h>
+#include "compr.h"
/* _compress returns the compressed size, -1 if bigger */
-int rtime_compress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 *sourcelen, __u32 *dstlen)
+static int jffs2_rtime_compress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t *sourcelen, uint32_t *dstlen,
+ void *model)
{
- int positions[256];
+ short positions[256];
int outpos = 0;
int pos=0;
- memset(positions,0,sizeof(positions));
-
+ memset(positions,0,sizeof(positions));
+
while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
int backpos, runlen=0;
unsigned char value;
-
+
value = data_in[pos];
cpage_out[outpos++] = data_in[pos++];
-
+
backpos = positions[value];
positions[value]=pos;
-
+
while ((backpos < pos) && (pos < (*sourcelen)) &&
(data_in[pos]==data_in[backpos++]) && (runlen<255)) {
pos++;
@@ -83,33 +63,35 @@ int rtime_compress(unsigned char *data_in, unsigned char *cpage_out,
/* We failed */
return -1;
}
-
+
/* Tell the caller how much we managed to compress, and how much space it took */
*sourcelen = pos;
*dstlen = outpos;
return 0;
-}
+}
-void rtime_decompress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 srclen, __u32 destlen)
+static int jffs2_rtime_decompress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t srclen, uint32_t destlen,
+ void *model)
{
- int positions[256];
+ short positions[256];
int outpos = 0;
int pos=0;
-
- memset(positions,0,sizeof(positions));
-
+
+ memset(positions,0,sizeof(positions));
+
while (outpos<destlen) {
unsigned char value;
int backoffs;
int repeat;
-
+
value = data_in[pos++];
cpage_out[outpos++] = value; /* first the verbatim copied byte */
repeat = data_in[pos++];
backoffs = positions[value];
-
+
positions[value]=outpos;
if (repeat) {
if (backoffs + repeat >= outpos) {
@@ -119,10 +101,32 @@ void rtime_decompress(unsigned char *data_in, unsigned char *cpage_out,
}
} else {
memcpy(&cpage_out[outpos],&cpage_out[backoffs],repeat);
- outpos+=repeat;
+ outpos+=repeat;
}
}
- }
-}
+ }
+ return 0;
+}
+
+static struct jffs2_compressor jffs2_rtime_comp = {
+ .priority = JFFS2_RTIME_PRIORITY,
+ .name = "rtime",
+ .compr = JFFS2_COMPR_RTIME,
+ .compress = &jffs2_rtime_compress,
+ .decompress = &jffs2_rtime_decompress,
+#ifdef JFFS2_RTIME_DISABLED
+ .disabled = 1,
+#else
+ .disabled = 0,
+#endif
+};
+int jffs2_rtime_init(void)
+{
+ return jffs2_register_compressor(&jffs2_rtime_comp);
+}
+void jffs2_rtime_exit(void)
+{
+ jffs2_unregister_compressor(&jffs2_rtime_comp);
+}
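/*
 * The rtime codec above, carried almost verbatim into a standalone userspace
 * round-trip test so the "last occurrence per byte value" scheme can be tried
 * outside the kernel.  The unused 'model' argument is dropped, kernel types
 * are replaced, and the decompressor's overlapping/non-overlapping copy
 * branches are collapsed into a single byte-wise loop (which is equivalent);
 * everything else follows the functions in the hunk above.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int rtime_compress(const unsigned char *data_in, unsigned char *cpage_out,
			  uint32_t *sourcelen, uint32_t *dstlen)
{
	short positions[256];
	int outpos = 0, pos = 0;

	memset(positions, 0, sizeof(positions));

	while (pos < (int)*sourcelen && outpos <= (int)*dstlen - 2) {
		int backpos, runlen = 0;
		unsigned char value = data_in[pos];

		cpage_out[outpos++] = data_in[pos++];	/* verbatim byte */

		backpos = positions[value];
		positions[value] = pos;

		while (backpos < pos && pos < (int)*sourcelen &&
		       data_in[pos] == data_in[backpos++] && runlen < 255) {
			pos++;
			runlen++;
		}
		cpage_out[outpos++] = runlen;		/* run length of the match */
	}

	if (outpos >= pos)
		return -1;		/* didn't actually shrink the data */

	*sourcelen = pos;
	*dstlen = outpos;
	return 0;
}

static void rtime_decompress(const unsigned char *data_in, unsigned char *cpage_out,
			     uint32_t destlen)
{
	short positions[256];
	int outpos = 0, pos = 0;

	memset(positions, 0, sizeof(positions));

	while (outpos < (int)destlen) {
		unsigned char value = data_in[pos++];
		int repeat, backoffs;

		cpage_out[outpos++] = value;	/* first the verbatim copied byte */
		repeat = data_in[pos++];
		backoffs = positions[value];
		positions[value] = outpos;

		while (repeat--)		/* byte-wise copy handles overlap too */
			cpage_out[outpos++] = cpage_out[backoffs++];
	}
}

int main(void)
{
	unsigned char in[96], comp[96], out[96];
	uint32_t slen = sizeof(in), dlen = sizeof(comp);

	memset(in, 'A', sizeof(in));	/* highly repetitive, so rtime wins */

	if (rtime_compress(in, comp, &slen, &dlen)) {
		printf("rtime could not compress this buffer\n");
		return 1;
	}
	rtime_decompress(comp, out, slen);
	printf("%u bytes -> %u bytes, round-trip %s\n", (unsigned)slen, (unsigned)dlen,
	       memcmp(in, out, slen) ? "FAILED" : "ok");
	return 0;
}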
diff --git a/linux-2.4.x/fs/jffs2/compr_rubin.c b/linux-2.4.x/fs/jffs2/compr_rubin.c
index 22d8dd5..de99bbe 100644
--- a/linux-2.4.x/fs/jffs2/compr_rubin.c
+++ b/linux-2.4.x/fs/jffs2/compr_rubin.c
@@ -1,50 +1,26 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
*
* Created by Arjan van de Ven <arjanv@redhat.com>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: compr_rubin.c,v 1.13 2001/09/23 10:06:05 rmk Exp $
+ * $Id: compr_rubin.c,v 1.22 2005/11/07 11:14:38 gleixner Exp $
*
*/
-
+
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/jffs2.h>
#include "compr_rubin.h"
#include "histo_mips.h"
+#include "compr.h"
-
-
-void init_rubin(struct rubin_state *rs, int div, int *bits)
-{
+static void init_rubin(struct rubin_state *rs, int div, int *bits)
+{
int c;
rs->q = 0;
@@ -56,7 +32,7 @@ void init_rubin(struct rubin_state *rs, int div, int *bits)
}
-int encode(struct rubin_state *rs, long A, long B, int symbol)
+static int encode(struct rubin_state *rs, long A, long B, int symbol)
{
long i0, i1;
@@ -64,7 +40,7 @@ int encode(struct rubin_state *rs, long A, long B, int symbol)
while ((rs->q >= UPPER_BIT_RUBIN) || ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
rs->bit_number++;
-
+
ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0);
if (ret)
return ret;
@@ -91,8 +67,8 @@ int encode(struct rubin_state *rs, long A, long B, int symbol)
}
-void end_rubin(struct rubin_state *rs)
-{
+static void end_rubin(struct rubin_state *rs)
+{
int i;
@@ -104,9 +80,9 @@ void end_rubin(struct rubin_state *rs)
}
-void init_decode(struct rubin_state *rs, int div, int *bits)
+static void init_decode(struct rubin_state *rs, int div, int *bits)
{
- init_rubin(rs, div, bits);
+ init_rubin(rs, div, bits);
/* behalve lower */
rs->rec_q = 0;
@@ -151,7 +127,7 @@ static void __do_decode(struct rubin_state *rs, unsigned long p, unsigned long q
rs->rec_q = rec_q;
}
-int decode(struct rubin_state *rs, long A, long B)
+static int decode(struct rubin_state *rs, long A, long B)
{
unsigned long p = rs->p, q = rs->q;
long i0, threshold;
@@ -212,8 +188,8 @@ static int in_byte(struct rubin_state *rs)
-int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
- unsigned char *cpage_out, __u32 *sourcelen, __u32 *dstlen)
+static int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
+ unsigned char *cpage_out, uint32_t *sourcelen, uint32_t *dstlen)
{
int outpos = 0;
int pos=0;
@@ -222,44 +198,46 @@ int rubin_do_compress(int bit_divider, int *bits, unsigned char *data_in,
init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32);
init_rubin(&rs, bit_divider, bits);
-
+
while (pos < (*sourcelen) && !out_byte(&rs, data_in[pos]))
pos++;
-
+
end_rubin(&rs);
if (outpos > pos) {
/* We failed */
return -1;
}
-
- /* Tell the caller how much we managed to compress,
+
+ /* Tell the caller how much we managed to compress,
* and how much space it took */
-
+
outpos = (pushedbits(&rs.pp)+7)/8;
-
+
if (outpos >= pos)
return -1; /* We didn't actually compress */
*sourcelen = pos;
*dstlen = outpos;
return 0;
-}
+}
#if 0
/* _compress returns the compressed size, -1 if bigger */
-int rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 *sourcelen, __u32 *dstlen)
+int jffs2_rubinmips_compress(unsigned char *data_in, unsigned char *cpage_out,
+ uint32_t *sourcelen, uint32_t *dstlen, void *model)
{
return rubin_do_compress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
}
#endif
-int dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 *sourcelen, __u32 *dstlen)
+static int jffs2_dynrubin_compress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t *sourcelen, uint32_t *dstlen,
+ void *model)
{
int bits[8];
unsigned char histo[256];
int i;
int ret;
- __u32 mysrclen, mydstlen;
+ uint32_t mysrclen, mydstlen;
mysrclen = *sourcelen;
mydstlen = *dstlen - 8;
@@ -299,7 +277,7 @@ int dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out,
}
ret = rubin_do_compress(256, bits, data_in, cpage_out+8, &mysrclen, &mydstlen);
- if (ret)
+ if (ret)
return ret;
/* Add back the 8 bytes we took for the probabilities */
@@ -315,29 +293,34 @@ int dynrubin_compress(unsigned char *data_in, unsigned char *cpage_out,
return 0;
}
-void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in,
- unsigned char *page_out, __u32 srclen, __u32 destlen)
+static void rubin_do_decompress(int bit_divider, int *bits, unsigned char *cdata_in,
+ unsigned char *page_out, uint32_t srclen, uint32_t destlen)
{
int outpos = 0;
struct rubin_state rs;
-
+
init_pushpull(&rs.pp, cdata_in, srclen, 0, 0);
init_decode(&rs, bit_divider, bits);
-
+
while (outpos < destlen) {
page_out[outpos++] = in_byte(&rs);
}
-}
+}
-void rubinmips_decompress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 sourcelen, __u32 dstlen)
+static int jffs2_rubinmips_decompress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t sourcelen, uint32_t dstlen,
+ void *model)
{
rubin_do_decompress(BIT_DIVIDER_MIPS, bits_mips, data_in, cpage_out, sourcelen, dstlen);
+ return 0;
}
-void dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 sourcelen, __u32 dstlen)
+static int jffs2_dynrubin_decompress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t sourcelen, uint32_t dstlen,
+ void *model)
{
int bits[8];
int c;
@@ -346,4 +329,51 @@ void dynrubin_decompress(unsigned char *data_in, unsigned char *cpage_out,
bits[c] = data_in[c];
rubin_do_decompress(256, bits, data_in+8, cpage_out, sourcelen-8, dstlen);
+ return 0;
+}
+
+static struct jffs2_compressor jffs2_rubinmips_comp = {
+ .priority = JFFS2_RUBINMIPS_PRIORITY,
+ .name = "rubinmips",
+ .compr = JFFS2_COMPR_DYNRUBIN,
+ .compress = NULL, /*&jffs2_rubinmips_compress,*/
+ .decompress = &jffs2_rubinmips_decompress,
+#ifdef JFFS2_RUBINMIPS_DISABLED
+ .disabled = 1,
+#else
+ .disabled = 0,
+#endif
+};
+
+int jffs2_rubinmips_init(void)
+{
+ return jffs2_register_compressor(&jffs2_rubinmips_comp);
+}
+
+void jffs2_rubinmips_exit(void)
+{
+ jffs2_unregister_compressor(&jffs2_rubinmips_comp);
+}
+
+static struct jffs2_compressor jffs2_dynrubin_comp = {
+ .priority = JFFS2_DYNRUBIN_PRIORITY,
+ .name = "dynrubin",
+ .compr = JFFS2_COMPR_RUBINMIPS,
+ .compress = jffs2_dynrubin_compress,
+ .decompress = &jffs2_dynrubin_decompress,
+#ifdef JFFS2_DYNRUBIN_DISABLED
+ .disabled = 1,
+#else
+ .disabled = 0,
+#endif
+};
+
+int jffs2_dynrubin_init(void)
+{
+ return jffs2_register_compressor(&jffs2_dynrubin_comp);
+}
+
+void jffs2_dynrubin_exit(void)
+{
+ jffs2_unregister_compressor(&jffs2_dynrubin_comp);
}
diff --git a/linux-2.4.x/fs/jffs2/compr_rubin.h b/linux-2.4.x/fs/jffs2/compr_rubin.h
index 58ccc2b..bf1a934 100644
--- a/linux-2.4.x/fs/jffs2/compr_rubin.h
+++ b/linux-2.4.x/fs/jffs2/compr_rubin.h
@@ -1,7 +1,7 @@
/* Rubin encoder/decoder header */
/* work started at : aug 3, 1994 */
/* last modification : aug 15, 1994 */
-/* $Id: compr_rubin.h,v 1.5 2001/02/26 13:50:01 dwmw2 Exp $ */
+/* $Id: compr_rubin.h,v 1.7 2005/11/07 11:14:38 gleixner Exp $ */
#include "pushpull.h"
@@ -11,18 +11,11 @@
struct rubin_state {
- unsigned long p;
- unsigned long q;
+ unsigned long p;
+ unsigned long q;
unsigned long rec_q;
long bit_number;
struct pushpull pp;
int bit_divider;
int bits[8];
};
-
-
-void init_rubin (struct rubin_state *rs, int div, int *bits);
-int encode (struct rubin_state *, long, long, int);
-void end_rubin (struct rubin_state *);
-void init_decode (struct rubin_state *, int div, int *bits);
-int decode (struct rubin_state *, long, long);
diff --git a/linux-2.4.x/fs/jffs2/compr_zlib.c b/linux-2.4.x/fs/jffs2/compr_zlib.c
index 137568c..4db8be8 100644
--- a/linux-2.4.x/fs/jffs2/compr_zlib.c
+++ b/linux-2.4.x/fs/jffs2/compr_zlib.c
@@ -1,177 +1,222 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001, 2002 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: compr_zlib.c,v 1.8.2.1 2002/10/11 09:04:44 dwmw2 Exp $
+ * $Id: compr_zlib.c,v 1.32 2005/11/07 11:14:38 gleixner Exp $
*
*/
-#ifndef __KERNEL__
+#if !defined(__KERNEL__) && !defined(__ECOS)
#error "The userspace support got too messy and was removed. Update your mkfs.jffs2"
#endif
#include <linux/config.h>
#include <linux/kernel.h>
-#include <linux/mtd/compatmac.h> /* for min() */
+#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/jffs2.h>
#include <linux/zlib.h>
+#include <linux/zutil.h>
#include "nodelist.h"
+#include "compr.h"
- /* Plan: call deflate() with avail_in == *sourcelen,
- avail_out = *dstlen - 12 and flush == Z_FINISH.
+ /* Plan: call deflate() with avail_in == *sourcelen,
+ avail_out = *dstlen - 12 and flush == Z_FINISH.
If it doesn't manage to finish, call it again with
avail_in == 0 and avail_out set to the remaining 12
- bytes for it to clean up.
+ bytes for it to clean up.
Q: Is 12 bytes sufficient?
*/
#define STREAM_END_SPACE 12
static DECLARE_MUTEX(deflate_sem);
static DECLARE_MUTEX(inflate_sem);
-static void *deflate_workspace;
-static void *inflate_workspace;
+static z_stream inf_strm, def_strm;
-int __init jffs2_zlib_init(void)
+#ifdef __KERNEL__ /* Linux-only */
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+static int __init alloc_workspaces(void)
{
- deflate_workspace = vmalloc(zlib_deflate_workspacesize());
- if (!deflate_workspace) {
+ def_strm.workspace = vmalloc(zlib_deflate_workspacesize());
+ if (!def_strm.workspace) {
printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize());
return -ENOMEM;
}
D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize()));
- inflate_workspace = vmalloc(zlib_inflate_workspacesize());
- if (!inflate_workspace) {
+ inf_strm.workspace = vmalloc(zlib_inflate_workspacesize());
+ if (!inf_strm.workspace) {
printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize());
- vfree(deflate_workspace);
+ vfree(def_strm.workspace);
return -ENOMEM;
}
D1(printk(KERN_DEBUG "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize()));
return 0;
}
-void jffs2_zlib_exit(void)
+static void free_workspaces(void)
{
- vfree(deflate_workspace);
- vfree(inflate_workspace);
+ vfree(def_strm.workspace);
+ vfree(inf_strm.workspace);
}
-
-int zlib_compress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 *sourcelen, __u32 *dstlen)
+#else
+#define alloc_workspaces() (0)
+#define free_workspaces() do { } while(0)
+#endif /* __KERNEL__ */
+
+static int jffs2_zlib_compress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t *sourcelen, uint32_t *dstlen,
+ void *model)
{
- z_stream strm;
int ret;
if (*dstlen <= STREAM_END_SPACE)
return -1;
down(&deflate_sem);
- strm.workspace = deflate_workspace;
- if (Z_OK != zlib_deflateInit(&strm, 3)) {
+ if (Z_OK != zlib_deflateInit(&def_strm, 3)) {
printk(KERN_WARNING "deflateInit failed\n");
up(&deflate_sem);
return -1;
}
- strm.next_in = data_in;
- strm.total_in = 0;
-
- strm.next_out = cpage_out;
- strm.total_out = 0;
+ def_strm.next_in = data_in;
+ def_strm.total_in = 0;
+
+ def_strm.next_out = cpage_out;
+ def_strm.total_out = 0;
- while (strm.total_out < *dstlen - STREAM_END_SPACE && strm.total_in < *sourcelen) {
- strm.avail_out = *dstlen - (strm.total_out + STREAM_END_SPACE);
- strm.avail_in = min((unsigned)(*sourcelen-strm.total_in), strm.avail_out);
+ while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) {
+ def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE);
+ def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out);
D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n",
- strm.avail_in, strm.avail_out));
- ret = zlib_deflate(&strm, Z_PARTIAL_FLUSH);
- D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
- strm.avail_in, strm.avail_out, strm.total_in, strm.total_out));
+ def_strm.avail_in, def_strm.avail_out));
+ ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH);
+ D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
+ def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out));
if (ret != Z_OK) {
D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
- zlib_deflateEnd(&strm);
+ zlib_deflateEnd(&def_strm);
up(&deflate_sem);
return -1;
}
}
- strm.avail_out += STREAM_END_SPACE;
- strm.avail_in = 0;
- ret = zlib_deflate(&strm, Z_FINISH);
- zlib_deflateEnd(&strm);
- up(&deflate_sem);
+ def_strm.avail_out += STREAM_END_SPACE;
+ def_strm.avail_in = 0;
+ ret = zlib_deflate(&def_strm, Z_FINISH);
+ zlib_deflateEnd(&def_strm);
+
if (ret != Z_STREAM_END) {
D1(printk(KERN_DEBUG "final deflate returned %d\n", ret));
- return -1;
+ ret = -1;
+ goto out;
}
- D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n",
- strm.total_in, strm.total_out));
+ if (def_strm.total_out >= def_strm.total_in) {
+ D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld; failing\n",
+ def_strm.total_in, def_strm.total_out));
+ ret = -1;
+ goto out;
+ }
- if (strm.total_out >= strm.total_in)
- return -1;
+ D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n",
+ def_strm.total_in, def_strm.total_out));
- *dstlen = strm.total_out;
- *sourcelen = strm.total_in;
- return 0;
+ *dstlen = def_strm.total_out;
+ *sourcelen = def_strm.total_in;
+ ret = 0;
+ out:
+ up(&deflate_sem);
+ return ret;
}
-void zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 srclen, __u32 destlen)
+static int jffs2_zlib_decompress(unsigned char *data_in,
+ unsigned char *cpage_out,
+ uint32_t srclen, uint32_t destlen,
+ void *model)
{
- z_stream strm;
int ret;
+ int wbits = MAX_WBITS;
down(&inflate_sem);
- strm.workspace = inflate_workspace;
- if (Z_OK != zlib_inflateInit(&strm)) {
+ inf_strm.next_in = data_in;
+ inf_strm.avail_in = srclen;
+ inf_strm.total_in = 0;
+
+ inf_strm.next_out = cpage_out;
+ inf_strm.avail_out = destlen;
+ inf_strm.total_out = 0;
+
+ /* If it's deflate, and it's got no preset dictionary, then
+ we can tell zlib to skip the adler32 check. */
+ if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
+ ((data_in[0] & 0x0f) == Z_DEFLATED) &&
+ !(((data_in[0]<<8) + data_in[1]) % 31)) {
+
+ D2(printk(KERN_DEBUG "inflate skipping adler32\n"));
+ wbits = -((data_in[0] >> 4) + 8);
+ inf_strm.next_in += 2;
+ inf_strm.avail_in -= 2;
+ } else {
+ /* Let this remain D1 for now -- it should never happen */
+ D1(printk(KERN_DEBUG "inflate not skipping adler32\n"));
+ }
+
+
+ if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) {
printk(KERN_WARNING "inflateInit failed\n");
up(&inflate_sem);
- return;
+ return 1;
}
- strm.next_in = data_in;
- strm.avail_in = srclen;
- strm.total_in = 0;
-
- strm.next_out = cpage_out;
- strm.avail_out = destlen;
- strm.total_out = 0;
-
- while((ret = zlib_inflate(&strm, Z_FINISH)) == Z_OK)
+
+ while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK)
;
if (ret != Z_STREAM_END) {
printk(KERN_NOTICE "inflate returned %d\n", ret);
}
- zlib_inflateEnd(&strm);
+ zlib_inflateEnd(&inf_strm);
up(&inflate_sem);
+ return 0;
+}
+
+static struct jffs2_compressor jffs2_zlib_comp = {
+ .priority = JFFS2_ZLIB_PRIORITY,
+ .name = "zlib",
+ .compr = JFFS2_COMPR_ZLIB,
+ .compress = &jffs2_zlib_compress,
+ .decompress = &jffs2_zlib_decompress,
+#ifdef JFFS2_ZLIB_DISABLED
+ .disabled = 1,
+#else
+ .disabled = 0,
+#endif
+};
+
+int __init jffs2_zlib_init(void)
+{
+ int ret;
+
+ ret = alloc_workspaces();
+ if (ret)
+ return ret;
+
+ ret = jffs2_register_compressor(&jffs2_zlib_comp);
+ if (ret)
+ free_workspaces();
+
+ return ret;
+}
+
+void jffs2_zlib_exit(void)
+{
+ jffs2_unregister_compressor(&jffs2_zlib_comp);
+ free_workspaces();
}
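/*
 * A userspace illustration of the header test that the new
 * jffs2_zlib_decompress() uses to decide it may skip the adler32 check.
 * 0x78 0x9c is the common zlib header for a 32 KiB deflate window;
 * Z_DEFLATED, PRESET_DICT and MAX_WBITS are defined locally with the values
 * zlib.h/zutil.h use (8, 0x20 and 15), since zutil.h is not an installed
 * header.
 */
#include <stdio.h>

#define Z_DEFLATED  8
#define PRESET_DICT 0x20
#define MAX_WBITS   15

int main(void)
{
	unsigned char hdr[2] = { 0x78, 0x9c };
	int wbits = MAX_WBITS;

	if (!(hdr[1] & PRESET_DICT) &&
	    (hdr[0] & 0x0f) == Z_DEFLATED &&
	    !(((hdr[0] << 8) + hdr[1]) % 31)) {
		/* negative windowBits tells zlib: raw deflate, no adler32 */
		wbits = -((hdr[0] >> 4) + 8);
		printf("plain zlib header, inflating raw with wbits = %d\n", wbits);
	} else {
		printf("not a plain zlib header, inflating normally (wbits = %d)\n", wbits);
	}
	return 0;
}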
diff --git a/linux-2.4.x/fs/jffs2/comprtest.c b/linux-2.4.x/fs/jffs2/comprtest.c
index dbc12cc..f0fb8be 100644
--- a/linux-2.4.x/fs/jffs2/comprtest.c
+++ b/linux-2.4.x/fs/jffs2/comprtest.c
@@ -1,4 +1,4 @@
-/* $Id: comprtest.c,v 1.4 2001/02/21 14:03:20 dwmw2 Exp $ */
+/* $Id: comprtest.c,v 1.6 2005/11/07 11:14:38 gleixner Exp $ */
#include <linux/kernel.h>
#include <linux/string.h>
@@ -265,21 +265,21 @@ static unsigned char testdata[TESTDATA_LEN] = {
static unsigned char comprbuf[TESTDATA_LEN];
static unsigned char decomprbuf[TESTDATA_LEN];
-int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in,
- unsigned char *data_out, __u32 cdatalen, __u32 datalen);
-unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 *datalen, __u32 *cdatalen);
+int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in,
+ unsigned char *data_out, uint32_t cdatalen, uint32_t datalen);
+unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out,
+ uint32_t *datalen, uint32_t *cdatalen);
int init_module(void ) {
unsigned char comprtype;
- __u32 c, d;
+ uint32_t c, d;
int ret;
printk("Original data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- testdata[0],testdata[1],testdata[2],testdata[3],
- testdata[4],testdata[5],testdata[6],testdata[7],
- testdata[8],testdata[9],testdata[10],testdata[11],
- testdata[12],testdata[13],testdata[14],testdata[15]);
+ testdata[0],testdata[1],testdata[2],testdata[3],
+ testdata[4],testdata[5],testdata[6],testdata[7],
+ testdata[8],testdata[9],testdata[10],testdata[11],
+ testdata[12],testdata[13],testdata[14],testdata[15]);
d = TESTDATA_LEN;
c = TESTDATA_LEN;
comprtype = jffs2_compress(testdata, comprbuf, &d, &c);
@@ -287,18 +287,18 @@ int init_module(void ) {
printk("jffs2_compress used compression type %d. Compressed size %d, uncompressed size %d\n",
comprtype, c, d);
printk("Compressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3],
- comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7],
- comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11],
- comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]);
+ comprbuf[0],comprbuf[1],comprbuf[2],comprbuf[3],
+ comprbuf[4],comprbuf[5],comprbuf[6],comprbuf[7],
+ comprbuf[8],comprbuf[9],comprbuf[10],comprbuf[11],
+ comprbuf[12],comprbuf[13],comprbuf[14],comprbuf[15]);
ret = jffs2_decompress(comprtype, comprbuf, decomprbuf, c, d);
printk("jffs2_decompress returned %d\n", ret);
printk("Decompressed data: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3],
- decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7],
- decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11],
- decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]);
+ decomprbuf[0],decomprbuf[1],decomprbuf[2],decomprbuf[3],
+ decomprbuf[4],decomprbuf[5],decomprbuf[6],decomprbuf[7],
+ decomprbuf[8],decomprbuf[9],decomprbuf[10],decomprbuf[11],
+ decomprbuf[12],decomprbuf[13],decomprbuf[14],decomprbuf[15]);
if (memcmp(decomprbuf, testdata, d))
printk("Compression and decompression corrupted data\n");
else
diff --git a/linux-2.4.x/fs/jffs2/crc32.c b/linux-2.4.x/fs/jffs2/crc32.c
index b3b6a81..c4694ea 100644
--- a/linux-2.4.x/fs/jffs2/crc32.c
+++ b/linux-2.4.x/fs/jffs2/crc32.c
@@ -37,11 +37,11 @@
* polynomial $edb88320
*/
-/* $Id: crc32.c,v 1.3 2001/02/07 16:45:32 dwmw2 Exp $ */
+/* $Id: crc32.c,v 1.4 2002/01/03 15:20:44 dwmw2 Exp $ */
#include "crc32.h"
-const __u32 crc32_table[256] = {
+const uint32_t crc32_table[256] = {
0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
diff --git a/linux-2.4.x/fs/jffs2/crc32.h b/linux-2.4.x/fs/jffs2/crc32.h
index cd8979f..bd09d6a 100644
--- a/linux-2.4.x/fs/jffs2/crc32.h
+++ b/linux-2.4.x/fs/jffs2/crc32.h
@@ -1,16 +1,16 @@
#ifndef CRC32_H
#define CRC32_H
-/* $Id: crc32.h,v 1.3 2001/02/26 14:44:37 dwmw2 Exp $ */
+/* $Id: crc32.h,v 1.5 2005/11/07 11:14:38 gleixner Exp $ */
#include <linux/types.h>
-extern const __u32 crc32_table[256];
+extern const uint32_t crc32_table[256];
/* Return a 32-bit CRC of the contents of the buffer. */
-static inline __u32
-crc32(__u32 val, const void *ss, int len)
+static inline uint32_t
+crc32(uint32_t val, const void *ss, int len)
{
const unsigned char *s = ss;
while (--len >= 0)
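/*
 * The crc32() inline above consumes the 256-entry crc32_table built from the
 * reflected polynomial 0xedb88320 mentioned at the top of crc32.c.  This
 * standalone sketch generates that table at runtime and applies the usual
 * table-driven update step (the loop body is truncated in the hunk above, so
 * the step shown here is the conventional one, not quoted from the patch).
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t crc32_table[256];

static void make_table(void)
{
	uint32_t c;
	int i, k;

	for (i = 0; i < 256; i++) {
		c = (uint32_t)i;
		for (k = 0; k < 8; k++)
			c = (c & 1) ? 0xedb88320U ^ (c >> 1) : c >> 1;
		crc32_table[i] = c;
	}
}

static uint32_t crc32(uint32_t val, const void *ss, int len)
{
	const unsigned char *s = ss;

	while (--len >= 0)
		val = crc32_table[(val ^ *s++) & 0xff] ^ (val >> 8);
	return val;
}

int main(void)
{
	const char *msg = "123456789";

	make_table();
	/* jffs2 typically seeds its node CRCs with 0 rather than ~0 */
	printf("crc32(0, \"%s\") = 0x%08x\n", msg, crc32(0, msg, (int)strlen(msg)));
	return 0;
}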
diff --git a/linux-2.4.x/fs/jffs2/debug.c b/linux-2.4.x/fs/jffs2/debug.c
new file mode 100644
index 0000000..41769a2
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/debug.c
@@ -0,0 +1,695 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: debug.c,v 1.15 2005/12/11 12:33:16 dedekind Exp $
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/crc32.h>
+#include <linux/jffs2.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+#include "debug.h"
+
+#ifdef JFFS2_DBG_SANITY_CHECKS
+
+void
+__jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ if (unlikely(jeb && jeb->used_size + jeb->dirty_size +
+ jeb->free_size + jeb->wasted_size +
+ jeb->unchecked_size != c->sector_size)) {
+ JFFS2_ERROR("eeep, space accounting for block at 0x%08x is screwed.\n", jeb->offset);
+ JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n",
+ jeb->free_size, jeb->dirty_size, jeb->used_size,
+ jeb->wasted_size, jeb->unchecked_size, c->sector_size);
+ BUG();
+ }
+
+ if (unlikely(c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size
+ + c->wasted_size + c->unchecked_size != c->flash_size)) {
+ JFFS2_ERROR("eeep, space accounting superblock info is screwed.\n");
+ JFFS2_ERROR("free %#08x + dirty %#08x + used %#08x + erasing %#08x + bad %#08x + wasted %#08x + unchecked %#08x != total %#08x.\n",
+ c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size,
+ c->wasted_size, c->unchecked_size, c->flash_size);
+ BUG();
+ }
+}
+
+void
+__jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ spin_lock(&c->erase_completion_lock);
+ jffs2_dbg_acct_sanity_check_nolock(c, jeb);
+ spin_unlock(&c->erase_completion_lock);
+}
+
+#endif /* JFFS2_DBG_SANITY_CHECKS */
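/*
 * A worked instance (with invented numbers) of the per-eraseblock invariant
 * checked by the sanity code above: used + dirty + free + wasted + unchecked
 * must always equal the sector size.  The figures are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sector_size = 64 * 1024;	/* hypothetical 64 KiB block */
	uint32_t used = 40000, dirty = 12000, wasted = 1000, unchecked = 0;
	uint32_t free_size = 12536;		/* 40000+12000+1000+0+12536 == 65536 */
	uint32_t sum = used + dirty + free_size + wasted + unchecked;

	if (sum != sector_size)
		printf("space accounting for this block is screwed: %u != %u\n",
		       sum, sector_size);
	else
		printf("free %u + dirty %u + used %u + wasted %u + unchecked %u == %u\n",
		       free_size, dirty, used, wasted, unchecked, sector_size);
	return 0;
}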
+
+#ifdef JFFS2_DBG_PARANOIA_CHECKS
+/*
+ * Check the fragtree.
+ */
+void
+__jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f)
+{
+ down(&f->sem);
+ __jffs2_dbg_fragtree_paranoia_check_nolock(f);
+ up(&f->sem);
+}
+
+void
+__jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f)
+{
+ struct jffs2_node_frag *frag;
+ int bitched = 0;
+
+ for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
+ struct jffs2_full_dnode *fn = frag->node;
+
+ if (!fn || !fn->raw)
+ continue;
+
+ if (ref_flags(fn->raw) == REF_PRISTINE) {
+ if (fn->frags > 1) {
+ JFFS2_ERROR("REF_PRISTINE node at 0x%08x had %d frags. Tell dwmw2.\n",
+ ref_offset(fn->raw), fn->frags);
+ bitched = 1;
+ }
+
+ /* A hole node which isn't multi-page should be garbage-collected
+ and merged anyway, so we just check for the frag size here,
+ rather than mucking around with actually reading the node
+ and checking the compression type, which is the real way
+ to tell a hole node. */
+ if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag)
+ && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) {
+ JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n",
+ ref_offset(fn->raw));
+ bitched = 1;
+ }
+
+ if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag)
+ && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) {
+ JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. Tell dwmw2.\n",
+ ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size);
+ bitched = 1;
+ }
+ }
+ }
+
+ if (bitched) {
+ JFFS2_ERROR("fragtree is corrupted.\n");
+ __jffs2_dbg_dump_fragtree_nolock(f);
+ BUG();
+ }
+}
+
+/*
+ * Check if the flash contains all 0xFF before we start writing.
+ */
+void
+__jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c,
+ uint32_t ofs, int len)
+{
+ int ret, i;
+ unsigned char *buf;
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ if (jffs2_flash_read_safe(c, ofs, len, buf)) {
+ kfree(buf);
+ return;
+ }
+
+ ret = 0;
+	for (i = 0; i < len; i++)
+		if (buf[i] != 0xff) {
+			ret = 1;
+			/* stop at the first non-0xFF byte so that the
+			   'ofs + i' reported below really is the first
+			   corrupted offset */
+			break;
+		}
+
+ if (ret) {
+		JFFS2_ERROR("argh, about to write node to %#08x on flash, but there is already data there. The first corrupted byte is at offset %#08x.\n",
+ ofs, ofs + i);
+ __jffs2_dbg_dump_buffer(buf, len, ofs);
+ kfree(buf);
+ BUG();
+ }
+
+ kfree(buf);
+}
+
+/*
+ * Check the space accounting and node_ref list correctness for the JFFS2 eraseblock 'jeb'.
+ */
+void
+__jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ spin_lock(&c->erase_completion_lock);
+ __jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+ spin_unlock(&c->erase_completion_lock);
+}
+
+void
+__jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ uint32_t my_used_size = 0;
+ uint32_t my_unchecked_size = 0;
+ uint32_t my_dirty_size = 0;
+ struct jffs2_raw_node_ref *ref2 = jeb->first_node;
+
+ while (ref2) {
+ uint32_t totlen = ref_totlen(c, jeb, ref2);
+
+ if (ref2->flash_offset < jeb->offset ||
+ ref2->flash_offset > jeb->offset + c->sector_size) {
+ JFFS2_ERROR("node_ref %#08x shouldn't be in block at %#08x.\n",
+ ref_offset(ref2), jeb->offset);
+ goto error;
+
+ }
+ if (ref_flags(ref2) == REF_UNCHECKED)
+ my_unchecked_size += totlen;
+ else if (!ref_obsolete(ref2))
+ my_used_size += totlen;
+ else
+ my_dirty_size += totlen;
+
+ if ((!ref2->next_phys) != (ref2 == jeb->last_node)) {
+ JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next_phys at %#08x (mem %p), last_node is at %#08x (mem %p).\n",
+ ref_offset(ref2), ref2, ref_offset(ref2->next_phys), ref2->next_phys,
+ ref_offset(jeb->last_node), jeb->last_node);
+ goto error;
+ }
+ ref2 = ref2->next_phys;
+ }
+
+ if (my_used_size != jeb->used_size) {
+ JFFS2_ERROR("Calculated used size %#08x != stored used size %#08x.\n",
+ my_used_size, jeb->used_size);
+ goto error;
+ }
+
+ if (my_unchecked_size != jeb->unchecked_size) {
+ JFFS2_ERROR("Calculated unchecked size %#08x != stored unchecked size %#08x.\n",
+ my_unchecked_size, jeb->unchecked_size);
+ goto error;
+ }
+
+#if 0
+	/* This should work when we implement ref->__totlen elimination */
+ if (my_dirty_size != jeb->dirty_size + jeb->wasted_size) {
+ JFFS2_ERROR("Calculated dirty+wasted size %#08x != stored dirty + wasted size %#08x\n",
+ my_dirty_size, jeb->dirty_size + jeb->wasted_size);
+ goto error;
+ }
+
+ if (jeb->free_size == 0
+ && my_used_size + my_unchecked_size + my_dirty_size != c->sector_size) {
+ JFFS2_ERROR("The sum of all nodes in block (%#x) != size of block (%#x)\n",
+ my_used_size + my_unchecked_size + my_dirty_size,
+ c->sector_size);
+ goto error;
+ }
+#endif
+
+ return;
+
+error:
+ __jffs2_dbg_dump_node_refs_nolock(c, jeb);
+ __jffs2_dbg_dump_jeb_nolock(jeb);
+ __jffs2_dbg_dump_block_lists_nolock(c);
+ BUG();
+
+}
+#endif /* JFFS2_DBG_PARANOIA_CHECKS */
+
+#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
+/*
+ * Dump the node_refs of the 'jeb' JFFS2 eraseblock.
+ */
+void
+__jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ spin_lock(&c->erase_completion_lock);
+ __jffs2_dbg_dump_node_refs_nolock(c, jeb);
+ spin_unlock(&c->erase_completion_lock);
+}
+
+void
+__jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
+{
+ struct jffs2_raw_node_ref *ref;
+ int i = 0;
+
+ printk(JFFS2_DBG_MSG_PREFIX " Dump node_refs of the eraseblock %#08x\n", jeb->offset);
+ if (!jeb->first_node) {
+ printk(JFFS2_DBG_MSG_PREFIX " no nodes in the eraseblock %#08x\n", jeb->offset);
+ return;
+ }
+
+ printk(JFFS2_DBG);
+ for (ref = jeb->first_node; ; ref = ref->next_phys) {
+ printk("%#08x(%#x)", ref_offset(ref), ref->__totlen);
+ if (ref->next_phys)
+ printk("->");
+ else
+ break;
+ if (++i == 4) {
+ i = 0;
+ printk("\n" JFFS2_DBG);
+ }
+ }
+ printk("\n");
+}
+
+/*
+ * Dump an eraseblock's space accounting.
+ */
+void
+__jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+ spin_lock(&c->erase_completion_lock);
+ __jffs2_dbg_dump_jeb_nolock(jeb);
+ spin_unlock(&c->erase_completion_lock);
+}
+
+void
+__jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb)
+{
+ if (!jeb)
+ return;
+
+ printk(JFFS2_DBG_MSG_PREFIX " dump space accounting for the eraseblock at %#08x:\n",
+ jeb->offset);
+
+ printk(JFFS2_DBG "used_size: %#08x\n", jeb->used_size);
+ printk(JFFS2_DBG "dirty_size: %#08x\n", jeb->dirty_size);
+ printk(JFFS2_DBG "wasted_size: %#08x\n", jeb->wasted_size);
+ printk(JFFS2_DBG "unchecked_size: %#08x\n", jeb->unchecked_size);
+ printk(JFFS2_DBG "free_size: %#08x\n", jeb->free_size);
+}
+
+void
+__jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c)
+{
+ spin_lock(&c->erase_completion_lock);
+ __jffs2_dbg_dump_block_lists_nolock(c);
+ spin_unlock(&c->erase_completion_lock);
+}
+
+void
+__jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c)
+{
+ printk(JFFS2_DBG_MSG_PREFIX " dump JFFS2 blocks lists:\n");
+
+ printk(JFFS2_DBG "flash_size: %#08x\n", c->flash_size);
+ printk(JFFS2_DBG "used_size: %#08x\n", c->used_size);
+ printk(JFFS2_DBG "dirty_size: %#08x\n", c->dirty_size);
+ printk(JFFS2_DBG "wasted_size: %#08x\n", c->wasted_size);
+ printk(JFFS2_DBG "unchecked_size: %#08x\n", c->unchecked_size);
+ printk(JFFS2_DBG "free_size: %#08x\n", c->free_size);
+ printk(JFFS2_DBG "erasing_size: %#08x\n", c->erasing_size);
+ printk(JFFS2_DBG "bad_size: %#08x\n", c->bad_size);
+ printk(JFFS2_DBG "sector_size: %#08x\n", c->sector_size);
+ printk(JFFS2_DBG "jffs2_reserved_blocks size: %#08x\n",
+ c->sector_size * c->resv_blocks_write);
+
+ if (c->nextblock)
+ printk(JFFS2_DBG "nextblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ c->nextblock->offset, c->nextblock->used_size,
+ c->nextblock->dirty_size, c->nextblock->wasted_size,
+ c->nextblock->unchecked_size, c->nextblock->free_size);
+ else
+ printk(JFFS2_DBG "nextblock: NULL\n");
+
+ if (c->gcblock)
+ printk(JFFS2_DBG "gcblock: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size,
+ c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
+ else
+ printk(JFFS2_DBG "gcblock: NULL\n");
+
+ if (list_empty(&c->clean_list)) {
+ printk(JFFS2_DBG "clean_list: empty\n");
+ } else {
+ struct list_head *this;
+ int numblocks = 0;
+ uint32_t dirty = 0;
+
+ list_for_each(this, &c->clean_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+ numblocks ++;
+ dirty += jeb->wasted_size;
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "clean_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+
+ printk (JFFS2_DBG "Contains %d blocks with total wasted size %u, average wasted size: %u\n",
+ numblocks, dirty, dirty / numblocks);
+ }
+
+ if (list_empty(&c->very_dirty_list)) {
+ printk(JFFS2_DBG "very_dirty_list: empty\n");
+ } else {
+ struct list_head *this;
+ int numblocks = 0;
+ uint32_t dirty = 0;
+
+ list_for_each(this, &c->very_dirty_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ numblocks ++;
+ dirty += jeb->dirty_size;
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "very_dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+
+ printk (JFFS2_DBG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
+ numblocks, dirty, dirty / numblocks);
+ }
+
+ if (list_empty(&c->dirty_list)) {
+ printk(JFFS2_DBG "dirty_list: empty\n");
+ } else {
+ struct list_head *this;
+ int numblocks = 0;
+ uint32_t dirty = 0;
+
+ list_for_each(this, &c->dirty_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ numblocks ++;
+ dirty += jeb->dirty_size;
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "dirty_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+
+ printk (JFFS2_DBG "contains %d blocks with total dirty size %u, average dirty size: %u\n",
+ numblocks, dirty, dirty / numblocks);
+ }
+
+ if (list_empty(&c->erasable_list)) {
+ printk(JFFS2_DBG "erasable_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->erasable_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "erasable_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->erasing_list)) {
+ printk(JFFS2_DBG "erasing_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->erasing_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "erasing_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->erase_pending_list)) {
+ printk(JFFS2_DBG "erase_pending_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->erase_pending_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "erase_pending_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->erasable_pending_wbuf_list)) {
+ printk(JFFS2_DBG "erasable_pending_wbuf_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->erasable_pending_wbuf_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "erasable_pending_wbuf_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->free_list)) {
+ printk(JFFS2_DBG "free_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->free_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "free_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->bad_list)) {
+ printk(JFFS2_DBG "bad_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->bad_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "bad_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+
+ if (list_empty(&c->bad_used_list)) {
+ printk(JFFS2_DBG "bad_used_list: empty\n");
+ } else {
+ struct list_head *this;
+
+ list_for_each(this, &c->bad_used_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ if (!(jeb->used_size == 0 && jeb->dirty_size == 0 && jeb->wasted_size == 0)) {
+ printk(JFFS2_DBG "bad_used_list: %#08x (used %#08x, dirty %#08x, wasted %#08x, unchecked %#08x, free %#08x)\n",
+ jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size,
+ jeb->unchecked_size, jeb->free_size);
+ }
+ }
+ }
+}
+
+void
+__jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f)
+{
+ down(&f->sem);
+ jffs2_dbg_dump_fragtree_nolock(f);
+ up(&f->sem);
+}
+
+void
+__jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f)
+{
+ struct jffs2_node_frag *this = frag_first(&f->fragtree);
+ uint32_t lastofs = 0;
+ int buggy = 0;
+
+ printk(JFFS2_DBG_MSG_PREFIX " dump fragtree of ino #%u\n", f->inocache->ino);
+ while(this) {
+ if (this->node)
+ printk(JFFS2_DBG "frag %#04x-%#04x: %#08x(%d) on flash (*%p), left (%p), right (%p), parent (%p)\n",
+ this->ofs, this->ofs+this->size, ref_offset(this->node->raw),
+ ref_flags(this->node->raw), this, frag_left(this), frag_right(this),
+ frag_parent(this));
+ else
+ printk(JFFS2_DBG "frag %#04x-%#04x: hole (*%p). left (%p), right (%p), parent (%p)\n",
+ this->ofs, this->ofs+this->size, this, frag_left(this),
+ frag_right(this), frag_parent(this));
+ if (this->ofs != lastofs)
+ buggy = 1;
+ lastofs = this->ofs + this->size;
+ this = frag_next(this);
+ }
+
+ if (f->metadata)
+ printk(JFFS2_DBG "metadata at 0x%08x\n", ref_offset(f->metadata->raw));
+
+ if (buggy) {
+ JFFS2_ERROR("frag tree got a hole in it.\n");
+ BUG();
+ }
+}
+
+#define JFFS2_BUFDUMP_BYTES_PER_LINE 32
+void
+__jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs)
+{
+ int skip;
+ int i;
+
+ printk(JFFS2_DBG_MSG_PREFIX " dump from offset %#08x to offset %#08x (%x bytes).\n",
+ offs, offs + len, len);
+ i = skip = offs % JFFS2_BUFDUMP_BYTES_PER_LINE;
+ offs = offs & ~(JFFS2_BUFDUMP_BYTES_PER_LINE - 1);
+
+ if (skip != 0)
+ printk(JFFS2_DBG "%#08x: ", offs);
+
+ while (skip--)
+ printk(" ");
+
+ while (i < len) {
+ if ((i % JFFS2_BUFDUMP_BYTES_PER_LINE) == 0 && i != len -1) {
+ if (i != 0)
+ printk("\n");
+ offs += JFFS2_BUFDUMP_BYTES_PER_LINE;
+			printk(JFFS2_DBG "%#08x: ", offs);
+ }
+
+ printk("%02x ", buf[i]);
+
+ i += 1;
+ }
+
+ printk("\n");
+}
+
+/*
+ * Dump a JFFS2 node.
+ */
+void
+__jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs)
+{
+ union jffs2_node_union node;
+ int len = sizeof(union jffs2_node_union);
+ uint32_t crc;
+
+ printk(JFFS2_DBG_MSG_PREFIX " dump node at offset %#08x.\n", ofs);
+
+ if (jffs2_flash_read_safe(c, ofs, len, (unsigned char *)&node))
+ return;
+
+ printk(JFFS2_DBG "magic:\t%#04x\n", je16_to_cpu(node.u.magic));
+ printk(JFFS2_DBG "nodetype:\t%#04x\n", je16_to_cpu(node.u.nodetype));
+ printk(JFFS2_DBG "totlen:\t%#08x\n", je32_to_cpu(node.u.totlen));
+ printk(JFFS2_DBG "hdr_crc:\t%#08x\n", je32_to_cpu(node.u.hdr_crc));
+
+ crc = crc32(0, &node.u, sizeof(node.u) - 4);
+ if (crc != je32_to_cpu(node.u.hdr_crc)) {
+ JFFS2_ERROR("wrong common header CRC.\n");
+ return;
+ }
+
+ if (je16_to_cpu(node.u.magic) != JFFS2_MAGIC_BITMASK &&
+ je16_to_cpu(node.u.magic) != JFFS2_OLD_MAGIC_BITMASK)
+ {
+ JFFS2_ERROR("wrong node magic: %#04x instead of %#04x.\n",
+ je16_to_cpu(node.u.magic), JFFS2_MAGIC_BITMASK);
+ return;
+ }
+
+ switch(je16_to_cpu(node.u.nodetype)) {
+
+ case JFFS2_NODETYPE_INODE:
+
+		printk(JFFS2_DBG "the node is an inode node\n");
+ printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.i.ino));
+ printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.i.version));
+ printk(JFFS2_DBG "mode:\t%#08x\n", node.i.mode.m);
+ printk(JFFS2_DBG "uid:\t%#04x\n", je16_to_cpu(node.i.uid));
+ printk(JFFS2_DBG "gid:\t%#04x\n", je16_to_cpu(node.i.gid));
+ printk(JFFS2_DBG "isize:\t%#08x\n", je32_to_cpu(node.i.isize));
+ printk(JFFS2_DBG "atime:\t%#08x\n", je32_to_cpu(node.i.atime));
+ printk(JFFS2_DBG "mtime:\t%#08x\n", je32_to_cpu(node.i.mtime));
+ printk(JFFS2_DBG "ctime:\t%#08x\n", je32_to_cpu(node.i.ctime));
+ printk(JFFS2_DBG "offset:\t%#08x\n", je32_to_cpu(node.i.offset));
+ printk(JFFS2_DBG "csize:\t%#08x\n", je32_to_cpu(node.i.csize));
+ printk(JFFS2_DBG "dsize:\t%#08x\n", je32_to_cpu(node.i.dsize));
+ printk(JFFS2_DBG "compr:\t%#02x\n", node.i.compr);
+ printk(JFFS2_DBG "usercompr:\t%#02x\n", node.i.usercompr);
+ printk(JFFS2_DBG "flags:\t%#04x\n", je16_to_cpu(node.i.flags));
+ printk(JFFS2_DBG "data_crc:\t%#08x\n", je32_to_cpu(node.i.data_crc));
+ printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.i.node_crc));
+
+ crc = crc32(0, &node.i, sizeof(node.i) - 8);
+ if (crc != je32_to_cpu(node.i.node_crc)) {
+ JFFS2_ERROR("wrong node header CRC.\n");
+ return;
+ }
+ break;
+
+ case JFFS2_NODETYPE_DIRENT:
+
+		printk(JFFS2_DBG "the node is a dirent node\n");
+ printk(JFFS2_DBG "pino:\t%#08x\n", je32_to_cpu(node.d.pino));
+ printk(JFFS2_DBG "version:\t%#08x\n", je32_to_cpu(node.d.version));
+ printk(JFFS2_DBG "ino:\t%#08x\n", je32_to_cpu(node.d.ino));
+ printk(JFFS2_DBG "mctime:\t%#08x\n", je32_to_cpu(node.d.mctime));
+ printk(JFFS2_DBG "nsize:\t%#02x\n", node.d.nsize);
+ printk(JFFS2_DBG "type:\t%#02x\n", node.d.type);
+ printk(JFFS2_DBG "node_crc:\t%#08x\n", je32_to_cpu(node.d.node_crc));
+ printk(JFFS2_DBG "name_crc:\t%#08x\n", je32_to_cpu(node.d.name_crc));
+
+ node.d.name[node.d.nsize] = '\0';
+ printk(JFFS2_DBG "name:\t\"%s\"\n", node.d.name);
+
+ crc = crc32(0, &node.d, sizeof(node.d) - 8);
+ if (crc != je32_to_cpu(node.d.node_crc)) {
+ JFFS2_ERROR("wrong node header CRC.\n");
+ return;
+ }
+ break;
+
+ default:
+ printk(JFFS2_DBG "node type is unknown\n");
+ break;
+ }
+}
+#endif /* JFFS2_DBG_DUMPS || JFFS2_DBG_PARANOIA_CHECKS */
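
All of the sanity and paranoia checks in debug.c above revolve around one space-accounting invariant: for every eraseblock the five per-block counters must exactly partition the sector, and the per-block totals in turn must partition the whole flash. The sketch below restates just the per-eraseblock half as a standalone helper so the invariant is easy to see in isolation; the struct and function names are illustrative only and are not part of the patch.

#include <stdint.h>

/* Illustration only -- mirrors the check in __jffs2_dbg_acct_sanity_check_nolock(). */
struct eb_counters {
	uint32_t used_size;
	uint32_t dirty_size;
	uint32_t wasted_size;
	uint32_t unchecked_size;
	uint32_t free_size;
};

/* Returns 1 when the five counters exactly account for the whole sector. */
static int eb_accounting_is_consistent(const struct eb_counters *eb,
				       uint32_t sector_size)
{
	return eb->used_size + eb->dirty_size + eb->wasted_size +
	       eb->unchecked_size + eb->free_size == sector_size;
}
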
diff --git a/linux-2.4.x/fs/jffs2/debug.h b/linux-2.4.x/fs/jffs2/debug.h
new file mode 100644
index 0000000..4507482
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/debug.h
@@ -0,0 +1,279 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: debug.h,v 1.22 2005/11/28 22:41:38 gleixner Exp $
+ *
+ */
+#ifndef _JFFS2_DEBUG_H_
+#define _JFFS2_DEBUG_H_
+
+#include <linux/config.h>
+
+#ifndef CONFIG_JFFS2_FS_DEBUG
+#define CONFIG_JFFS2_FS_DEBUG 0
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG > 0
+/* Enable "paranoia" checks and dumps */
+#define JFFS2_DBG_PARANOIA_CHECKS
+#define JFFS2_DBG_DUMPS
+
+/*
+ * By defining/undefining the below macros one may select debugging messages
+ * for specific JFFS2 subsystems.
+ */
+#define JFFS2_DBG_READINODE_MESSAGES
+#define JFFS2_DBG_FRAGTREE_MESSAGES
+#define JFFS2_DBG_DENTLIST_MESSAGES
+#define JFFS2_DBG_NODEREF_MESSAGES
+#define JFFS2_DBG_INOCACHE_MESSAGES
+#define JFFS2_DBG_SUMMARY_MESSAGES
+#define JFFS2_DBG_FSBUILD_MESSAGES
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG > 1
+#define JFFS2_DBG_FRAGTREE2_MESSAGES
+#define JFFS2_DBG_MEMALLOC_MESSAGES
+#endif
+
+/* Sanity checks are supposed to be light-weight and enabled by default */
+#define JFFS2_DBG_SANITY_CHECKS
+
+/*
+ * Dx() are mainly used for debugging messages; they should eventually go away
+ * and be superseded by the nicer dbg_xxx() macros...
+ */
+#if CONFIG_JFFS2_FS_DEBUG > 0
+#define D1(x) x
+#else
+#define D1(x)
+#endif
+
+#if CONFIG_JFFS2_FS_DEBUG > 1
+#define D2(x) x
+#else
+#define D2(x)
+#endif
+
+/* The prefixes of JFFS2 messages */
+#define JFFS2_DBG_PREFIX "[JFFS2 DBG]"
+#define JFFS2_ERR_PREFIX "JFFS2 error:"
+#define JFFS2_WARN_PREFIX "JFFS2 warning:"
+#define JFFS2_NOTICE_PREFIX "JFFS2 notice:"
+
+#define JFFS2_ERR KERN_ERR
+#define JFFS2_WARN KERN_WARNING
+#define JFFS2_NOT KERN_NOTICE
+#define JFFS2_DBG KERN_DEBUG
+
+#define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX
+#define JFFS2_ERR_MSG_PREFIX JFFS2_ERR JFFS2_ERR_PREFIX
+#define JFFS2_WARN_MSG_PREFIX JFFS2_WARN JFFS2_WARN_PREFIX
+#define JFFS2_NOTICE_MSG_PREFIX JFFS2_NOT JFFS2_NOTICE_PREFIX
+
+/* JFFS2 message macros */
+#define JFFS2_ERROR(fmt, ...) \
+ do { \
+ printk(JFFS2_ERR_MSG_PREFIX \
+ " (%d) %s: " fmt, current->pid, \
+ __FUNCTION__ , ##__VA_ARGS__); \
+ } while(0)
+
+#define JFFS2_WARNING(fmt, ...) \
+ do { \
+ printk(JFFS2_WARN_MSG_PREFIX \
+ " (%d) %s: " fmt, current->pid, \
+ __FUNCTION__ , ##__VA_ARGS__); \
+ } while(0)
+
+#define JFFS2_NOTICE(fmt, ...) \
+ do { \
+ printk(JFFS2_NOTICE_MSG_PREFIX \
+ " (%d) %s: " fmt, current->pid, \
+ __FUNCTION__ , ##__VA_ARGS__); \
+ } while(0)
+
+#define JFFS2_DEBUG(fmt, ...) \
+ do { \
+ printk(JFFS2_DBG_MSG_PREFIX \
+ " (%d) %s: " fmt, current->pid, \
+ __FUNCTION__ , ##__VA_ARGS__); \
+ } while(0)
+
+/*
+ * We split our debugging messages into several parts, depending on the JFFS2
+ * subsystem the message belongs to.
+ */
+/* Read inode debugging messages */
+#ifdef JFFS2_DBG_READINODE_MESSAGES
+#define dbg_readinode(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_readinode(fmt, ...)
+#endif
+
+/* Fragtree build debugging messages */
+#ifdef JFFS2_DBG_FRAGTREE_MESSAGES
+#define dbg_fragtree(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_fragtree(fmt, ...)
+#endif
+#ifdef JFFS2_DBG_FRAGTREE2_MESSAGES
+#define dbg_fragtree2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_fragtree2(fmt, ...)
+#endif
+
+/* Directory entry list manipulation debugging messages */
+#ifdef JFFS2_DBG_DENTLIST_MESSAGES
+#define dbg_dentlist(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_dentlist(fmt, ...)
+#endif
+
+/* Print the messages about manipulating node_refs */
+#ifdef JFFS2_DBG_NODEREF_MESSAGES
+#define dbg_noderef(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_noderef(fmt, ...)
+#endif
+
+/* Manipulations with the list of inodes (JFFS2 inocache) */
+#ifdef JFFS2_DBG_INOCACHE_MESSAGES
+#define dbg_inocache(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_inocache(fmt, ...)
+#endif
+
+/* Summary debugging messages */
+#ifdef JFFS2_DBG_SUMMARY_MESSAGES
+#define dbg_summary(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_summary(fmt, ...)
+#endif
+
+/* File system build messages */
+#ifdef JFFS2_DBG_FSBUILD_MESSAGES
+#define dbg_fsbuild(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_fsbuild(fmt, ...)
+#endif
+
+/* Watch the object allocations */
+#ifdef JFFS2_DBG_MEMALLOC_MESSAGES
+#define dbg_memalloc(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
+#else
+#define dbg_memalloc(fmt, ...)
+#endif
+
+
+/* "Sanity" checks */
+void
+__jffs2_dbg_acct_sanity_check_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_acct_sanity_check(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+
+/* "Paranoia" checks */
+void
+__jffs2_dbg_fragtree_paranoia_check(struct jffs2_inode_info *f);
+void
+__jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f);
+void
+__jffs2_dbg_acct_paranoia_check(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_prewrite_paranoia_check(struct jffs2_sb_info *c,
+ uint32_t ofs, int len);
+
+/* "Dump" functions */
+void
+__jffs2_dbg_dump_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_dump_jeb_nolock(struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_dump_block_lists(struct jffs2_sb_info *c);
+void
+__jffs2_dbg_dump_block_lists_nolock(struct jffs2_sb_info *c);
+void
+__jffs2_dbg_dump_node_refs(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb);
+void
+__jffs2_dbg_dump_fragtree(struct jffs2_inode_info *f);
+void
+__jffs2_dbg_dump_fragtree_nolock(struct jffs2_inode_info *f);
+void
+__jffs2_dbg_dump_buffer(unsigned char *buf, int len, uint32_t offs);
+void
+__jffs2_dbg_dump_node(struct jffs2_sb_info *c, uint32_t ofs);
+
+#ifdef JFFS2_DBG_PARANOIA_CHECKS
+#define jffs2_dbg_fragtree_paranoia_check(f) \
+ __jffs2_dbg_fragtree_paranoia_check(f)
+#define jffs2_dbg_fragtree_paranoia_check_nolock(f) \
+ __jffs2_dbg_fragtree_paranoia_check_nolock(f)
+#define jffs2_dbg_acct_paranoia_check(c, jeb) \
+ __jffs2_dbg_acct_paranoia_check(c,jeb)
+#define jffs2_dbg_acct_paranoia_check_nolock(c, jeb) \
+ __jffs2_dbg_acct_paranoia_check_nolock(c,jeb)
+#define jffs2_dbg_prewrite_paranoia_check(c, ofs, len) \
+ __jffs2_dbg_prewrite_paranoia_check(c, ofs, len)
+#else
+#define jffs2_dbg_fragtree_paranoia_check(f)
+#define jffs2_dbg_fragtree_paranoia_check_nolock(f)
+#define jffs2_dbg_acct_paranoia_check(c, jeb)
+#define jffs2_dbg_acct_paranoia_check_nolock(c, jeb)
+#define jffs2_dbg_prewrite_paranoia_check(c, ofs, len)
+#endif /* !JFFS2_DBG_PARANOIA_CHECKS */
+
+#ifdef JFFS2_DBG_DUMPS
+#define jffs2_dbg_dump_jeb(c, jeb) \
+ __jffs2_dbg_dump_jeb(c, jeb);
+#define jffs2_dbg_dump_jeb_nolock(jeb) \
+ __jffs2_dbg_dump_jeb_nolock(jeb);
+#define jffs2_dbg_dump_block_lists(c) \
+ __jffs2_dbg_dump_block_lists(c)
+#define jffs2_dbg_dump_block_lists_nolock(c) \
+ __jffs2_dbg_dump_block_lists_nolock(c)
+#define jffs2_dbg_dump_fragtree(f) \
+ __jffs2_dbg_dump_fragtree(f);
+#define jffs2_dbg_dump_fragtree_nolock(f) \
+ __jffs2_dbg_dump_fragtree_nolock(f);
+#define jffs2_dbg_dump_buffer(buf, len, offs) \
+	__jffs2_dbg_dump_buffer(buf, len, offs);
+#define jffs2_dbg_dump_node(c, ofs) \
+ __jffs2_dbg_dump_node(c, ofs);
+#else
+#define jffs2_dbg_dump_jeb(c, jeb)
+#define jffs2_dbg_dump_jeb_nolock(jeb)
+#define jffs2_dbg_dump_block_lists(c)
+#define jffs2_dbg_dump_block_lists_nolock(c)
+#define jffs2_dbg_dump_fragtree(f)
+#define jffs2_dbg_dump_fragtree_nolock(f)
+#define jffs2_dbg_dump_buffer(buf, len, offs)
+#define jffs2_dbg_dump_node(c, ofs)
+#endif /* !JFFS2_DBG_DUMPS */
+
+#ifdef JFFS2_DBG_SANITY_CHECKS
+#define jffs2_dbg_acct_sanity_check(c, jeb) \
+ __jffs2_dbg_acct_sanity_check(c, jeb)
+#define jffs2_dbg_acct_sanity_check_nolock(c, jeb) \
+ __jffs2_dbg_acct_sanity_check_nolock(c, jeb)
+#else
+#define jffs2_dbg_acct_sanity_check(c, jeb)
+#define jffs2_dbg_acct_sanity_check_nolock(c, jeb)
+#endif /* !JFFS2_DBG_SANITY_CHECKS */
+
+#endif /* _JFFS2_DEBUG_H_ */
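
Callers are expected to use the un-prefixed wrapper macros at the end of debug.h rather than calling the __jffs2_dbg_*() functions directly: when the corresponding JFFS2_DBG_* switch is not defined, the macros expand to nothing and the checks disappear from the build. A hedged usage sketch follows; the surrounding function is invented for illustration and is not part of the patch.

static void after_updating_accounting(struct jffs2_sb_info *c,
				      struct jffs2_eraseblock *jeb)
{
	/* Cheap check; JFFS2_DBG_SANITY_CHECKS is defined unconditionally
	 * in debug.h, so this normally stays compiled in. */
	jffs2_dbg_acct_sanity_check(c, jeb);

	/* Expensive walk of the eraseblock's node_ref list; only compiled
	 * in when CONFIG_JFFS2_FS_DEBUG > 0 enables JFFS2_DBG_PARANOIA_CHECKS. */
	jffs2_dbg_acct_paranoia_check(c, jeb);
}
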
diff --git a/linux-2.4.x/fs/jffs2/dir.c b/linux-2.4.x/fs/jffs2/dir.c
index acc2cf1..7fa48f3 100644
--- a/linux-2.4.x/fs/jffs2/dir.c
+++ b/linux-2.4.x/fs/jffs2/dir.c
@@ -1,103 +1,86 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: dir.c,v 1.45.2.7 2002/08/26 15:30:18 dwmw2 Exp $
+ * $Id: dir.c,v 1.91 2006/04/18 00:09:35 rpurdie Exp $
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <linux/fs.h>
-#include <linux/mtd/compatmac.h> /* For completion */
+#include <linux/crc32.h>
#include <linux/jffs2.h>
#include <linux/jffs2_fs_i.h>
#include <linux/jffs2_fs_sb.h>
+#include <linux/time.h>
#include "nodelist.h"
-#include "crc32.h"
static int jffs2_readdir (struct file *, void *, filldir_t);
-static int jffs2_create (struct inode *,struct dentry *,int);
-static struct dentry *jffs2_lookup (struct inode *,struct dentry *);
+static int jffs2_create (struct inode *,struct dentry *,int,
+ struct nameidata *);
+static struct dentry *jffs2_lookup (struct inode *,struct dentry *,
+ struct nameidata *);
static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
static int jffs2_unlink (struct inode *,struct dentry *);
static int jffs2_symlink (struct inode *,struct dentry *,const char *);
static int jffs2_mkdir (struct inode *,struct dentry *,int);
static int jffs2_rmdir (struct inode *,struct dentry *);
-static int jffs2_mknod (struct inode *,struct dentry *,int,int);
+static int jffs2_mknod (struct inode *,struct dentry *,int,dev_t);
static int jffs2_rename (struct inode *, struct dentry *,
struct inode *, struct dentry *);
struct file_operations jffs2_dir_operations =
{
- read: generic_read_dir,
- readdir: jffs2_readdir,
- ioctl: jffs2_ioctl,
- fsync: jffs2_null_fsync
+ .read = generic_read_dir,
+ .readdir = jffs2_readdir,
+ .ioctl = jffs2_ioctl,
+ .fsync = jffs2_fsync
};
struct inode_operations jffs2_dir_inode_operations =
{
- create: jffs2_create,
- lookup: jffs2_lookup,
- link: jffs2_link,
- unlink: jffs2_unlink,
- symlink: jffs2_symlink,
- mkdir: jffs2_mkdir,
- rmdir: jffs2_rmdir,
- mknod: jffs2_mknod,
- rename: jffs2_rename,
- setattr: jffs2_setattr,
+ .create = jffs2_create,
+ .lookup = jffs2_lookup,
+ .link = jffs2_link,
+ .unlink = jffs2_unlink,
+ .symlink = jffs2_symlink,
+ .mkdir = jffs2_mkdir,
+ .rmdir = jffs2_rmdir,
+ .mknod = jffs2_mknod,
+ .rename = jffs2_rename,
+ .setattr = jffs2_setattr,
};
/***********************************************************************/
/* We keep the dirent list sorted in increasing order of name hash,
- and we use the same hash function as the dentries. Makes this
+ and we use the same hash function as the dentries. Makes this
nice and simple
*/
-static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target)
+static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
+ struct nameidata *nd)
{
struct jffs2_inode_info *dir_f;
struct jffs2_sb_info *c;
struct jffs2_full_dirent *fd = NULL, *fd_list;
- __u32 ino = 0;
+ uint32_t ino = 0;
struct inode *inode = NULL;
D1(printk(KERN_DEBUG "jffs2_lookup()\n"));
+ if (target->d_name.len > JFFS2_MAX_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
dir_f = JFFS2_INODE_INFO(dir_i);
c = JFFS2_SB_INFO(dir_i->i_sb);
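
The first dir.c hunk above also converts the jffs2_dir_operations and jffs2_dir_inode_operations tables from the old GCC-specific `field: value` initializer syntax to C99 designated initializers, the form required by later kernels. A generic before/after sketch of the two spellings (not JFFS2 code):

struct demo_ops {
	int (*open)(void);
	int (*release)(void);
};

static int demo_open(void)    { return 0; }
static int demo_release(void) { return 0; }

/* Old GNU extension, as removed by the hunk:
 *	static struct demo_ops ops = { open: demo_open, release: demo_release };
 */

/* C99 designated initializers, as introduced by the hunk: */
static struct demo_ops ops = {
	.open    = demo_open,
	.release = demo_release,
};
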
@@ -105,7 +88,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target)
/* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */
for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) {
- if (fd_list->nhash == target->d_name.hash &&
+ if (fd_list->nhash == target->d_name.hash &&
(!fd || fd_list->version > fd->version) &&
strlen(fd_list->name) == target->d_name.len &&
!strncmp(fd_list->name, target->d_name.name, target->d_name.len)) {
@@ -153,8 +136,9 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
offset++;
}
if (offset == 1) {
- D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", filp->f_dentry->d_parent->d_inode->i_ino));
- if (filldir(dirent, "..", 2, 1, filp->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0)
+ unsigned long pino = parent_ino(filp->f_dentry);
+ D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", pino));
+ if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0)
goto out;
offset++;
}
@@ -166,7 +150,7 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
curofs++;
/* First loop: curofs = 2; offset = 2 */
if (curofs < offset) {
- D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
+ D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n",
fd->name, fd->ino, fd->type, curofs, offset));
continue;
}
@@ -188,45 +172,29 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
/***********************************************************************/
-static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode)
+
+static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
+ struct nameidata *nd)
{
+ struct jffs2_raw_inode *ri;
struct jffs2_inode_info *f, *dir_f;
struct jffs2_sb_info *c;
struct inode *inode;
- struct jffs2_raw_inode *ri;
- struct jffs2_raw_dirent *rd;
- struct jffs2_full_dnode *fn;
- struct jffs2_full_dirent *fd;
- int namelen;
- __u32 alloclen, phys_ofs;
- __u32 writtenlen;
int ret;
ri = jffs2_alloc_raw_inode();
if (!ri)
return -ENOMEM;
-
+
c = JFFS2_SB_INFO(dir_i->i_sb);
D1(printk(KERN_DEBUG "jffs2_create()\n"));
- /* Try to reserve enough space for both node and dirent.
- * Just the node will do for now, though
- */
- namelen = dentry->d_name.len;
- ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL);
- D1(printk(KERN_DEBUG "jffs2_create(): reserved 0x%x bytes\n", alloclen));
- if (ret) {
- jffs2_free_raw_inode(ri);
- return ret;
- }
-
inode = jffs2_new_inode(dir_i, mode, ri);
if (IS_ERR(inode)) {
D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n"));
jffs2_free_raw_inode(ri);
- jffs2_complete_reservation(c);
return PTR_ERR(inode);
}
@@ -236,93 +204,21 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode)
inode->i_mapping->nrpages = 0;
f = JFFS2_INODE_INFO(inode);
-
- ri->data_crc = 0;
- ri->node_crc = crc32(0, ri, sizeof(*ri)-8);
-
- fn = jffs2_write_dnode(inode, ri, NULL, 0, phys_ofs, &writtenlen);
- D1(printk(KERN_DEBUG "jffs2_create created file with mode 0x%x\n", ri->mode));
- jffs2_free_raw_inode(ri);
-
- if (IS_ERR(fn)) {
- D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n"));
- /* Eeek. Wave bye bye */
- up(&f->sem);
- jffs2_complete_reservation(c);
- jffs2_clear_inode(inode);
- return PTR_ERR(fn);
- }
- /* No data here. Only a metadata node, which will be
- obsoleted by the first data write
- */
- f->metadata = fn;
-
- /* Work out where to put the dirent node now. */
- writtenlen = PAD(writtenlen);
- phys_ofs += writtenlen;
- alloclen -= writtenlen;
- up(&f->sem);
-
- if (alloclen < sizeof(*rd)+namelen) {
- /* Not enough space left in this chunk. Get some more */
- jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
-
- if (ret) {
- /* Eep. */
- D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n"));
- jffs2_clear_inode(inode);
- return ret;
- }
- }
-
- rd = jffs2_alloc_raw_dirent();
- if (!rd) {
- /* Argh. Now we treat it like a normal delete */
- jffs2_complete_reservation(c);
- jffs2_clear_inode(inode);
- return -ENOMEM;
- }
-
dir_f = JFFS2_INODE_INFO(dir_i);
- down(&dir_f->sem);
- rd->magic = JFFS2_MAGIC_BITMASK;
- rd->nodetype = JFFS2_NODETYPE_DIRENT;
- rd->totlen = sizeof(*rd) + namelen;
- rd->hdr_crc = crc32(0, rd, sizeof(struct jffs2_unknown_node)-4);
+ ret = jffs2_do_create(c, dir_f, f, ri,
+ dentry->d_name.name, dentry->d_name.len);
- rd->pino = dir_i->i_ino;
- rd->version = ++dir_f->highest_version;
- rd->ino = inode->i_ino;
- rd->mctime = CURRENT_TIME;
- rd->nsize = namelen;
- rd->type = DT_REG;
- rd->node_crc = crc32(0, rd, sizeof(*rd)-8);
- rd->name_crc = crc32(0, dentry->d_name.name, namelen);
-
- fd = jffs2_write_dirent(dir_i, rd, dentry->d_name.name, namelen, phys_ofs, &writtenlen);
-
- jffs2_complete_reservation(c);
-
- if (IS_ERR(fd)) {
- /* dirent failed to write. Delete the inode normally
- as if it were the final unlink() */
- jffs2_free_raw_dirent(rd);
- up(&dir_f->sem);
- jffs2_clear_inode(inode);
- return PTR_ERR(fd);
+ if (ret) {
+ make_bad_inode(inode);
+ iput(inode);
+ jffs2_free_raw_inode(ri);
+ return ret;
}
- dir_i->i_mtime = dir_i->i_ctime = rd->mctime;
-
- jffs2_free_raw_dirent(rd);
-
- /* Link the fd into the inode's list, obsoleting an old
- one if necessary. */
- jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+ jffs2_free_raw_inode(ri);
d_instantiate(dentry, inode);
D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
@@ -332,174 +228,55 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode)
/***********************************************************************/
-static int jffs2_do_unlink(struct inode *dir_i, struct dentry *dentry, int rename)
-{
- struct jffs2_inode_info *dir_f, *f;
- struct jffs2_sb_info *c;
- struct jffs2_raw_dirent *rd;
- struct jffs2_full_dirent *fd;
- __u32 alloclen, phys_ofs;
- int ret;
-
- c = JFFS2_SB_INFO(dir_i->i_sb);
-
- rd = jffs2_alloc_raw_dirent();
- if (!rd)
- return -ENOMEM;
-
- ret = jffs2_reserve_space(c, sizeof(*rd)+dentry->d_name.len, &phys_ofs, &alloclen, ALLOC_DELETION);
- if (ret) {
- jffs2_free_raw_dirent(rd);
- return ret;
- }
-
- dir_f = JFFS2_INODE_INFO(dir_i);
- down(&dir_f->sem);
-
- /* Build a deletion node */
- rd->magic = JFFS2_MAGIC_BITMASK;
- rd->nodetype = JFFS2_NODETYPE_DIRENT;
- rd->totlen = sizeof(*rd) + dentry->d_name.len;
- rd->hdr_crc = crc32(0, rd, sizeof(struct jffs2_unknown_node)-4);
-
- rd->pino = dir_i->i_ino;
- rd->version = ++dir_f->highest_version;
- rd->ino = 0;
- rd->mctime = CURRENT_TIME;
- rd->nsize = dentry->d_name.len;
- rd->type = DT_UNKNOWN;
- rd->node_crc = crc32(0, rd, sizeof(*rd)-8);
- rd->name_crc = crc32(0, dentry->d_name.name, dentry->d_name.len);
-
- fd = jffs2_write_dirent(dir_i, rd, dentry->d_name.name, dentry->d_name.len, phys_ofs, NULL);
-
- jffs2_complete_reservation(c);
- jffs2_free_raw_dirent(rd);
-
- if (IS_ERR(fd)) {
- up(&dir_f->sem);
- return PTR_ERR(fd);
- }
-
- /* File it. This will mark the old one obsolete. */
- jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
-
- if (!rename) {
- f = JFFS2_INODE_INFO(dentry->d_inode);
- down(&f->sem);
-
- while (f->dents) {
- /* There can be only deleted ones */
- fd = f->dents;
-
- f->dents = fd->next;
-
- if (fd->ino) {
- printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
- f->inocache->ino, fd->name, fd->ino);
- } else {
- D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", fd->name, f->inocache->ino));
- }
- jffs2_mark_node_obsolete(c, fd->raw);
- jffs2_free_full_dirent(fd);
- }
- /* Don't oops on unlinking a bad inode */
- if (f->inocache)
- f->inocache->nlink--;
- dentry->d_inode->i_nlink--;
- up(&f->sem);
- }
-
- return 0;
-}
static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry)
{
- return jffs2_do_unlink(dir_i, dentry, 0);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(dir_i->i_sb);
+ struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
+ struct jffs2_inode_info *dead_f = JFFS2_INODE_INFO(dentry->d_inode);
+ int ret;
+ uint32_t now = get_seconds();
+
+ ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
+ dentry->d_name.len, dead_f, now);
+ if (dead_f->inocache)
+ dentry->d_inode->i_nlink = dead_f->inocache->nlink;
+ if (!ret)
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
+ return ret;
}
/***********************************************************************/
-static int jffs2_do_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry, int rename)
-{
- struct jffs2_inode_info *dir_f, *f;
- struct jffs2_sb_info *c;
- struct jffs2_raw_dirent *rd;
- struct jffs2_full_dirent *fd;
- __u32 alloclen, phys_ofs;
- int ret;
-
- c = JFFS2_SB_INFO(dir_i->i_sb);
-
- rd = jffs2_alloc_raw_dirent();
- if (!rd)
- return -ENOMEM;
-
- ret = jffs2_reserve_space(c, sizeof(*rd)+dentry->d_name.len, &phys_ofs, &alloclen, ALLOC_NORMAL);
- if (ret) {
- jffs2_free_raw_dirent(rd);
- return ret;
- }
-
- dir_f = JFFS2_INODE_INFO(dir_i);
- down(&dir_f->sem);
-
- /* Build a deletion node */
- rd->magic = JFFS2_MAGIC_BITMASK;
- rd->nodetype = JFFS2_NODETYPE_DIRENT;
- rd->totlen = sizeof(*rd) + dentry->d_name.len;
- rd->hdr_crc = crc32(0, rd, sizeof(struct jffs2_unknown_node)-4);
-
- rd->pino = dir_i->i_ino;
- rd->version = ++dir_f->highest_version;
- rd->ino = old_dentry->d_inode->i_ino;
- rd->mctime = CURRENT_TIME;
- rd->nsize = dentry->d_name.len;
-
- /* XXX: This is ugly. */
- rd->type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
- if (!rd->type) rd->type = DT_REG;
-
- rd->node_crc = crc32(0, rd, sizeof(*rd)-8);
- rd->name_crc = crc32(0, dentry->d_name.name, dentry->d_name.len);
-
- fd = jffs2_write_dirent(dir_i, rd, dentry->d_name.name, dentry->d_name.len, phys_ofs, NULL);
-
- jffs2_complete_reservation(c);
- jffs2_free_raw_dirent(rd);
-
- if (IS_ERR(fd)) {
- up(&dir_f->sem);
- return PTR_ERR(fd);
- }
-
- /* File it. This will mark the old one obsolete. */
- jffs2_add_fd_to_list(c, fd, &dir_f->dents);
- up(&dir_f->sem);
-
- if (!rename) {
- f = JFFS2_INODE_INFO(old_dentry->d_inode);
- down(&f->sem);
- old_dentry->d_inode->i_nlink = ++f->inocache->nlink;
- up(&f->sem);
- }
- return 0;
-}
static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry)
{
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_inode->i_sb);
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
+ struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i);
int ret;
+ uint8_t type;
+ uint32_t now;
- /* Can't link a bad inode. */
- if (!JFFS2_INODE_INFO(old_dentry->d_inode)->inocache)
+ /* Don't let people make hard links to bad inodes. */
+ if (!f->inocache)
return -EIO;
if (S_ISDIR(old_dentry->d_inode->i_mode))
return -EPERM;
- ret = jffs2_do_link(old_dentry, dir_i, dentry, 0);
+ /* XXX: This is ugly */
+ type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
+ if (!type) type = DT_REG;
+
+ now = get_seconds();
+ ret = jffs2_do_link(c, dir_f, f->inocache->ino, type, dentry->d_name.name, dentry->d_name.len, now);
+
if (!ret) {
+ down(&f->sem);
+ old_dentry->d_inode->i_nlink = ++f->inocache->nlink;
+ up(&f->sem);
d_instantiate(dentry, old_dentry->d_inode);
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(now);
atomic_inc(&old_dentry->d_inode->i_count);
}
return ret;
@@ -517,27 +294,27 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
struct jffs2_full_dnode *fn;
struct jffs2_full_dirent *fd;
int namelen;
- __u32 alloclen, phys_ofs;
- __u32 writtenlen;
- int ret;
+ uint32_t alloclen, phys_ofs;
+ int ret, targetlen = strlen(target);
/* FIXME: If you care. We'd need to use frags for the target
if it grows much more than this */
- if (strlen(target) > 254)
+ if (targetlen > 254)
return -EINVAL;
ri = jffs2_alloc_raw_inode();
if (!ri)
return -ENOMEM;
-
+
c = JFFS2_SB_INFO(dir_i->i_sb);
-
- /* Try to reserve enough space for both node and dirent.
- * Just the node will do for now, though
+
+ /* Try to reserve enough space for both node and dirent.
+ * Just the node will do for now, though
*/
namelen = dentry->d_name.len;
- ret = jffs2_reserve_space(c, sizeof(*ri) + strlen(target), &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
@@ -556,15 +333,16 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
f = JFFS2_INODE_INFO(inode);
- inode->i_size = ri->isize = ri->dsize = ri->csize = strlen(target);
- ri->totlen = sizeof(*ri) + ri->dsize;
- ri->hdr_crc = crc32(0, ri, sizeof(struct jffs2_unknown_node)-4);
+ inode->i_size = targetlen;
+ ri->isize = ri->dsize = ri->csize = cpu_to_je32(inode->i_size);
+ ri->totlen = cpu_to_je32(sizeof(*ri) + inode->i_size);
+ ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
ri->compr = JFFS2_COMPR_NONE;
- ri->data_crc = crc32(0, target, strlen(target));
- ri->node_crc = crc32(0, ri, sizeof(*ri)-8);
-
- fn = jffs2_write_dnode(inode, ri, target, strlen(target), phys_ofs, &writtenlen);
+ ri->data_crc = cpu_to_je32(crc32(0, target, targetlen));
+ ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+
+ fn = jffs2_write_dnode(c, f, ri, target, targetlen, phys_ofs, ALLOC_NORMAL);
jffs2_free_raw_inode(ri);
@@ -575,26 +353,33 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
jffs2_clear_inode(inode);
return PTR_ERR(fn);
}
- /* No data here. Only a metadata node, which will be
+
+ /* We use f->target field to store the target path. */
+ f->target = kmalloc(targetlen + 1, GFP_KERNEL);
+ if (!f->target) {
+ printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1);
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ jffs2_clear_inode(inode);
+ return -ENOMEM;
+ }
+
+ memcpy(f->target, target, targetlen + 1);
+ D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target));
+
+ /* No data here. Only a metadata node, which will be
obsoleted by the first data write
*/
f->metadata = fn;
up(&f->sem);
- /* Work out where to put the dirent node now. */
- writtenlen = (writtenlen+3)&~3;
- phys_ofs += writtenlen;
- alloclen -= writtenlen;
-
- if (alloclen < sizeof(*rd)+namelen) {
- /* Not enough space left in this chunk. Get some more */
- jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
- if (ret) {
- /* Eep. */
- jffs2_clear_inode(inode);
- return ret;
- }
+ jffs2_complete_reservation(c);
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+ if (ret) {
+ /* Eep. */
+ jffs2_clear_inode(inode);
+ return ret;
}
rd = jffs2_alloc_raw_dirent();
@@ -608,41 +393,42 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
dir_f = JFFS2_INODE_INFO(dir_i);
down(&dir_f->sem);
- rd->magic = JFFS2_MAGIC_BITMASK;
- rd->nodetype = JFFS2_NODETYPE_DIRENT;
- rd->totlen = sizeof(*rd) + namelen;
- rd->hdr_crc = crc32(0, rd, sizeof(struct jffs2_unknown_node)-4);
+ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+ rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+ rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
- rd->pino = dir_i->i_ino;
- rd->version = ++dir_f->highest_version;
- rd->ino = inode->i_ino;
- rd->mctime = CURRENT_TIME;
+ rd->pino = cpu_to_je32(dir_i->i_ino);
+ rd->version = cpu_to_je32(++dir_f->highest_version);
+ rd->ino = cpu_to_je32(inode->i_ino);
+ rd->mctime = cpu_to_je32(get_seconds());
rd->nsize = namelen;
rd->type = DT_LNK;
- rd->node_crc = crc32(0, rd, sizeof(*rd)-8);
- rd->name_crc = crc32(0, dentry->d_name.name, namelen);
+ rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+ rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
+
+ fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
- fd = jffs2_write_dirent(dir_i, rd, dentry->d_name.name, namelen, phys_ofs, &writtenlen);
-
- jffs2_complete_reservation(c);
-
if (IS_ERR(fd)) {
- /* dirent failed to write. Delete the inode normally
+ /* dirent failed to write. Delete the inode normally
as if it were the final unlink() */
+ jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
up(&dir_f->sem);
jffs2_clear_inode(inode);
return PTR_ERR(fd);
}
- dir_i->i_mtime = dir_i->i_ctime = rd->mctime;
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
jffs2_free_raw_dirent(rd);
/* Link the fd into the inode's list, obsoleting an old
one if necessary. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
up(&dir_f->sem);
+ jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
return 0;
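
The symlink hunk above (and the mkdir/mknod hunks that follow) wrap every on-media field in cpu_to_je16()/cpu_to_je32() and read them back with je16_to_cpu()/je32_to_cpu(), so the byte order stored on the medium is fixed independently of the CPU. The sketch below illustrates why the jint16_t/jint32_t wrapper types help: giving on-media integers a distinct struct type turns a forgotten conversion into a compile error rather than silent corruption. Everything here is illustrative only (a fixed big-endian order is assumed purely for the demo); it is not the kernel's actual implementation.

#include <stdint.h>
#include <arpa/inet.h>	/* htonl()/ntohl(), standing in for a fixed media byte order */

typedef struct { uint32_t v32; } demo_je32_t;	/* analogous to jint32_t */

static inline demo_je32_t cpu_to_demo32(uint32_t x)
{
	return (demo_je32_t){ .v32 = htonl(x) };
}

static inline uint32_t demo32_to_cpu(demo_je32_t x)
{
	return ntohl(x.v32);
}

/* demo_je32_t field = 5;                 -- does not compile: conversion is forced */
/* demo_je32_t field = cpu_to_demo32(5);  -- OK                                     */
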
@@ -659,8 +445,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
struct jffs2_full_dnode *fn;
struct jffs2_full_dirent *fd;
int namelen;
- __u32 alloclen, phys_ofs;
- __u32 writtenlen;
+ uint32_t alloclen, phys_ofs;
int ret;
mode |= S_IFDIR;
@@ -668,14 +453,15 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
ri = jffs2_alloc_raw_inode();
if (!ri)
return -ENOMEM;
-
+
c = JFFS2_SB_INFO(dir_i->i_sb);
- /* Try to reserve enough space for both node and dirent.
- * Just the node will do for now, though
+ /* Try to reserve enough space for both node and dirent.
+ * Just the node will do for now, though
*/
namelen = dentry->d_name.len;
- ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL,
+ JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
@@ -692,13 +478,15 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
inode->i_op = &jffs2_dir_inode_operations;
inode->i_fop = &jffs2_dir_operations;
+ /* Directories get nlink 2 at start */
+ inode->i_nlink = 2;
f = JFFS2_INODE_INFO(inode);
- ri->data_crc = 0;
- ri->node_crc = crc32(0, ri, sizeof(*ri)-8);
-
- fn = jffs2_write_dnode(inode, ri, NULL, 0, phys_ofs, &writtenlen);
+ ri->data_crc = cpu_to_je32(0);
+ ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+
+ fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
jffs2_free_raw_inode(ri);
@@ -709,28 +497,21 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
jffs2_clear_inode(inode);
return PTR_ERR(fn);
}
- /* No data here. Only a metadata node, which will be
+ /* No data here. Only a metadata node, which will be
obsoleted by the first data write
*/
f->metadata = fn;
up(&f->sem);
- /* Work out where to put the dirent node now. */
- writtenlen = PAD(writtenlen);
- phys_ofs += writtenlen;
- alloclen -= writtenlen;
-
- if (alloclen < sizeof(*rd)+namelen) {
- /* Not enough space left in this chunk. Get some more */
- jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
- if (ret) {
- /* Eep. */
- jffs2_clear_inode(inode);
- return ret;
- }
+ jffs2_complete_reservation(c);
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+ if (ret) {
+ /* Eep. */
+ jffs2_clear_inode(inode);
+ return ret;
}
-
+
rd = jffs2_alloc_raw_dirent();
if (!rd) {
/* Argh. Now we treat it like a normal delete */
@@ -742,41 +523,43 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
dir_f = JFFS2_INODE_INFO(dir_i);
down(&dir_f->sem);
- rd->magic = JFFS2_MAGIC_BITMASK;
- rd->nodetype = JFFS2_NODETYPE_DIRENT;
- rd->totlen = sizeof(*rd) + namelen;
- rd->hdr_crc = crc32(0, rd, sizeof(struct jffs2_unknown_node)-4);
+ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+ rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+ rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
- rd->pino = dir_i->i_ino;
- rd->version = ++dir_f->highest_version;
- rd->ino = inode->i_ino;
- rd->mctime = CURRENT_TIME;
+ rd->pino = cpu_to_je32(dir_i->i_ino);
+ rd->version = cpu_to_je32(++dir_f->highest_version);
+ rd->ino = cpu_to_je32(inode->i_ino);
+ rd->mctime = cpu_to_je32(get_seconds());
rd->nsize = namelen;
rd->type = DT_DIR;
- rd->node_crc = crc32(0, rd, sizeof(*rd)-8);
- rd->name_crc = crc32(0, dentry->d_name.name, namelen);
+ rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+ rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
+
+ fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
- fd = jffs2_write_dirent(dir_i, rd, dentry->d_name.name, namelen, phys_ofs, &writtenlen);
-
- jffs2_complete_reservation(c);
-
if (IS_ERR(fd)) {
- /* dirent failed to write. Delete the inode normally
+ /* dirent failed to write. Delete the inode normally
as if it were the final unlink() */
+ jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
up(&dir_f->sem);
jffs2_clear_inode(inode);
return PTR_ERR(fd);
}
- dir_i->i_mtime = dir_i->i_ctime = rd->mctime;
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
+ dir_i->i_nlink++;
jffs2_free_raw_dirent(rd);
/* Link the fd into the inode's list, obsoleting an old
one if necessary. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
up(&dir_f->sem);
+ jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
return 0;
@@ -786,15 +569,19 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
struct jffs2_full_dirent *fd;
+ int ret;
for (fd = f->dents ; fd; fd = fd->next) {
if (fd->ino)
return -ENOTEMPTY;
}
- return jffs2_unlink(dir_i, dentry);
+ ret = jffs2_unlink(dir_i, dentry);
+ if (!ret)
+ dir_i->i_nlink--;
+ return ret;
}
-static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, int rdev)
+static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, dev_t rdev)
{
struct jffs2_inode_info *f, *dir_f;
struct jffs2_sb_info *c;
@@ -804,28 +591,31 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, in
struct jffs2_full_dnode *fn;
struct jffs2_full_dirent *fd;
int namelen;
- unsigned short dev;
+ jint16_t dev;
int devlen = 0;
- __u32 alloclen, phys_ofs;
- __u32 writtenlen;
+ uint32_t alloclen, phys_ofs;
int ret;
+ if (!old_valid_dev(rdev))
+ return -EINVAL;
+
ri = jffs2_alloc_raw_inode();
if (!ri)
return -ENOMEM;
-
+
c = JFFS2_SB_INFO(dir_i->i_sb);
-
+
if (S_ISBLK(mode) || S_ISCHR(mode)) {
- dev = (MAJOR(to_kdev_t(rdev)) << 8) | MINOR(to_kdev_t(rdev));
+ dev = cpu_to_je16(old_encode_dev(rdev));
devlen = sizeof(dev);
}
-
- /* Try to reserve enough space for both node and dirent.
- * Just the node will do for now, though
+
+ /* Try to reserve enough space for both node and dirent.
+ * Just the node will do for now, though
*/
namelen = dentry->d_name.len;
- ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
@@ -844,15 +634,15 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, in
f = JFFS2_INODE_INFO(inode);
- ri->dsize = ri->csize = devlen;
- ri->totlen = sizeof(*ri) + ri->csize;
- ri->hdr_crc = crc32(0, ri, sizeof(struct jffs2_unknown_node)-4);
+ ri->dsize = ri->csize = cpu_to_je32(devlen);
+ ri->totlen = cpu_to_je32(sizeof(*ri) + devlen);
+ ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
ri->compr = JFFS2_COMPR_NONE;
- ri->data_crc = crc32(0, &dev, devlen);
- ri->node_crc = crc32(0, ri, sizeof(*ri)-8);
-
- fn = jffs2_write_dnode(inode, ri, (char *)&dev, devlen, phys_ofs, &writtenlen);
+ ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen));
+ ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+
+ fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL);
jffs2_free_raw_inode(ri);
@@ -863,26 +653,19 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, in
jffs2_clear_inode(inode);
return PTR_ERR(fn);
}
- /* No data here. Only a metadata node, which will be
+ /* No data here. Only a metadata node, which will be
obsoleted by the first data write
*/
f->metadata = fn;
up(&f->sem);
- /* Work out where to put the dirent node now. */
- writtenlen = (writtenlen+3)&~3;
- phys_ofs += writtenlen;
- alloclen -= writtenlen;
-
- if (alloclen < sizeof(*rd)+namelen) {
- /* Not enough space left in this chunk. Get some more */
- jffs2_complete_reservation(c);
- ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, ALLOC_NORMAL);
- if (ret) {
- /* Eep. */
- jffs2_clear_inode(inode);
- return ret;
- }
+ jffs2_complete_reservation(c);
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+ if (ret) {
+ /* Eep. */
+ jffs2_clear_inode(inode);
+ return ret;
}
rd = jffs2_alloc_raw_dirent();
@@ -896,44 +679,45 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, in
dir_f = JFFS2_INODE_INFO(dir_i);
down(&dir_f->sem);
- rd->magic = JFFS2_MAGIC_BITMASK;
- rd->nodetype = JFFS2_NODETYPE_DIRENT;
- rd->totlen = sizeof(*rd) + namelen;
- rd->hdr_crc = crc32(0, rd, sizeof(struct jffs2_unknown_node)-4);
+ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+ rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+ rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
- rd->pino = dir_i->i_ino;
- rd->version = ++dir_f->highest_version;
- rd->ino = inode->i_ino;
- rd->mctime = CURRENT_TIME;
+ rd->pino = cpu_to_je32(dir_i->i_ino);
+ rd->version = cpu_to_je32(++dir_f->highest_version);
+ rd->ino = cpu_to_je32(inode->i_ino);
+ rd->mctime = cpu_to_je32(get_seconds());
rd->nsize = namelen;
/* XXX: This is ugly. */
rd->type = (mode & S_IFMT) >> 12;
- rd->node_crc = crc32(0, rd, sizeof(*rd)-8);
- rd->name_crc = crc32(0, dentry->d_name.name, namelen);
+ rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+ rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
+
+ fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL);
- fd = jffs2_write_dirent(dir_i, rd, dentry->d_name.name, namelen, phys_ofs, &writtenlen);
-
- jffs2_complete_reservation(c);
-
if (IS_ERR(fd)) {
- /* dirent failed to write. Delete the inode normally
+ /* dirent failed to write. Delete the inode normally
as if it were the final unlink() */
+ jffs2_complete_reservation(c);
jffs2_free_raw_dirent(rd);
up(&dir_f->sem);
jffs2_clear_inode(inode);
return PTR_ERR(fd);
}
- dir_i->i_mtime = dir_i->i_ctime = rd->mctime;
+ dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime));
jffs2_free_raw_dirent(rd);
/* Link the fd into the inode's list, obsoleting an old
one if necessary. */
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
up(&dir_f->sem);
+ jffs2_complete_reservation(c);
d_instantiate(dentry, inode);
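
The reworked mknod path refuses device numbers that cannot be represented in the legacy 16-bit encoding (old_valid_dev()) and stores the rest with old_encode_dev(). As a rough, self-contained sketch of that encoding; the function names below are illustrative stand-ins, not the kernel helpers themselves:

    #include <stdint.h>

    /* Legacy dev_t layout: 8 bits of major, 8 bits of minor, which is all
     * the 16-bit on-flash field can hold. */
    static int legacy_dev_valid(unsigned major, unsigned minor)
    {
            return major < 256 && minor < 256;
    }

    static uint16_t legacy_dev_encode(unsigned major, unsigned minor)
    {
            return (uint16_t)((major << 8) | minor);
    }
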
@@ -944,9 +728,12 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
struct inode *new_dir_i, struct dentry *new_dentry)
{
int ret;
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dir_i->i_sb);
struct jffs2_inode_info *victim_f = NULL;
+ uint8_t type;
+ uint32_t now;
- /* The VFS will check for us and prevent trying to rename a
+ /* The VFS will check for us and prevent trying to rename a
* file over a directory and vice versa, but if it's a directory,
* the VFS can't check whether the victim is empty. The filesystem
* needs to do that for itself.
@@ -968,12 +755,21 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
}
/* XXX: We probably ought to alloc enough space for
- both nodes at the same time. Writing the new link,
+ both nodes at the same time. Writing the new link,
then getting -ENOSPC, is quite bad :)
*/
/* Make a hard link */
- ret = jffs2_do_link(old_dentry, new_dir_i, new_dentry, 1);
+
+ /* XXX: This is ugly */
+ type = (old_dentry->d_inode->i_mode & S_IFMT) >> 12;
+ if (!type) type = DT_REG;
+
+ now = get_seconds();
+ ret = jffs2_do_link(c, JFFS2_INODE_INFO(new_dir_i),
+ old_dentry->d_inode->i_ino, type,
+ new_dentry->d_name.name, new_dentry->d_name.len, now);
+
if (ret)
return ret;
@@ -989,22 +785,39 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
}
}
+ /* If it was a directory we moved, and there was no victim,
+ increase i_nlink on its new parent */
+ if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f)
+ new_dir_i->i_nlink++;
+
/* Unlink the original */
- ret = jffs2_do_unlink(old_dir_i, old_dentry, 1);
-
+ ret = jffs2_do_unlink(c, JFFS2_INODE_INFO(old_dir_i),
+ old_dentry->d_name.name, old_dentry->d_name.len, NULL, now);
+
+ /* We don't touch inode->i_nlink */
+
if (ret) {
/* Oh shit. We really ought to make a single node which can do both atomically */
struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
down(&f->sem);
+ old_dentry->d_inode->i_nlink++;
if (f->inocache)
- old_dentry->d_inode->i_nlink = f->inocache->nlink++;
+ f->inocache->nlink++;
up(&f->sem);
-
+
printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret);
/* Might as well let the VFS know */
d_instantiate(new_dentry, old_dentry->d_inode);
atomic_inc(&old_dentry->d_inode->i_count);
+ new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
+ return ret;
}
- return ret;
+
+ if (S_ISDIR(old_dentry->d_inode->i_mode))
+ old_dir_i->i_nlink--;
+
+ new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
+
+ return 0;
}
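
The "ugly" (i_mode & S_IFMT) >> 12 trick in the rename path works because the DT_* directory-entry types are defined as the S_IF* format bits shifted down by twelve, so the top nibble of the mode is already the dirent type. A user-space check of that correspondence, offered purely as an illustration:

    #define _DEFAULT_SOURCE         /* for the DT_* constants in <dirent.h> */
    #include <sys/stat.h>
    #include <dirent.h>
    #include <assert.h>

    int main(void)
    {
            /* The dirent type is literally the S_IFMT nibble of the mode. */
            assert(((S_IFREG & S_IFMT) >> 12) == DT_REG);
            assert(((S_IFDIR & S_IFMT) >> 12) == DT_DIR);
            assert(((S_IFLNK & S_IFMT) >> 12) == DT_LNK);
            assert(((S_IFCHR & S_IFMT) >> 12) == DT_CHR);
            assert(((S_IFBLK & S_IFMT) >> 12) == DT_BLK);
            return 0;
    }
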
diff --git a/linux-2.4.x/fs/jffs2/erase.c b/linux-2.4.x/fs/jffs2/erase.c
index 9179d32..a18e930 100644
--- a/linux-2.4.x/fs/jffs2/erase.c
+++ b/linux-2.4.x/fs/jffs2/erase.c
@@ -1,68 +1,65 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: erase.c,v 1.24 2001/12/06 16:38:38 dwmw2 Exp $
+ * $Id: erase.c,v 1.92 2006/01/21 21:50:44 havasi Exp $
*
*/
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
-#include <linux/jffs2.h>
-#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/crc32.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
#include "nodelist.h"
-#include "crc32.h"
struct erase_priv_struct {
struct jffs2_eraseblock *jeb;
struct jffs2_sb_info *c;
};
-
+
+#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *);
+#endif
+static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
+static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
-void jffs2_erase_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+static void jffs2_erase_block(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb)
{
- struct erase_info *instr;
int ret;
+ uint32_t bad_offset;
+#ifdef __ECOS
+ ret = jffs2_flash_erase(c, jeb);
+ if (!ret) {
+ jffs2_erase_succeeded(c, jeb);
+ return;
+ }
+ bad_offset = jeb->offset;
+#else /* Linux */
+ struct erase_info *instr;
+ D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n",
+ jeb->offset, jeb->offset, jeb->offset + c->sector_size));
instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
if (!instr) {
printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
- spin_lock_bh(&c->erase_completion_lock);
+ spin_lock(&c->erase_completion_lock);
list_del(&jeb->list);
list_add(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
- spin_unlock_bh(&c->erase_completion_lock);
+ c->dirty_size += c->sector_size;
+ jeb->dirty_size = c->sector_size;
+ spin_unlock(&c->erase_completion_lock);
return;
}
@@ -73,98 +70,164 @@ void jffs2_erase_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
instr->len = c->sector_size;
instr->callback = jffs2_erase_callback;
instr->priv = (unsigned long)(&instr[1]);
-
+ instr->fail_addr = 0xffffffff;
+
((struct erase_priv_struct *)instr->priv)->jeb = jeb;
((struct erase_priv_struct *)instr->priv)->c = c;
ret = c->mtd->erase(c->mtd, instr);
- if (!ret) {
+ if (!ret)
return;
- }
+
+ bad_offset = instr->fail_addr;
+ kfree(instr);
+#endif /* __ECOS */
+
if (ret == -ENOMEM || ret == -EAGAIN) {
/* Erase failed immediately. Refile it on the list */
D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
- spin_lock_bh(&c->erase_completion_lock);
+ spin_lock(&c->erase_completion_lock);
list_del(&jeb->list);
list_add(&jeb->list, &c->erase_pending_list);
c->erasing_size -= c->sector_size;
- spin_unlock_bh(&c->erase_completion_lock);
- kfree(instr);
+ c->dirty_size += c->sector_size;
+ jeb->dirty_size = c->sector_size;
+ spin_unlock(&c->erase_completion_lock);
return;
}
- if (ret == -EROFS)
+ if (ret == -EROFS)
printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
else
printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);
- spin_lock_bh(&c->erase_completion_lock);
- list_del(&jeb->list);
- list_add(&jeb->list, &c->bad_list);
- c->nr_erasing_blocks--;
- c->bad_size += c->sector_size;
- c->erasing_size -= c->sector_size;
- spin_unlock_bh(&c->erase_completion_lock);
- wake_up(&c->erase_wait);
- kfree(instr);
+
+ jffs2_erase_failed(c, jeb, bad_offset);
}
-void jffs2_erase_pending_blocks(struct jffs2_sb_info *c)
+void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
struct jffs2_eraseblock *jeb;
- spin_lock_bh(&c->erase_completion_lock);
- while (!list_empty(&c->erase_pending_list)) {
+ down(&c->erase_free_sem);
- jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
+ spin_lock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
+ while (!list_empty(&c->erase_complete_list) ||
+ !list_empty(&c->erase_pending_list)) {
+
+ if (!list_empty(&c->erase_complete_list)) {
+ jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
+ list_del(&jeb->list);
+ spin_unlock(&c->erase_completion_lock);
+ jffs2_mark_erased_block(c, jeb);
+
+ if (!--count) {
+ D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
+ goto done;
+ }
+
+ } else if (!list_empty(&c->erase_pending_list)) {
+ jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
+ D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
+ list_del(&jeb->list);
+ c->erasing_size += c->sector_size;
+ c->wasted_size -= jeb->wasted_size;
+ c->free_size -= jeb->free_size;
+ c->used_size -= jeb->used_size;
+ c->dirty_size -= jeb->dirty_size;
+ jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
+ jffs2_free_all_node_refs(c, jeb);
+ list_add(&jeb->list, &c->erasing_list);
+ spin_unlock(&c->erase_completion_lock);
+
+ jffs2_erase_block(c, jeb);
+
+ } else {
+ BUG();
+ }
- list_del(&jeb->list);
- c->erasing_size += c->sector_size;
- c->free_size -= jeb->free_size;
- c->used_size -= jeb->used_size;
- c->dirty_size -= jeb->dirty_size;
- jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
- jffs2_free_all_node_refs(c, jeb);
- list_add(&jeb->list, &c->erasing_list);
- spin_unlock_bh(&c->erase_completion_lock);
-
- jffs2_erase_block(c, jeb);
/* Be nice */
- if (current->need_resched)
- schedule();
- spin_lock_bh(&c->erase_completion_lock);
+ cond_resched();
+ spin_lock(&c->erase_completion_lock);
}
- spin_unlock_bh(&c->erase_completion_lock);
+
+ spin_unlock(&c->erase_completion_lock);
+ done:
D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
+
+ up(&c->erase_free_sem);
}
+static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+ D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
+ spin_lock(&c->erase_completion_lock);
+ list_del(&jeb->list);
+ list_add_tail(&jeb->list, &c->erase_complete_list);
+ spin_unlock(&c->erase_completion_lock);
+ if (!EBFLAGS_HAS_EBH(jeb)) {
+ if (c->nr_blocks_with_ebh != 0) {
+ jeb->erase_count = c->total_erase_count/c->nr_blocks_with_ebh;
+ } else {
+ jeb->erase_count = 0;
+ }
+ EBFLAGS_SET_EBH(jeb);
+ c->nr_blocks_with_ebh++;
+ c->total_erase_count += jeb->erase_count;
+ }
+ jeb->erase_count++;
+ if (jeb->erase_count > c->max_erase_count) {
+ c->max_erase_count = jeb->erase_count;
+ }
+ c->total_erase_count++;
+ /* Ensure that kupdated calls us again to mark them clean */
+ jffs2_erase_pending_trigger(c);
+}
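
jffs2_erase_succeeded() now also maintains wear statistics: a block that has no eraseblock header yet is seeded with the current average erase count, and the per-block, maximum and total counters are bumped on every successful erase. A small user-space model of that bookkeeping; struct and function names are mine, not the driver's:

    #include <stdint.h>

    struct wear_stats {
            uint32_t total_erase_count;
            uint32_t max_erase_count;
            uint32_t nr_blocks_with_ebh;
    };

    /* Unseen blocks start at the running average so one freshly headered
     * block does not skew the statistics. */
    static uint32_t account_erase(struct wear_stats *s, uint32_t *erase_count,
                                  int has_ebh)
    {
            if (!has_ebh) {
                    *erase_count = s->nr_blocks_with_ebh ?
                            s->total_erase_count / s->nr_blocks_with_ebh : 0;
                    s->nr_blocks_with_ebh++;
                    s->total_erase_count += *erase_count;
            }
            (*erase_count)++;
            if (*erase_count > s->max_erase_count)
                    s->max_erase_count = *erase_count;
            s->total_erase_count++;
            return *erase_count;
    }
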
+
+static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
+{
+ /* For NAND, if the failure did not occur at the device level for a
+ specific physical page, don't bother updating the bad block table. */
+ if (jffs2_ebh_oob(c) && (bad_offset != 0xffffffff)) {
+ /* We had a device-level failure to erase. Let's see if we've
+ failed too many times. */
+ if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
+ /* We'd like to give this block another try. */
+ spin_lock(&c->erase_completion_lock);
+ list_del(&jeb->list);
+ list_add(&jeb->list, &c->erase_pending_list);
+ c->erasing_size -= c->sector_size;
+ c->dirty_size += c->sector_size;
+ jeb->dirty_size = c->sector_size;
+ spin_unlock(&c->erase_completion_lock);
+ return;
+ }
+ }
+
+ spin_lock(&c->erase_completion_lock);
+ c->erasing_size -= c->sector_size;
+ c->bad_size += c->sector_size;
+ list_del(&jeb->list);
+ list_add(&jeb->list, &c->bad_list);
+ c->nr_erasing_blocks--;
+ spin_unlock(&c->erase_completion_lock);
+ wake_up(&c->erase_wait);
+}
+#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *instr)
{
struct erase_priv_struct *priv = (void *)instr->priv;
if(instr->state != MTD_ERASE_DONE) {
printk(KERN_WARNING "Erase at 0x%08x finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", instr->addr, instr->state);
- spin_lock(&priv->c->erase_completion_lock);
- priv->c->erasing_size -= priv->c->sector_size;
- priv->c->bad_size += priv->c->sector_size;
- list_del(&priv->jeb->list);
- list_add(&priv->jeb->list, &priv->c->bad_list);
- priv->c->nr_erasing_blocks--;
- spin_unlock(&priv->c->erase_completion_lock);
- wake_up(&priv->c->erase_wait);
+ jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
} else {
- D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", instr->addr));
- spin_lock(&priv->c->erase_completion_lock);
- list_del(&priv->jeb->list);
- list_add_tail(&priv->jeb->list, &priv->c->erase_complete_list);
- spin_unlock(&priv->c->erase_completion_lock);
- }
- /* Make sure someone picks up the block off the erase_complete list */
- OFNI_BS_2SFFJ(priv->c)->s_dirt = 1;
+ jffs2_erase_succeeded(priv->c, priv->jeb);
+ }
kfree(instr);
}
+#endif /* !__ECOS */
/* Hmmm. Maybe we should accept the extra space it takes and make
this a standard doubly-linked list? */
@@ -179,15 +242,15 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
/* Walk the inode's list once, removing any nodes from this eraseblock */
while (1) {
if (!(*prev)->next_in_ino) {
- /* We're looking at the jffs2_inode_cache, which is
+ /* We're looking at the jffs2_inode_cache, which is
at the end of the linked list. Stash it and continue
from the beginning of the list */
ic = (struct jffs2_inode_cache *)(*prev);
prev = &ic->nodes;
continue;
- }
+ }
- if (((*prev)->flash_offset & ~(c->sector_size -1)) == jeb->offset) {
+ if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) {
/* It's in the block we're erasing */
struct jffs2_raw_node_ref *this;
@@ -219,9 +282,9 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);
this = ic->nodes;
-
+
while(this) {
- printk( "0x%08x(%d)->", this->flash_offset & ~3, this->flash_offset &3);
+ printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this));
if (++i == 5) {
printk("\n" KERN_DEBUG);
i=0;
@@ -231,11 +294,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
printk("\n");
});
- if (ic->nodes == (void *)ic) {
- D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
+ if (ic->nodes == (void *)ic && ic->nlink == 0)
jffs2_del_ino_cache(c, ic);
- jffs2_free_inode_cache(ic);
- }
}
static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
@@ -245,7 +305,7 @@ static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_erase
while(jeb->first_node) {
ref = jeb->first_node;
jeb->first_node = ref->next_phys;
-
+
/* Remove from the inode-list */
if (ref->next_in_ino)
jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
@@ -256,118 +316,157 @@ static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_erase
jeb->last_node = NULL;
}
-void jffs2_erase_pending_trigger(struct jffs2_sb_info *c)
+static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
{
- OFNI_BS_2SFFJ(c)->s_dirt = 1;
+ void *ebuf;
+ uint32_t ofs;
+ int ret = -EIO;
+
+ ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!ebuf) {
+ printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset);
+ return -EAGAIN;
+ }
+
+ D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
+
+ for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
+ uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
+ int i;
+
+ *bad_offset = ofs;
+
+ if (jffs2_flash_read_safe(c, ofs, readlen, (void *)ebuf))
+ goto fail;
+
+ for (i=0; i<readlen; i += sizeof(unsigned long)) {
+ /* It's OK. We know it's properly aligned */
+ unsigned long *datum = ebuf + i;
+ if (*datum + 1) {
+ *bad_offset += i;
+ printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
+ goto fail;
+ }
+ }
+ ofs += readlen;
+ cond_resched();
+ }
+ ret = 0;
+fail:
+ kfree(ebuf);
+ return ret;
}
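
The verification loop above treats a word as clean when "*datum + 1" wraps to zero, i.e. when it reads back as all ones. The same check written more explicitly, as a stand-alone sketch:

    #include <stdint.h>
    #include <stddef.h>

    /* Erased flash reads back as all 1s; any other value means the erase
     * did not stick. */
    static int buffer_is_erased(const unsigned long *buf, size_t words)
    {
            size_t i;

            for (i = 0; i < words; i++)
                    if (buf[i] != ~0UL)
                            return 0;
            return 1;
    }
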
-void jffs2_mark_erased_blocks(struct jffs2_sb_info *c)
+static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
- static struct jffs2_unknown_node marker = {JFFS2_MAGIC_BITMASK, JFFS2_NODETYPE_CLEANMARKER, sizeof(struct jffs2_unknown_node)};
- struct jffs2_eraseblock *jeb;
- struct jffs2_raw_node_ref *marker_ref;
- unsigned char *ebuf;
- ssize_t retlen;
+ struct jffs2_raw_node_ref *ebh_ref = NULL;
+ size_t retlen;
int ret;
+ uint32_t bad_offset;
- marker.hdr_crc = crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4);
+ switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
+ case -EAGAIN: goto refile;
+ case -EIO: goto filebad;
+ }
- spin_lock_bh(&c->erase_completion_lock);
- while (!list_empty(&c->erase_complete_list)) {
- jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
- list_del(&jeb->list);
- spin_unlock_bh(&c->erase_completion_lock);
+ /* Write the erase complete marker */
+ D1(printk(KERN_DEBUG "Writing eraseblock header to block at 0x%08x\n", jeb->offset));
+ bad_offset = jeb->offset;
- marker_ref = jffs2_alloc_raw_node_ref();
- if (!marker_ref) {
- printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n");
- /* Come back later */
- jffs2_erase_pending_trigger(c);
- return;
- }
+ /* Cleanmarker in oob area or no cleanmarker at all ? */
+ if (jffs2_ebh_oob(c)) {
- ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!ebuf) {
- printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
- } else {
- __u32 ofs = jeb->offset;
-
- D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
- while(ofs < jeb->offset + c->sector_size) {
- __u32 readlen = min((__u32)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
- int i;
-
- ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
- if (ret < 0) {
- printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
- goto bad;
- }
- if (retlen != readlen) {
- printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %d\n", ofs, readlen, retlen);
- goto bad;
- }
- for (i=0; i<readlen; i += sizeof(unsigned long)) {
- /* It's OK. We know it's properly aligned */
- unsigned long datum = *(unsigned long *)(&ebuf[i]);
- if (datum + 1) {
- printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, ofs + i);
- bad:
- jffs2_free_raw_node_ref(marker_ref);
- kfree(ebuf);
- bad2:
- spin_lock_bh(&c->erase_completion_lock);
- c->erasing_size -= c->sector_size;
- c->bad_size += c->sector_size;
-
- list_add_tail(&jeb->list, &c->bad_list);
- c->nr_erasing_blocks--;
- spin_unlock_bh(&c->erase_completion_lock);
- wake_up(&c->erase_wait);
- return;
- }
- }
- ofs += readlen;
- }
- kfree(ebuf);
- }
-
- /* Write the erase complete marker */
- D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
- ret = c->mtd->write(c->mtd, jeb->offset, sizeof(marker), &retlen, (char *)&marker);
- if (ret) {
- printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
- jeb->offset, ret);
- goto bad2;
+ if (jffs2_write_nand_ebh(c, jeb))
+ goto filebad;
+
+ jeb->first_node = jeb->last_node = NULL;
+ jeb->free_size = c->sector_size;
+ jeb->used_size = 0;
+ jeb->dirty_size = 0;
+ jeb->wasted_size = 0;
+
+ } else {
+
+ struct kvec vecs[1];
+ struct jffs2_raw_ebh ebh = {
+ .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ .nodetype = cpu_to_je16(JFFS2_NODETYPE_ERASEBLOCK_HEADER),
+ .totlen = cpu_to_je32(c->ebh_size),
+ .reserved = 0,
+ .compat_fset = JFFS2_EBH_COMPAT_FSET,
+ .incompat_fset = JFFS2_EBH_INCOMPAT_FSET,
+ .rocompat_fset = JFFS2_EBH_ROCOMPAT_FSET,
+ };
+
+ ebh_ref = jffs2_alloc_raw_node_ref();
+ if (!ebh_ref) {
+ printk(KERN_WARNING "Failed to allocate raw node ref for clean marker. Refiling\n");
+ goto refile;
}
- if (retlen != sizeof(marker)) {
- printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %d, got %d\n",
- jeb->offset, sizeof(marker), retlen);
- goto bad2;
+ ebh.erase_count = cpu_to_je32(jeb->erase_count);
+ ebh.hdr_crc = cpu_to_je32(crc32(0, &ebh, sizeof(struct jffs2_unknown_node)-4));
+ ebh.node_crc = cpu_to_je32(crc32(0, (unsigned char *)&ebh + sizeof(struct jffs2_unknown_node) + 4,
+ sizeof(struct jffs2_raw_ebh) - sizeof(struct jffs2_unknown_node) - 4));
+
+ vecs[0].iov_base = (unsigned char *) &ebh;
+ vecs[0].iov_len = sizeof(ebh);
+ ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);
+
+ if (ret || retlen != sizeof(ebh)) {
+ if (ret)
+ printk(KERN_WARNING "Write eraseblock header to block at 0x%08x failed: %d\n",
+ jeb->offset, ret);
+ else
+ printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
+ jeb->offset, sizeof(ebh), retlen);
+
+ jffs2_free_raw_node_ref(ebh_ref);
+ goto filebad;
}
- marker_ref->next_in_ino = NULL;
- marker_ref->next_phys = NULL;
- marker_ref->flash_offset = jeb->offset;
- marker_ref->totlen = PAD(sizeof(marker));
+ ebh_ref->next_in_ino = NULL;
+ ebh_ref->next_phys = NULL;
+ ebh_ref->flash_offset = jeb->offset | REF_NORMAL;
+ ebh_ref->__totlen = c->ebh_size;
- jeb->first_node = jeb->last_node = marker_ref;
+ jeb->first_node = jeb->last_node = ebh_ref;
- jeb->free_size = c->sector_size - marker_ref->totlen;
- jeb->used_size = marker_ref->totlen;
+ jeb->free_size = c->sector_size - c->ebh_size;
+ jeb->used_size = c->ebh_size;
jeb->dirty_size = 0;
+ jeb->wasted_size = 0;
+ }
- spin_lock_bh(&c->erase_completion_lock);
- c->erasing_size -= c->sector_size;
- c->free_size += jeb->free_size;
- c->used_size += jeb->used_size;
+ spin_lock(&c->erase_completion_lock);
+ c->erasing_size -= c->sector_size;
+ c->free_size += jeb->free_size;
+ c->used_size += jeb->used_size;
- ACCT_SANITY_CHECK(c,jeb);
- ACCT_PARANOIA_CHECK(jeb);
+ jffs2_dbg_acct_sanity_check_nolock(c,jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
- list_add_tail(&jeb->list, &c->free_list);
- c->nr_erasing_blocks--;
- c->nr_free_blocks++;
- wake_up(&c->erase_wait);
- }
- spin_unlock_bh(&c->erase_completion_lock);
+ list_add_tail(&jeb->list, &c->free_list);
+ jffs2_add_to_hash_table(c, jeb, 2);
+ c->nr_erasing_blocks--;
+ c->nr_free_blocks++;
+ spin_unlock(&c->erase_completion_lock);
+ wake_up(&c->erase_wait);
+ return;
+
+filebad:
+ spin_lock(&c->erase_completion_lock);
+ /* Stick it on a list (any list) so erase_failed can take it
+ right off again. Silly, but shouldn't happen often. */
+ list_add(&jeb->list, &c->erasing_list);
+ spin_unlock(&c->erase_completion_lock);
+ jffs2_erase_failed(c, jeb, bad_offset);
+ return;
+
+refile:
+ /* Stick it back on the list from whence it came and come back later */
+ jffs2_erase_pending_trigger(c);
+ spin_lock(&c->erase_completion_lock);
+ list_add(&jeb->list, &c->erase_complete_list);
+ spin_unlock(&c->erase_completion_lock);
+ return;
}
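
Whichever cleanmarker style is used, the block ends up accounted the same way: the whole sector is free except for the in-band eraseblock header, if one was written. A compact stand-alone restatement of that bookkeeping, with ebh_size standing in for c->ebh_size:

    #include <stdint.h>

    struct eb_acct { uint32_t free_size, used_size, dirty_size, wasted_size; };

    /* With an out-of-band marker nothing is consumed in the data area; with
     * an in-band header the header itself is the only "used" data. */
    static void reset_block_accounting(struct eb_acct *eb, uint32_t sector_size,
                                       uint32_t ebh_size, int marker_in_oob)
    {
            uint32_t used = marker_in_oob ? 0 : ebh_size;

            eb->free_size   = sector_size - used;
            eb->used_size   = used;
            eb->dirty_size  = 0;
            eb->wasted_size = 0;
    }
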
diff --git a/linux-2.4.x/fs/jffs2/file.c b/linux-2.4.x/fs/jffs2/file.c
index b12c0f4..61effa9 100644
--- a/linux-2.4.x/fs/jffs2/file.c
+++ b/linux-2.4.x/fs/jffs2/file.c
@@ -1,383 +1,171 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: file.c,v 1.58.2.6 2002/11/12 13:17:01 dwmw2 Exp $
+ * $Id: file.c,v 1.106 2005/11/07 11:14:39 gleixner Exp $
*
*/
#include <linux/kernel.h>
-#include <linux/mtd/compatmac.h> /* for min() */
#include <linux/slab.h>
#include <linux/fs.h>
+#include <linux/time.h>
#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/crc32.h>
#include <linux/jffs2.h>
#include "nodelist.h"
-#include "crc32.h"
-
-extern int generic_file_open(struct inode *, struct file *) __attribute__((weak));
-extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) __attribute__((weak));
+static int jffs2_commit_write (struct file *filp, struct page *pg,
+ unsigned start, unsigned end);
+static int jffs2_prepare_write (struct file *filp, struct page *pg,
+ unsigned start, unsigned end);
+static int jffs2_readpage (struct file *filp, struct page *pg);
-int jffs2_null_fsync(struct file *filp, struct dentry *dentry, int datasync)
+int jffs2_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
- /* Move along. Nothing to see here */
+ struct inode *inode = dentry->d_inode;
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+
+ /* Trigger GC to flush any pending writes for this inode */
+ jffs2_flush_wbuf_gc(c, inode->i_ino);
+
return 0;
}
struct file_operations jffs2_file_operations =
{
- llseek: generic_file_llseek,
- open: generic_file_open,
- read: generic_file_read,
- write: generic_file_write,
- ioctl: jffs2_ioctl,
- mmap: generic_file_mmap,
- fsync: jffs2_null_fsync
+ .llseek = generic_file_llseek,
+ .open = generic_file_open,
+ .read = generic_file_read,
+ .write = generic_file_write,
+ .ioctl = jffs2_ioctl,
+ .mmap = generic_file_readonly_mmap,
+ .fsync = jffs2_fsync,
+ .sendfile = generic_file_sendfile
};
/* jffs2_file_inode_operations */
struct inode_operations jffs2_file_inode_operations =
{
- setattr: jffs2_setattr
+ .setattr = jffs2_setattr
};
struct address_space_operations jffs2_file_address_operations =
{
- readpage: jffs2_readpage,
- prepare_write: jffs2_prepare_write,
- commit_write: jffs2_commit_write
+ .readpage = jffs2_readpage,
+ .prepare_write =jffs2_prepare_write,
+ .commit_write = jffs2_commit_write
};
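
The operations tables switch from the old GNU "label:" initializer extension to standard C99 designated initializers; both forms fill the same members, but only the C99 form is portable. A minimal illustration, obviously not the real jffs2 structures:

    struct ops { int (*open)(void); int (*close)(void); };

    static int my_open(void)  { return 0; }
    static int my_close(void) { return 0; }

    static struct ops legacy_style = { open: my_open, close: my_close };    /* GNU extension */
    static struct ops c99_style    = { .open = my_open, .close = my_close }; /* ISO C99 */
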
-int jffs2_setattr (struct dentry *dentry, struct iattr *iattr)
-{
- struct jffs2_full_dnode *old_metadata, *new_metadata;
- struct inode *inode = dentry->d_inode;
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- struct jffs2_raw_inode *ri;
- unsigned short dev;
- unsigned char *mdata = NULL;
- int mdatalen = 0;
- unsigned int ivalid;
- __u32 phys_ofs, alloclen;
- int ret;
- D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
- ret = inode_change_ok(inode, iattr);
- if (ret)
- return ret;
-
- /* Special cases - we don't want more than one data node
- for these types on the medium at any time. So setattr
- must read the original data associated with the node
- (i.e. the device numbers or the target name) and write
- it out again with the appropriate data attached */
- if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
- /* For these, we don't actually need to read the old node */
- dev = (MAJOR(to_kdev_t(dentry->d_inode->i_rdev)) << 8) |
- MINOR(to_kdev_t(dentry->d_inode->i_rdev));
- mdata = (char *)&dev;
- mdatalen = sizeof(dev);
- D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
- } else if (S_ISLNK(inode->i_mode)) {
- mdatalen = f->metadata->size;
- mdata = kmalloc(f->metadata->size, GFP_USER);
- if (!mdata)
- return -ENOMEM;
- ret = jffs2_read_dnode(c, f->metadata, mdata, 0, mdatalen);
- if (ret) {
- kfree(mdata);
- return ret;
- }
- D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
- }
-
- ri = jffs2_alloc_raw_inode();
- if (!ri) {
- if (S_ISLNK(inode->i_mode))
- kfree(mdata);
- return -ENOMEM;
- }
-
- ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, ALLOC_NORMAL);
- if (ret) {
- jffs2_free_raw_inode(ri);
- if (S_ISLNK(inode->i_mode))
- kfree(mdata);
- return ret;
- }
- down(&f->sem);
- ivalid = iattr->ia_valid;
-
- ri->magic = JFFS2_MAGIC_BITMASK;
- ri->nodetype = JFFS2_NODETYPE_INODE;
- ri->totlen = sizeof(*ri) + mdatalen;
- ri->hdr_crc = crc32(0, ri, sizeof(struct jffs2_unknown_node)-4);
-
- ri->ino = inode->i_ino;
- ri->version = ++f->highest_version;
-
- ri->mode = (ivalid & ATTR_MODE)?iattr->ia_mode:inode->i_mode;
- ri->uid = (ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid;
- ri->gid = (ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid;
-
- if (ivalid & ATTR_MODE && ri->mode & S_ISGID &&
- !in_group_p(ri->gid) && !capable(CAP_FSETID))
- ri->mode &= ~S_ISGID;
-
- ri->isize = (ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size;
- ri->atime = (ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime;
- ri->mtime = (ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime;
- ri->ctime = (ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime;
-
- ri->offset = 0;
- ri->csize = ri->dsize = mdatalen;
- ri->compr = JFFS2_COMPR_NONE;
- if (inode->i_size < ri->isize) {
- /* It's an extension. Make it a hole node */
- ri->compr = JFFS2_COMPR_ZERO;
- ri->dsize = ri->isize - inode->i_size;
- ri->offset = inode->i_size;
- }
- ri->node_crc = crc32(0, ri, sizeof(*ri)-8);
- if (mdatalen)
- ri->data_crc = crc32(0, mdata, mdatalen);
- else
- ri->data_crc = 0;
-
- new_metadata = jffs2_write_dnode(inode, ri, mdata, mdatalen, phys_ofs, NULL);
- if (S_ISLNK(inode->i_mode))
- kfree(mdata);
-
- jffs2_complete_reservation(c);
-
- if (IS_ERR(new_metadata)) {
- jffs2_free_raw_inode(ri);
- up(&f->sem);
- return PTR_ERR(new_metadata);
- }
- /* It worked. Update the inode */
- inode->i_atime = ri->atime;
- inode->i_ctime = ri->ctime;
- inode->i_mtime = ri->mtime;
- inode->i_mode = ri->mode;
- inode->i_uid = ri->uid;
- inode->i_gid = ri->gid;
-
-
- old_metadata = f->metadata;
-
- if (inode->i_size > ri->isize) {
- vmtruncate(inode, ri->isize);
- jffs2_truncate_fraglist (c, &f->fraglist, ri->isize);
- }
-
- if (inode->i_size < ri->isize) {
- jffs2_add_full_dnode_to_inode(c, f, new_metadata);
- inode->i_size = ri->isize;
- f->metadata = NULL;
- } else {
- f->metadata = new_metadata;
- }
- if (old_metadata) {
- jffs2_mark_node_obsolete(c, old_metadata->raw);
- jffs2_free_full_dnode(old_metadata);
- }
- jffs2_free_raw_inode(ri);
- up(&f->sem);
- return 0;
-}
-
-int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
+static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- struct jffs2_node_frag *frag = f->fraglist;
- __u32 offset = pg->index << PAGE_CACHE_SHIFT;
- __u32 end = offset + PAGE_CACHE_SIZE;
unsigned char *pg_buf;
int ret;
- D1(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%x\n", inode->i_ino, offset));
-
- if (!PageLocked(pg))
- PAGE_BUG(pg);
+ D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));
- while(frag && frag->ofs + frag->size <= offset) {
- // D1(printk(KERN_DEBUG "skipping frag %d-%d; before the region we care about\n", frag->ofs, frag->ofs + frag->size));
- frag = frag->next;
- }
+ BUG_ON(!PageLocked(pg));
pg_buf = kmap(pg);
+ /* FIXME: Can kmap fail? */
- /* XXX FIXME: Where a single physical node actually shows up in two
- frags, we read it twice. Don't do that. */
- /* Now we're pointing at the first frag which overlaps our page */
- while(offset < end) {
- D2(printk(KERN_DEBUG "jffs2_readpage: offset %d, end %d\n", offset, end));
- if (!frag || frag->ofs > offset) {
- __u32 holesize = end - offset;
- if (frag) {
- D1(printk(KERN_NOTICE "Eep. Hole in ino %ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", inode->i_ino, frag->ofs, offset));
- holesize = min(holesize, frag->ofs - offset);
- D1(jffs2_print_frag_list(f));
- }
- D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
- memset(pg_buf, 0, holesize);
- pg_buf += holesize;
- offset += holesize;
- continue;
- } else if (frag->ofs < offset && (offset & (PAGE_CACHE_SIZE-1)) != 0) {
- D1(printk(KERN_NOTICE "Eep. Overlap in ino #%ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
- inode->i_ino, frag->ofs, offset));
- D1(jffs2_print_frag_list(f));
- memset(pg_buf, 0, end - offset);
- ClearPageUptodate(pg);
- SetPageError(pg);
- kunmap(pg);
- return -EIO;
- } else if (!frag->node) {
- __u32 holeend = min(end, frag->ofs + frag->size);
- D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
- memset(pg_buf, 0, holeend - offset);
- pg_buf += holeend - offset;
- offset = holeend;
- frag = frag->next;
- continue;
- } else {
- __u32 readlen;
- __u32 fragofs; /* offset within the frag to start reading */
-
- fragofs = offset - frag->ofs;
- readlen = min(frag->size - fragofs, end - offset);
- D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%x\n", frag->ofs+fragofs,
- fragofs+frag->ofs+readlen, frag->node->raw->flash_offset & ~3));
- ret = jffs2_read_dnode(c, frag->node, pg_buf, fragofs + frag->ofs - frag->node->ofs, readlen);
- D2(printk(KERN_DEBUG "node read done\n"));
- if (ret) {
- D1(printk(KERN_DEBUG"jffs2_readpage error %d\n",ret));
- memset(pg_buf, 0, readlen);
- ClearPageUptodate(pg);
- SetPageError(pg);
- kunmap(pg);
- return ret;
- }
-
- pg_buf += readlen;
- offset += readlen;
- frag = frag->next;
- D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
- }
+ ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);
+
+ if (ret) {
+ ClearPageUptodate(pg);
+ SetPageError(pg);
+ } else {
+ SetPageUptodate(pg);
+ ClearPageError(pg);
}
- D2(printk(KERN_DEBUG "readpage finishing\n"));
- SetPageUptodate(pg);
- ClearPageError(pg);
flush_dcache_page(pg);
-
kunmap(pg);
- D1(printk(KERN_DEBUG "readpage finished\n"));
+
+ D2(printk(KERN_DEBUG "readpage finished\n"));
return 0;
}
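
The open-coded fragment walk in readpage is gone; the rewritten function hands the whole page window to jffs2_read_inode_range() and just sets the page flags from the result. The window itself is derived from the page index; a trivial sketch with the page shift passed in as a parameter:

    #include <stdint.h>

    /* offset = pg->index << PAGE_CACHE_SHIFT, length = PAGE_CACHE_SIZE */
    static void page_range(unsigned long index, unsigned page_shift,
                           uint32_t *offset, uint32_t *len)
    {
            *offset = (uint32_t)(index << page_shift);
            *len    = 1u << page_shift;
    }
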
int jffs2_do_readpage_unlock(struct inode *inode, struct page *pg)
{
int ret = jffs2_do_readpage_nolock(inode, pg);
- UnlockPage(pg);
+ unlock_page(pg);
return ret;
}
-int jffs2_readpage (struct file *filp, struct page *pg)
+static int jffs2_readpage (struct file *filp, struct page *pg)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
int ret;
-
+
down(&f->sem);
ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
up(&f->sem);
return ret;
}
-int jffs2_prepare_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
+static int jffs2_prepare_write (struct file *filp, struct page *pg,
+ unsigned start, unsigned end)
{
struct inode *inode = pg->mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
- __u32 pageofs = pg->index << PAGE_CACHE_SHIFT;
+ uint32_t pageofs = pg->index << PAGE_CACHE_SHIFT;
int ret = 0;
- D1(printk(KERN_DEBUG "jffs2_prepare_write() nrpages %ld\n", inode->i_mapping->nrpages));
+ D1(printk(KERN_DEBUG "jffs2_prepare_write()\n"));
if (pageofs > inode->i_size) {
/* Make new hole frag from old EOF to new page */
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_raw_inode ri;
struct jffs2_full_dnode *fn;
- __u32 phys_ofs, alloc_len;
-
+ uint32_t phys_ofs, alloc_len;
+
D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
(unsigned int)inode->i_size, pageofs));
- ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
+ ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret)
return ret;
down(&f->sem);
memset(&ri, 0, sizeof(ri));
- ri.magic = JFFS2_MAGIC_BITMASK;
- ri.nodetype = JFFS2_NODETYPE_INODE;
- ri.totlen = sizeof(ri);
- ri.hdr_crc = crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4);
-
- ri.ino = f->inocache->ino;
- ri.version = ++f->highest_version;
- ri.mode = inode->i_mode;
- ri.uid = inode->i_uid;
- ri.gid = inode->i_gid;
- ri.isize = max((__u32)inode->i_size, pageofs);
- ri.atime = ri.ctime = ri.mtime = CURRENT_TIME;
- ri.offset = inode->i_size;
- ri.dsize = pageofs - inode->i_size;
- ri.csize = 0;
+ ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+ ri.totlen = cpu_to_je32(sizeof(ri));
+ ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+ ri.ino = cpu_to_je32(f->inocache->ino);
+ ri.version = cpu_to_je32(++f->highest_version);
+ ri.mode = cpu_to_jemode(inode->i_mode);
+ ri.uid = cpu_to_je16(inode->i_uid);
+ ri.gid = cpu_to_je16(inode->i_gid);
+ ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
+ ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
+ ri.offset = cpu_to_je32(inode->i_size);
+ ri.dsize = cpu_to_je32(pageofs - inode->i_size);
+ ri.csize = cpu_to_je32(0);
ri.compr = JFFS2_COMPR_ZERO;
- ri.node_crc = crc32(0, &ri, sizeof(ri)-8);
- ri.data_crc = 0;
-
- fn = jffs2_write_dnode(inode, &ri, NULL, 0, phys_ofs, NULL);
- jffs2_complete_reservation(c);
+ ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+ ri.data_crc = cpu_to_je32(0);
+
+ fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+
if (IS_ERR(fn)) {
ret = PTR_ERR(fn);
+ jffs2_complete_reservation(c);
up(&f->sem);
return ret;
}
@@ -391,16 +179,17 @@ int jffs2_prepare_write (struct file *filp, struct page *pg, unsigned start, uns
D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
+ jffs2_complete_reservation(c);
up(&f->sem);
return ret;
}
+ jffs2_complete_reservation(c);
inode->i_size = pageofs;
up(&f->sem);
}
-
/* Read in the page if it wasn't already present, unless it's a whole page */
- if (!Page_Uptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
+ if (!PageUptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
down(&f->sem);
ret = jffs2_do_readpage_nolock(inode, pg);
up(&f->sem);
@@ -409,7 +198,8 @@ int jffs2_prepare_write (struct file *filp, struct page *pg, unsigned start, uns
return ret;
}
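
When prepare_write touches a page beyond EOF, the gap between the old i_size and the new page is covered by a single JFFS2_COMPR_ZERO "hole" node rather than by writing zeroes. A sketch of how the relevant raw-inode fields are derived; field names are abbreviated, not the on-flash layout:

    #include <stdint.h>

    struct hole_desc { uint32_t offset, dsize, csize; };

    static struct hole_desc make_hole(uint32_t i_size, uint32_t pageofs)
    {
            struct hole_desc h;

            h.offset = i_size;             /* hole starts at current EOF */
            h.dsize  = pageofs - i_size;   /* covers up to the new page */
            h.csize  = 0;                  /* no payload stored on flash */
            return h;
    }
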
-int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
+static int jffs2_commit_write (struct file *filp, struct page *pg,
+ unsigned start, unsigned end)
{
/* Actually commit the write from the page cache page we're looking at.
* For now, we write the full page out each time. It sucks, but it's simple
@@ -417,141 +207,78 @@ int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsi
struct inode *inode = pg->mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- __u32 newsize = max_t(__u32, filp->f_dentry->d_inode->i_size, (pg->index << PAGE_CACHE_SHIFT) + end);
- __u32 file_ofs = (pg->index << PAGE_CACHE_SHIFT);
- __u32 writelen = min((__u32)PAGE_CACHE_SIZE, newsize - file_ofs);
struct jffs2_raw_inode *ri;
+ unsigned aligned_start = start & ~3;
int ret = 0;
- ssize_t writtenlen = 0;
+ uint32_t writtenlen = 0;
- D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
+ D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
+ inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
if (!start && end == PAGE_CACHE_SIZE) {
/* We need to avoid deadlock with page_cache_read() in
jffs2_garbage_collect_pass(). So we have to mark the
- page up to date, to prevent page_cache_read() from
+ page up to date, to prevent page_cache_read() from
trying to re-lock it. */
SetPageUptodate(pg);
}
ri = jffs2_alloc_raw_inode();
- if (!ri)
+
+ if (!ri) {
+ D1(printk(KERN_DEBUG "jffs2_commit_write(): Allocation of raw inode failed\n"));
return -ENOMEM;
+ }
- while(writelen) {
- struct jffs2_full_dnode *fn;
- unsigned char *comprbuf = NULL;
- unsigned char comprtype = JFFS2_COMPR_NONE;
- __u32 phys_ofs, alloclen;
- __u32 datalen, cdatalen;
+ /* Set the fields that the generic jffs2_write_inode_range() code can't find */
+ ri->ino = cpu_to_je32(inode->i_ino);
+ ri->mode = cpu_to_jemode(inode->i_mode);
+ ri->uid = cpu_to_je16(inode->i_uid);
+ ri->gid = cpu_to_je16(inode->i_gid);
+ ri->isize = cpu_to_je32((uint32_t)inode->i_size);
+ ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());
- D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, file_ofs));
+ /* In 2.4, it was already kmapped by generic_file_write(). Doesn't
+ hurt to do it again. The alternative is ifdefs, which are ugly. */
+ kmap(pg);
- ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen, ALLOC_NORMAL);
- if (ret) {
- SetPageError(pg);
- D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
- break;
- }
- down(&f->sem);
- datalen = writelen;
- cdatalen = min(alloclen - sizeof(*ri), writelen);
+ ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
+ (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
+ end - aligned_start, &writtenlen);
- comprbuf = kmalloc(cdatalen, GFP_KERNEL);
- if (comprbuf) {
- comprtype = jffs2_compress(page_address(pg)+ (file_ofs & (PAGE_CACHE_SIZE-1)), comprbuf, &datalen, &cdatalen);
- }
- if (comprtype == JFFS2_COMPR_NONE) {
- /* Either compression failed, or the allocation of comprbuf failed */
- if (comprbuf)
- kfree(comprbuf);
- comprbuf = page_address(pg) + (file_ofs & (PAGE_CACHE_SIZE -1));
- datalen = cdatalen;
- }
- /* Now comprbuf points to the data to be written, be it compressed or not.
- comprtype holds the compression type, and comprtype == JFFS2_COMPR_NONE means
- that the comprbuf doesn't need to be kfree()d.
- */
-
- ri->magic = JFFS2_MAGIC_BITMASK;
- ri->nodetype = JFFS2_NODETYPE_INODE;
- ri->totlen = sizeof(*ri) + cdatalen;
- ri->hdr_crc = crc32(0, ri, sizeof(struct jffs2_unknown_node)-4);
-
- ri->ino = inode->i_ino;
- ri->version = ++f->highest_version;
- ri->mode = inode->i_mode;
- ri->uid = inode->i_uid;
- ri->gid = inode->i_gid;
- ri->isize = max((__u32)inode->i_size, file_ofs + datalen);
- ri->atime = ri->ctime = ri->mtime = CURRENT_TIME;
- ri->offset = file_ofs;
- ri->csize = cdatalen;
- ri->dsize = datalen;
- ri->compr = comprtype;
- ri->node_crc = crc32(0, ri, sizeof(*ri)-8);
- ri->data_crc = crc32(0, comprbuf, cdatalen);
-
- fn = jffs2_write_dnode(inode, ri, comprbuf, cdatalen, phys_ofs, NULL);
+ kunmap(pg);
- jffs2_complete_reservation(c);
+ if (ret) {
+ /* There was an error writing. */
+ SetPageError(pg);
+ }
- if (comprtype != JFFS2_COMPR_NONE)
- kfree(comprbuf);
+ /* Adjust writtenlen for the padding we did, so we don't confuse our caller */
+ if (writtenlen < (start&3))
+ writtenlen = 0;
+ else
+ writtenlen -= (start&3);
- if (IS_ERR(fn)) {
- ret = PTR_ERR(fn);
- up(&f->sem);
- SetPageError(pg);
- break;
- }
- ret = jffs2_add_full_dnode_to_inode(c, f, fn);
- if (f->metadata) {
- jffs2_mark_node_obsolete(c, f->metadata->raw);
- jffs2_free_full_dnode(f->metadata);
- f->metadata = NULL;
- }
- up(&f->sem);
- if (ret) {
- /* Eep */
- D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret));
- jffs2_mark_node_obsolete(c, fn->raw);
- jffs2_free_full_dnode(fn);
- SetPageError(pg);
- break;
- }
- inode->i_size = ri->isize;
- inode->i_blocks = (inode->i_size + 511) >> 9;
- inode->i_ctime = inode->i_mtime = ri->ctime;
- if (!datalen) {
- printk(KERN_WARNING "Eep. We didn't actually write any bloody data\n");
- ret = -EIO;
- SetPageError(pg);
- break;
+ if (writtenlen) {
+ if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
+ inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen;
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+
+ inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
}
- D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen));
- writtenlen += datalen;
- file_ofs += datalen;
- writelen -= datalen;
}
jffs2_free_raw_inode(ri);
- if (writtenlen < end) {
+ if (start+writtenlen < end) {
/* generic_file_write has written more to the page cache than we've
- actually written to the medium. Mark the page !Uptodate so that
+ actually written to the medium. Mark the page !Uptodate so that
it gets reread */
D1(printk(KERN_DEBUG "jffs2_commit_write(): Not all bytes written. Marking page !uptodate\n"));
SetPageError(pg);
ClearPageUptodate(pg);
}
- if (writtenlen <= start) {
- /* We didn't even get to the start of the affected part */
- ret = ret?ret:-ENOSPC;
- D1(printk(KERN_DEBUG "jffs2_commit_write(): Only %x bytes written to page. start (%x) not reached, returning %d\n", writtenlen, start, ret));
- }
- writtenlen = min(end-start, writtenlen-start);
- D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d. nrpages is %ld\n",writtenlen?writtenlen:ret, inode->i_mapping->nrpages));
- return writtenlen?writtenlen:ret;
+ D1(printk(KERN_DEBUG "jffs2_commit_write() returning %d\n",start+writtenlen==end?0:ret));
+ return start+writtenlen==end?0:ret;
}
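
commit_write now rounds the region down to a 4-byte boundary (aligned_start = start & ~3) before handing it to the write path, then subtracts that padding again so the caller only sees the bytes it actually asked for. The adjustment in isolation:

    #include <stdint.h>

    static uint32_t visible_written(unsigned start, uint32_t writtenlen)
    {
            unsigned pad = start & 3;      /* bytes written before 'start' */

            return (writtenlen < pad) ? 0 : writtenlen - pad;
    }
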
diff --git a/linux-2.4.x/fs/jffs2/fs.c b/linux-2.4.x/fs/jffs2/fs.c
new file mode 100644
index 0000000..fc22284
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/fs.c
@@ -0,0 +1,695 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: fs.c,v 1.72 2006/02/09 16:13:35 dwmw2 Exp $
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/mtd/mtd.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/vfs.h>
+#include <linux/crc32.h>
+#include "nodelist.h"
+
+static int jffs2_flash_setup(struct jffs2_sb_info *c);
+
+static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
+{
+ struct jffs2_full_dnode *old_metadata, *new_metadata;
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ struct jffs2_raw_inode *ri;
+ unsigned short dev;
+ unsigned char *mdata = NULL;
+ int mdatalen = 0;
+ unsigned int ivalid;
+ uint32_t phys_ofs, alloclen;
+ int ret;
+ D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
+ ret = inode_change_ok(inode, iattr);
+ if (ret)
+ return ret;
+
+ /* Special cases - we don't want more than one data node
+ for these types on the medium at any time. So setattr
+ must read the original data associated with the node
+ (i.e. the device numbers or the target name) and write
+ it out again with the appropriate data attached */
+ if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
+ /* For these, we don't actually need to read the old node */
+ dev = old_encode_dev(inode->i_rdev);
+ mdata = (char *)&dev;
+ mdatalen = sizeof(dev);
+ D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
+ } else if (S_ISLNK(inode->i_mode)) {
+ mdatalen = f->metadata->size;
+ mdata = kmalloc(f->metadata->size, GFP_USER);
+ if (!mdata)
+ return -ENOMEM;
+ ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
+ if (ret) {
+ kfree(mdata);
+ return ret;
+ }
+ D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
+ }
+
+ ri = jffs2_alloc_raw_inode();
+ if (!ri) {
+ if (S_ISLNK(inode->i_mode))
+ kfree(mdata);
+ return -ENOMEM;
+ }
+
+ ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+ if (ret) {
+ jffs2_free_raw_inode(ri);
+ if (S_ISLNK(inode->i_mode & S_IFMT))
+ kfree(mdata);
+ return ret;
+ }
+ down(&f->sem);
+ ivalid = iattr->ia_valid;
+
+ ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+ ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
+ ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+
+ ri->ino = cpu_to_je32(inode->i_ino);
+ ri->version = cpu_to_je32(++f->highest_version);
+
+ ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
+ ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);
+
+ if (ivalid & ATTR_MODE)
+ if (iattr->ia_mode & S_ISGID &&
+ !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
+ ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
+ else
+ ri->mode = cpu_to_jemode(iattr->ia_mode);
+ else
+ ri->mode = cpu_to_jemode(inode->i_mode);
+
+
+ ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
+ ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
+ ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
+ ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));
+
+ ri->offset = cpu_to_je32(0);
+ ri->csize = ri->dsize = cpu_to_je32(mdatalen);
+ ri->compr = JFFS2_COMPR_NONE;
+ if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
+ /* It's an extension. Make it a hole node */
+ ri->compr = JFFS2_COMPR_ZERO;
+ ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
+ ri->offset = cpu_to_je32(inode->i_size);
+ }
+ ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+ if (mdatalen)
+ ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
+ else
+ ri->data_crc = cpu_to_je32(0);
+
+ new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL);
+ if (S_ISLNK(inode->i_mode))
+ kfree(mdata);
+
+ if (IS_ERR(new_metadata)) {
+ jffs2_complete_reservation(c);
+ jffs2_free_raw_inode(ri);
+ up(&f->sem);
+ return PTR_ERR(new_metadata);
+ }
+ /* It worked. Update the inode */
+ inode->i_atime = ITIME(je32_to_cpu(ri->atime));
+ inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
+ inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
+ inode->i_mode = jemode_to_cpu(ri->mode);
+ inode->i_uid = je16_to_cpu(ri->uid);
+ inode->i_gid = je16_to_cpu(ri->gid);
+
+
+ old_metadata = f->metadata;
+
+ if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
+ jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);
+
+ if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
+ jffs2_add_full_dnode_to_inode(c, f, new_metadata);
+ inode->i_size = iattr->ia_size;
+ f->metadata = NULL;
+ } else {
+ f->metadata = new_metadata;
+ }
+ if (old_metadata) {
+ jffs2_mark_node_obsolete(c, old_metadata->raw);
+ jffs2_free_full_dnode(old_metadata);
+ }
+ jffs2_free_raw_inode(ri);
+
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+
+ /* We have to do the vmtruncate() without f->sem held, since
+ some pages may be locked and waiting for it in readpage().
+ We are protected from a simultaneous write() extending i_size
+ back past iattr->ia_size, because do_truncate() holds the
+ generic inode semaphore. */
+ if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
+ vmtruncate(inode, iattr->ia_size);
+
+ return 0;
+}
+
+int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ return jffs2_do_setattr(dentry->d_inode, iattr);
+}
+
+int jffs2_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+ unsigned long avail;
+
+ buf->f_type = JFFS2_SUPER_MAGIC;
+ buf->f_bsize = 1 << PAGE_SHIFT;
+ buf->f_blocks = c->flash_size >> PAGE_SHIFT;
+ buf->f_files = 0;
+ buf->f_ffree = 0;
+ buf->f_namelen = JFFS2_MAX_NAME_LEN;
+
+ spin_lock(&c->erase_completion_lock);
+ avail = c->dirty_size + c->free_size;
+ if (avail > c->sector_size * c->resv_blocks_write)
+ avail -= c->sector_size * c->resv_blocks_write;
+ else
+ avail = 0;
+ spin_unlock(&c->erase_completion_lock);
+
+ buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
+
+ return 0;
+}
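
statfs reports the dirty plus free space minus the blocks held back for internal use (chiefly so garbage collection can still make progress); if even the reserve cannot be covered, it reports zero. A stand-alone version of that calculation:

    static unsigned long usable_space(unsigned long dirty_size,
                                      unsigned long free_size,
                                      unsigned long sector_size,
                                      unsigned resv_blocks_write)
    {
            unsigned long avail = dirty_size + free_size;
            unsigned long resv  = sector_size * resv_blocks_write;

            return avail > resv ? avail - resv : 0;
    }
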
+
+
+void jffs2_clear_inode (struct inode *inode)
+{
+ /* We can forget about this inode for now - drop all
+ * the nodelists associated with it, etc.
+ */
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+
+ D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
+
+ jffs2_do_clear_inode(c, f);
+}
+
+void jffs2_read_inode (struct inode *inode)
+{
+ struct jffs2_inode_info *f;
+ struct jffs2_sb_info *c;
+ struct jffs2_raw_inode latest_node;
+ int ret;
+
+ D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino));
+
+ f = JFFS2_INODE_INFO(inode);
+ c = JFFS2_SB_INFO(inode->i_sb);
+
+ jffs2_init_inode_info(f);
+ down(&f->sem);
+
+ ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
+
+ if (ret) {
+ make_bad_inode(inode);
+ up(&f->sem);
+ return;
+ }
+ inode->i_mode = jemode_to_cpu(latest_node.mode);
+ inode->i_uid = je16_to_cpu(latest_node.uid);
+ inode->i_gid = je16_to_cpu(latest_node.gid);
+ inode->i_size = je32_to_cpu(latest_node.isize);
+ inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
+ inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
+ inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
+
+ inode->i_nlink = f->inocache->nlink;
+
+ inode->i_blksize = PAGE_SIZE;
+ inode->i_blocks = (inode->i_size + 511) >> 9;
+
+ switch (inode->i_mode & S_IFMT) {
+ jint16_t rdev;
+
+ case S_IFLNK:
+ inode->i_op = &jffs2_symlink_inode_operations;
+ break;
+
+ case S_IFDIR:
+ {
+ struct jffs2_full_dirent *fd;
+
+ for (fd=f->dents; fd; fd = fd->next) {
+ if (fd->type == DT_DIR && fd->ino)
+ inode->i_nlink++;
+ }
+ /* and '..' */
+ inode->i_nlink++;
+ /* Root dir gets i_nlink 3 for some reason */
+ if (inode->i_ino == 1)
+ inode->i_nlink++;
+
+ inode->i_op = &jffs2_dir_inode_operations;
+ inode->i_fop = &jffs2_dir_operations;
+ break;
+ }
+ case S_IFREG:
+ inode->i_op = &jffs2_file_inode_operations;
+ inode->i_fop = &jffs2_file_operations;
+ inode->i_mapping->a_ops = &jffs2_file_address_operations;
+ inode->i_mapping->nrpages = 0;
+ break;
+
+ case S_IFBLK:
+ case S_IFCHR:
+ /* Read the device numbers from the media */
+ D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
+ if (jffs2_read_dnode(c, f, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) {
+ /* Eep */
+ printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ make_bad_inode(inode);
+ return;
+ }
+
+ case S_IFSOCK:
+ case S_IFIFO:
+ inode->i_op = &jffs2_file_inode_operations;
+ init_special_inode(inode, inode->i_mode,
+ old_decode_dev((je16_to_cpu(rdev))));
+ break;
+
+ default:
+ printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
+ }
+
+ up(&f->sem);
+
+ D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
+}
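
/* Illustrative model of the directory link-count and i_blocks accounting
 * done in jffs2_read_inode() above: start from the on-flash nlink, add one
 * per child directory entry with a nonzero ino, one for '..', and one more
 * for the root. Field names here are stand-ins for f->dents. */
#include <stdint.h>
#include <stdio.h>

struct model_dirent { uint32_t ino; int is_dir; };

static unsigned model_dir_nlink(unsigned on_flash_nlink,
				const struct model_dirent *d, int n, int is_root)
{
	unsigned nlink = on_flash_nlink;	/* from the inode cache */
	for (int i = 0; i < n; i++)
		if (d[i].is_dir && d[i].ino)
			nlink++;		/* each subdirectory's '..' links back */
	nlink++;				/* and our own '..' */
	if (is_root)
		nlink++;			/* root gets one extra, as noted above */
	return nlink;
}

static uint64_t model_i_blocks(uint64_t i_size)
{
	return (i_size + 511) >> 9;		/* 512-byte units, rounded up */
}

int main(void)
{
	struct model_dirent d[] = { {2, 1}, {3, 0}, {0, 1} };
	printf("nlink=%u blocks=%llu\n", model_dir_nlink(1, d, 3, 0),
	       (unsigned long long)model_i_blocks(4097));
	return 0;
}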
+
+void jffs2_dirty_inode(struct inode *inode)
+{
+ struct iattr iattr;
+
+ if (!(inode->i_state & I_DIRTY_DATASYNC)) {
+ D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino));
+ return;
+ }
+
+ D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino));
+
+ iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
+ iattr.ia_mode = inode->i_mode;
+ iattr.ia_uid = inode->i_uid;
+ iattr.ia_gid = inode->i_gid;
+ iattr.ia_atime = inode->i_atime;
+ iattr.ia_mtime = inode->i_mtime;
+ iattr.ia_ctime = inode->i_ctime;
+
+ jffs2_do_setattr(inode, &iattr);
+}
+
+int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
+{
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+ if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
+ return -EROFS;
+
+ /* We stop if it was running, then restart if it needs to.
+ This also catches the case where it was stopped and this
+ is just a remount to restart it.
+ Flush the writebuffer, if necessary, else we lose it */
+ if (!(sb->s_flags & MS_RDONLY)) {
+ jffs2_stop_garbage_collect_thread(c);
+ down(&c->alloc_sem);
+ jffs2_flush_wbuf_pad(c);
+ up(&c->alloc_sem);
+ }
+
+ if (!(*flags & MS_RDONLY))
+ jffs2_start_garbage_collect_thread(c);
+
+ *flags |= MS_NOATIME;
+
+ return 0;
+}
+
+void jffs2_write_super (struct super_block *sb)
+{
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+ sb->s_dirt = 0;
+
+ if (sb->s_flags & MS_RDONLY)
+ return;
+
+ D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
+ jffs2_garbage_collect_trigger(c);
+ jffs2_erase_pending_blocks(c, 0);
+ jffs2_flush_wbuf_gc(c, 0);
+}
+
+
+/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
+ fill in the raw_inode while you're at it. */
+struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
+{
+ struct inode *inode;
+ struct super_block *sb = dir_i->i_sb;
+ struct jffs2_sb_info *c;
+ struct jffs2_inode_info *f;
+ int ret;
+
+ D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));
+
+ c = JFFS2_SB_INFO(sb);
+
+ inode = new_inode(sb);
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ f = JFFS2_INODE_INFO(inode);
+ jffs2_init_inode_info(f);
+ down(&f->sem);
+
+ memset(ri, 0, sizeof(*ri));
+ /* Set OS-specific defaults for new inodes */
+ ri->uid = cpu_to_je16(current->fsuid);
+
+ if (dir_i->i_mode & S_ISGID) {
+ ri->gid = cpu_to_je16(dir_i->i_gid);
+ if (S_ISDIR(mode))
+ mode |= S_ISGID;
+ } else {
+ ri->gid = cpu_to_je16(current->fsgid);
+ }
+ ri->mode = cpu_to_jemode(mode);
+ ret = jffs2_do_new_inode (c, f, mode, ri);
+ if (ret) {
+ make_bad_inode(inode);
+ iput(inode);
+ return ERR_PTR(ret);
+ }
+ inode->i_nlink = 1;
+ inode->i_ino = je32_to_cpu(ri->ino);
+ inode->i_mode = jemode_to_cpu(ri->mode);
+ inode->i_gid = je16_to_cpu(ri->gid);
+ inode->i_uid = je16_to_cpu(ri->uid);
+ inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
+ ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
+
+ inode->i_blksize = PAGE_SIZE;
+ inode->i_blocks = 0;
+ inode->i_size = 0;
+
+ insert_inode_hash(inode);
+
+ return inode;
+}
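
/* Sketch of the group-inheritance rule applied in jffs2_new_inode() above:
 * a setgid directory hands its gid to new children, and new subdirectories
 * also inherit the setgid bit itself. Constants come from <sys/stat.h>;
 * the struct is a stand-in for this standalone model. */
#include <sys/stat.h>
#include <stdio.h>

struct model_ids { unsigned mode; unsigned uid; unsigned gid; };

static struct model_ids model_new_inode_ids(unsigned dir_mode, unsigned dir_gid,
					    unsigned mode, unsigned fsuid, unsigned fsgid)
{
	struct model_ids id = { .mode = mode, .uid = fsuid, .gid = fsgid };

	if (dir_mode & S_ISGID) {
		id.gid = dir_gid;		/* inherit group from parent dir */
		if (S_ISDIR(mode))
			id.mode |= S_ISGID;	/* subdirs propagate the bit */
	}
	return id;
}

int main(void)
{
	struct model_ids id = model_new_inode_ids(S_IFDIR | S_ISGID | 0775, 100,
						  S_IFDIR | 0755, 1000, 1000);
	printf("mode=%o gid=%u\n", id.mode, id.gid);
	return 0;
}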
+
+
+int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct jffs2_sb_info *c;
+ struct inode *root_i;
+ int ret;
+ size_t blocks;
+
+ c = JFFS2_SB_INFO(sb);
+
+#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
+ if (c->mtd->type == MTD_NANDFLASH) {
+ JFFS2_ERROR("cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n");
+ return -EINVAL;
+ }
+ if (c->mtd->type == MTD_DATAFLASH) {
+ JFFS2_ERROR("cannot operate on DataFlash unless jffs2 DataFlash support is compiled in.\n");
+ return -EINVAL;
+ }
+#endif
+
+ c->flash_size = c->mtd->size;
+ c->sector_size = c->mtd->erasesize;
+ blocks = c->flash_size / c->sector_size;
+
+ /*
+ * Size alignment check
+ */
+ if ((c->sector_size * blocks) != c->flash_size) {
+ c->flash_size = c->sector_size * blocks;
+ JFFS2_WARNING("flash size not aligned to erasesize, reducing to %dKiB\n",
+ c->flash_size / 1024);
+ }
+
+ if (c->flash_size < 5*c->sector_size) {
+ JFFS2_ERROR("too few erase blocks (%d)\n", c->flash_size / c->sector_size);
+ return -EINVAL;
+ }
+
+ c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
+ c->ebh_size = PAD(sizeof(struct jffs2_raw_ebh));
+
+ /* NAND (or other bizarre) flash... do setup accordingly */
+ ret = jffs2_flash_setup(c);
+ if (ret)
+ return ret;
+
+ c->inocache_list = kmalloc(INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
+ if (!c->inocache_list) {
+ ret = -ENOMEM;
+ goto out_wbuf;
+ }
+ memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *));
+
+ if ((ret = jffs2_do_mount_fs(c)))
+ goto out_inohash;
+
+ ret = -EINVAL;
+
+ dbg_fsbuild("getting root inode\n");
+ root_i = iget(sb, 1);
+ if (is_bad_inode(root_i)) {
+ JFFS2_ERROR("get root inode failed\n");
+ goto out_root_i;
+ }
+
+ dbg_fsbuild("call d_alloc_root()\n");
+ sb->s_root = d_alloc_root(root_i);
+ if (!sb->s_root)
+ goto out_root_i;
+
+ sb->s_maxbytes = 0xFFFFFFFF;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = JFFS2_SUPER_MAGIC;
+ if (!(sb->s_flags & MS_RDONLY))
+ jffs2_start_garbage_collect_thread(c);
+ return 0;
+
+ out_root_i:
+ iput(root_i);
+ jffs2_free_ino_caches(c);
+ jffs2_free_raw_node_refs(c);
+ jffs2_free_eraseblocks(c);
+ out_inohash:
+ kfree(c->inocache_list);
+ out_wbuf:
+ jffs2_flash_cleanup(c);
+
+ return ret;
+}
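
/* Standalone sketch of the geometry checks in jffs2_do_fill_super() above:
 * round the flash size down to a whole number of eraseblocks, then refuse
 * anything smaller than five eraseblocks. A user-space model only. */
#include <stdint.h>
#include <stdio.h>

static int model_check_geometry(uint32_t *flash_size, uint32_t sector_size)
{
	uint32_t blocks = *flash_size / sector_size;

	if (blocks * sector_size != *flash_size) {
		*flash_size = blocks * sector_size;	/* trim the ragged tail */
		printf("warning: trimmed flash to %u KiB\n", *flash_size / 1024);
	}
	if (*flash_size < 5 * sector_size)
		return -1;	/* too few eraseblocks to garbage-collect */
	return 0;
}

int main(void)
{
	uint32_t size = (8 << 20) + 1234;	/* 8MiB plus a partial block */
	printf("ok=%d size=%u\n", model_check_geometry(&size, 64 << 10), size);
	return 0;
}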
+
+void jffs2_gc_release_inode(struct jffs2_sb_info *c,
+ struct jffs2_inode_info *f)
+{
+ iput(OFNI_EDONI_2SFFJ(f));
+}
+
+struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
+ int inum, int nlink)
+{
+ struct inode *inode;
+ struct jffs2_inode_cache *ic;
+ if (!nlink) {
+ /* The inode has zero nlink but its nodes weren't yet marked
+ obsolete. This has to be because we're still waiting for
+ the final (close() and) iput() to happen.
+
+ There's a possibility that the final iput() could have
+ happened while we were contemplating. In order to ensure
+ that we don't cause a new read_inode() (which would fail)
+ for the inode in question, we use ilookup() in this case
+ instead of iget().
+
+ The nlink can't _become_ zero at this point because we're
+ holding the alloc_sem, and jffs2_do_unlink() would also
+ need that while decrementing nlink on any inode.
+ */
+ inode = ilookup(OFNI_BS_2SFFJ(c), inum);
+ if (!inode) {
+ D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
+ inum));
+
+ spin_lock(&c->inocache_lock);
+ ic = jffs2_get_ino_cache(c, inum);
+ if (!ic) {
+ D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
+ spin_unlock(&c->inocache_lock);
+ return NULL;
+ }
+ if (ic->state != INO_STATE_CHECKEDABSENT) {
+ /* Wait for progress. Don't just loop */
+ D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
+ ic->ino, ic->state));
+ sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+ } else {
+ spin_unlock(&c->inocache_lock);
+ }
+
+ return NULL;
+ }
+ } else {
+ /* Inode has links to it still; they're not going away because
+ jffs2_do_unlink() would need the alloc_sem and we have it.
+ Just iget() it, and if read_inode() is necessary that's OK.
+ */
+ inode = iget(OFNI_BS_2SFFJ(c), inum);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ }
+ if (is_bad_inode(inode)) {
+ printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n",
+ inum, nlink);
+ /* NB. This will happen again. We need to do something appropriate here. */
+ iput(inode);
+ return ERR_PTR(-EIO);
+ }
+
+ return JFFS2_INODE_INFO(inode);
+}
+
+unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
+ struct jffs2_inode_info *f,
+ unsigned long offset,
+ unsigned long *priv)
+{
+ struct inode *inode = OFNI_EDONI_2SFFJ(f);
+ struct page *pg;
+
+ pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
+ (void *)jffs2_do_readpage_unlock, inode);
+ if (IS_ERR(pg))
+ return (void *)pg;
+
+ *priv = (unsigned long)pg;
+ return kmap(pg);
+}
+
+void jffs2_gc_release_page(struct jffs2_sb_info *c,
+ unsigned char *ptr,
+ unsigned long *priv)
+{
+ struct page *pg = (void *)*priv;
+
+ kunmap(pg);
+ page_cache_release(pg);
+}
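
/* The pair of helpers above hands the GC code a mapped page plus a private
 * cookie so it can later unmap and release it. A user-space model of the
 * same fetch/cookie/release pattern, with malloc standing in for the page
 * cache (names are illustrative assumptions, not kernel API): */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

static unsigned char *model_fetch_page(unsigned long offset, unsigned long *priv)
{
	unsigned char *buf = malloc(4096);
	if (!buf)
		return NULL;
	memset(buf, 0, 4096);		/* pretend we read it from the fs */
	*priv = (unsigned long)buf;	/* cookie the caller must hand back */
	return buf;
}

static void model_release_page(unsigned char *ptr, unsigned long *priv)
{
	(void)ptr;
	free((void *)*priv);		/* undo whatever fetch pinned */
}

int main(void)
{
	unsigned long priv;
	unsigned char *p = model_fetch_page(0, &priv);
	if (p) {
		printf("first byte: %u\n", p[0]);
		model_release_page(p, &priv);
	}
	return 0;
}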
+
+static int jffs2_flash_setup(struct jffs2_sb_info *c) {
+ int ret = 0;
+
+ if (jffs2_ebh_oob(c)) {
+ /* NAND flash... do setup accordingly */
+ ret = jffs2_nand_flash_setup(c);
+ if (ret)
+ return ret;
+ }
+
+ /* add setups for other bizarre flashes here... */
+ if (jffs2_nor_ecc(c)) {
+ ret = jffs2_nor_ecc_flash_setup(c);
+ if (ret)
+ return ret;
+ }
+
+ /* and Dataflash */
+ if (jffs2_dataflash(c)) {
+ ret = jffs2_dataflash_setup(c);
+ if (ret)
+ return ret;
+ }
+
+ /* and Intel "Sibley" flash */
+ if (jffs2_nor_wbuf_flash(c)) {
+ ret = jffs2_nor_wbuf_flash_setup(c);
+ if (ret)
+ return ret;
+ }
+
+ /* and MTD-on-blockdevice */
+ if (jffs2_block_mtd(c)) {
+ ret = jffs2_block_mtd_setup(c);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
+
+ if (jffs2_ebh_oob(c)) {
+ jffs2_nand_flash_cleanup(c);
+ }
+
+ /* add cleanups for other bizarre flashes here... */
+ if (jffs2_nor_ecc(c)) {
+ jffs2_nor_ecc_flash_cleanup(c);
+ }
+
+ /* and DataFlash */
+ if (jffs2_dataflash(c)) {
+ jffs2_dataflash_cleanup(c);
+ }
+
+ /* and Intel "Sibley" flash */
+ if (jffs2_nor_wbuf_flash(c)) {
+ jffs2_nor_wbuf_flash_cleanup(c);
+ }
+
+ /* and MTD-on-blockdevice */
+ if (jffs2_block_mtd(c)) {
+ jffs2_block_mtd_cleanup(c);
+ }
+}
diff --git a/linux-2.4.x/fs/jffs2/gc.c b/linux-2.4.x/fs/jffs2/gc.c
index ead017f..08b34f7 100644
--- a/linux-2.4.x/fs/jffs2/gc.c
+++ b/linux-2.4.x/fs/jffs2/gc.c
@@ -1,99 +1,170 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: gc.c,v 1.52.2.5 2002/10/10 13:18:38 dwmw2 Exp $
+ * $Id: gc.c,v 1.161 2006/04/09 21:58:23 dwmw2 Exp $
*
*/
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
-#include <linux/jffs2.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
#include <linux/pagemap.h>
+#include <linux/crc32.h>
+#include <linux/compiler.h>
+#include <linux/stat.h>
#include "nodelist.h"
-#include "crc32.h"
-
-static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct inode *inode, struct jffs2_full_dnode *fd);
-static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct inode *inode, struct jffs2_full_dirent *fd);
-static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct inode *inode, struct jffs2_full_dirent *fd);
+#include "compr.h"
+
+static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
+ struct jffs2_inode_cache *ic,
+ struct jffs2_raw_node_ref *raw);
+static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_inode_info *f, struct jffs2_full_dnode *fd);
+static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
+static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct inode *indeo, struct jffs2_full_dnode *fn,
- __u32 start, __u32 end);
+ struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+ uint32_t start, uint32_t end);
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct inode *inode, struct jffs2_full_dnode *fn,
- __u32 start, __u32 end);
+ struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+ uint32_t start, uint32_t end);
+static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f);
/* Called with erase_completion_lock held */
static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
{
struct jffs2_eraseblock *ret;
struct list_head *nextlist = NULL;
+ int n = jiffies % 128;
+ int flag = 0;
/* Pick an eraseblock to garbage collect next. This is where we'll
put the clever wear-levelling algorithms. Eventually. */
- if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > JFFS2_RESERVED_BLOCKS_GCBAD) {
+ /* We possibly want to favour the dirtier blocks more when the
+ number of free blocks is low. */
+again:
+ if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n"));
nextlist = &c->bad_used_list;
- } else if (jiffies % 100 && !list_empty(&c->dirty_list)) {
- /* Most of the time, pick one off the dirty list */
+ } else if (n < 50 && !list_empty(&c->erasable_list)) {
+ /* Note that most of them will have gone directly to be erased.
+ So don't favour the erasable_list _too_ much. */
+ D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
+ nextlist = &c->erasable_list;
+ } else if (n < 112 && !list_empty(&c->very_dirty_list)) {
+ /* Most of the time, pick one off the very_dirty list */
+ D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n"));
+ nextlist = &c->very_dirty_list;
+ flag = 1;
+ } else if (n < 128 && !list_empty(&c->dirty_list)) {
D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n"));
nextlist = &c->dirty_list;
+ flag = 1;
} else if (!list_empty(&c->clean_list)) {
D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n"));
nextlist = &c->clean_list;
+ flag = 1;
} else if (!list_empty(&c->dirty_list)) {
D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n"));
nextlist = &c->dirty_list;
+ flag = 1;
+ } else if (!list_empty(&c->very_dirty_list)) {
+ D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"));
+ nextlist = &c->very_dirty_list;
+ flag = 1;
+ } else if (!list_empty(&c->erasable_list)) {
+ D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"));
+
+ nextlist = &c->erasable_list;
+ } else if (!list_empty(&c->erasable_pending_wbuf_list)) {
+ /* There are blocks waiting for the wbuf sync */
+ D1(printk(KERN_DEBUG "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n"));
+ spin_unlock(&c->erase_completion_lock);
+ jffs2_flush_wbuf_pad(c);
+ spin_lock(&c->erase_completion_lock);
+ goto again;
} else {
- /* Eep. Both were empty */
- printk(KERN_NOTICE "jffs2: No clean _or_ dirty blocks to GC from! Where are they all?\n");
+ /* Eep. All were empty */
+ D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"));
return NULL;
}
ret = list_entry(nextlist->next, struct jffs2_eraseblock, list);
list_del(&ret->list);
+ if (flag == 1) {
+ jffs2_remove_from_hash_table(c, ret, 1);
+ }
c->gcblock = ret;
ret->gc_node = ret->first_node;
if (!ret->gc_node) {
printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
BUG();
}
+
+ /* Have we accidentally picked a clean block with wasted space ? */
+ if (ret->wasted_size) {
+ D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
+ ret->dirty_size += ret->wasted_size;
+ c->wasted_size -= ret->wasted_size;
+ c->dirty_size += ret->wasted_size;
+ ret->wasted_size = 0;
+ }
+
+ return ret;
+}
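
/* Sketch of the weighted pick in jffs2_find_gc_block() above: a value in
 * [0,128) drawn from jiffies selects the erasable list about 50/128 of the
 * time, very_dirty about 62/128 and dirty about 16/128, with fall-backs when
 * lists are empty. rand() stands in for jiffies; the strings are just labels
 * for this model. */
#include <stdlib.h>
#include <stdio.h>

static const char *model_pick_list(int have_bad, int have_erasable,
				   int have_very_dirty, int have_dirty,
				   int have_clean)
{
	int n = rand() % 128;

	if (have_bad)			return "bad_used";
	if (n < 50 && have_erasable)	return "erasable";
	if (n < 112 && have_very_dirty)	return "very_dirty";
	if (n < 128 && have_dirty)	return "dirty";
	if (have_clean)			return "clean";
	if (have_dirty)			return "dirty";
	if (have_very_dirty)		return "very_dirty";
	if (have_erasable)		return "erasable";
	return "none";
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("%s\n", model_pick_list(0, 1, 1, 1, 1));
	return 0;
}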
+
+static int jffs2_should_pick_used_block(struct jffs2_sb_info *c)
+{
+ static uint8_t seqno = 99;
+
+ if (((c->max_erase_count >> BUCKET_RANGE_BIT_LEN) - c->used_blocks_current_index)
+ <= WL_DELTA/BUCKET_RANGE) {
+ return 0;
+ }
+ seqno++;
+ if (seqno == 100) {
+ seqno = 0;
+ return 1;
+ }
+ return 0;
+}
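
/* Model of jffs2_should_pick_used_block() above: once the erase-count
 * spread exceeds the wear-levelling delta, roughly one GC pass in a hundred
 * is pointed at a little-worn used block instead of a dirty one. The
 * threshold comparison is folded into 'spread_exceeded' for this sketch. */
#include <stdio.h>

static int model_should_pick_used(int spread_exceeded)
{
	static unsigned char seqno = 99;	/* so the very first call fires */

	if (!spread_exceeded)
		return 0;
	if (++seqno == 100) {
		seqno = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	int hits = 0;
	for (int i = 0; i < 1000; i++)
		hits += model_should_pick_used(1);
	printf("picked a used block %d times in 1000 passes\n", hits);
	return 0;
}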
+
+static struct jffs2_eraseblock *jffs2_find_gc_block_with_wl(struct jffs2_sb_info *c)
+{
+ struct jffs2_eraseblock *ret;
+
+ if (jffs2_should_pick_used_block(c)) {
+ ret = jffs2_get_used_block(c);
+ if (ret == NULL) {
+ return NULL;
+ }
+ c->gcblock = ret;
+ ret->gc_node = ret->first_node;
+ if (!ret->gc_node) {
+ printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
+ BUG();
+ }
+ if (ret->wasted_size) {
+ D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
+ ret->dirty_size += ret->wasted_size;
+ c->wasted_size -= ret->wasted_size;
+ c->dirty_size += ret->wasted_size;
+ ret->wasted_size = 0;
+ }
+ } else {
+ ret = jffs2_find_gc_block(c);
+ }
+
return ret;
}
@@ -103,36 +174,111 @@ static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
*/
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
{
- struct jffs2_eraseblock *jeb;
struct jffs2_inode_info *f;
+ struct jffs2_inode_cache *ic;
+ struct jffs2_eraseblock *jeb;
struct jffs2_raw_node_ref *raw;
- struct jffs2_node_frag *frag;
- struct jffs2_full_dnode *fn = NULL;
- struct jffs2_full_dirent *fd;
- __u32 start = 0, end = 0, nrfrags = 0;
- __u32 inum;
- struct inode *inode;
- int ret = 0;
+ int ret = 0, inum, nlink;
if (down_interruptible(&c->alloc_sem))
return -EINTR;
- spin_lock_bh(&c->erase_completion_lock);
+ for (;;) {
+ spin_lock(&c->erase_completion_lock);
+ if (!c->unchecked_size)
+ break;
+
+ /* We can't start doing GC yet. We haven't finished checking
+ the node CRCs etc. Do it now. */
+
+ /* checked_ino is protected by the alloc_sem */
+ if (c->checked_ino > c->highest_ino) {
+ printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
+ c->unchecked_size);
+ jffs2_dbg_dump_block_lists_nolock(c);
+ spin_unlock(&c->erase_completion_lock);
+ BUG();
+ }
+
+ spin_unlock(&c->erase_completion_lock);
+
+ spin_lock(&c->inocache_lock);
+
+ ic = jffs2_get_ino_cache(c, c->checked_ino++);
+
+ if (!ic) {
+ spin_unlock(&c->inocache_lock);
+ continue;
+ }
+
+ if (!ic->nlink) {
+ D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n",
+ ic->ino));
+ spin_unlock(&c->inocache_lock);
+ continue;
+ }
+ switch(ic->state) {
+ case INO_STATE_CHECKEDABSENT:
+ case INO_STATE_PRESENT:
+ D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino));
+ spin_unlock(&c->inocache_lock);
+ continue;
+
+ case INO_STATE_GC:
+ case INO_STATE_CHECKING:
+ printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state);
+ spin_unlock(&c->inocache_lock);
+ BUG();
+
+ case INO_STATE_READING:
+ /* We need to wait for it to finish, lest we move on
+ and trigger the BUG() above while we haven't yet
+ finished checking all its nodes */
+ D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
+ /* We need to come back again for the _same_ inode. We've
+ made no progress in this case, but that should be OK */
+ c->checked_ino--;
+
+ up(&c->alloc_sem);
+ sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+ return 0;
+
+ default:
+ BUG();
+
+ case INO_STATE_UNCHECKED:
+ ;
+ }
+ ic->state = INO_STATE_CHECKING;
+ spin_unlock(&c->inocache_lock);
+
+ D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino));
+
+ ret = jffs2_do_crccheck_inode(c, ic);
+ if (ret)
+ printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino);
+
+ jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
+ up(&c->alloc_sem);
+ return ret;
+ }
/* First, work out which block we're garbage-collecting */
jeb = c->gcblock;
if (!jeb)
- jeb = jffs2_find_gc_block(c);
+ jeb = jffs2_find_gc_block_with_wl(c);
if (!jeb) {
- printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n");
- spin_unlock_bh(&c->erase_completion_lock);
+ D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));
+ spin_unlock(&c->erase_completion_lock);
up(&c->alloc_sem);
return -EIO;
}
- D1(printk(KERN_DEBUG "garbage collect from block at phys 0x%08x\n", jeb->offset));
+ D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size));
+ D1(if (c->nextblock)
+ printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));
if (!jeb->used_size) {
up(&c->alloc_sem);
@@ -140,62 +286,216 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
}
raw = jeb->gc_node;
-
- while(raw->flash_offset & 1) {
- D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", raw->flash_offset &~3));
- jeb->gc_node = raw = raw->next_phys;
- if (!raw) {
+
+ while(ref_obsolete(raw)) {
+ D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
+ raw = raw->next_phys;
+ if (unlikely(!raw)) {
printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
- printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
+ printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size);
- spin_unlock_bh(&c->erase_completion_lock);
+ jeb->gc_node = raw;
+ spin_unlock(&c->erase_completion_lock);
up(&c->alloc_sem);
BUG();
}
}
- D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", raw->flash_offset &~3));
+ jeb->gc_node = raw;
+
+ D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw)));
+
if (!raw->next_in_ino) {
/* Inode-less node. Clean marker, snapshot or something like that */
- spin_unlock_bh(&c->erase_completion_lock);
+ /* FIXME: If it's something that needs to be copied, including something
+ we don't grok that has JFFS2_NODETYPE_RWCOMPAT_COPY, we should do so */
+ spin_unlock(&c->erase_completion_lock);
jffs2_mark_node_obsolete(c, raw);
up(&c->alloc_sem);
goto eraseit_lock;
}
-
- inum = jffs2_raw_ref_to_inum(raw);
- D1(printk(KERN_DEBUG "Inode number is #%u\n", inum));
- spin_unlock_bh(&c->erase_completion_lock);
+ ic = jffs2_raw_ref_to_ic(raw);
+
+ /* We need to hold the inocache. Either the erase_completion_lock or
+ the inocache_lock are sufficient; we trade down since the inocache_lock
+ causes less contention. */
+ spin_lock(&c->inocache_lock);
+
+ spin_unlock(&c->erase_completion_lock);
+
+ D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino));
+
+ /* Three possibilities:
+ 1. Inode is already in-core. We must iget it and do proper
+ updating to its fragtree, etc.
+ 2. Inode is not in-core, node is REF_PRISTINE. We lock the
+ inocache to prevent a read_inode(), copy the node intact.
+ 3. Inode is not in-core, node is not pristine. We must iget()
+ and take the slow path.
+ */
+
+ switch(ic->state) {
+ case INO_STATE_CHECKEDABSENT:
+ /* It's been checked, but it's not currently in-core.
+ We can just copy any pristine nodes, but have
+ to prevent anyone else from doing read_inode() while
+ we're at it, so we set the state accordingly */
+ if (ref_flags(raw) == REF_PRISTINE)
+ ic->state = INO_STATE_GC;
+ else {
+ D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
+ ic->ino));
+ }
+ break;
- D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x, ino #%u\n", jeb->offset, raw->flash_offset&~3, inum));
+ case INO_STATE_PRESENT:
+ /* It's in-core. GC must iget() it. */
+ break;
- inode = iget(OFNI_BS_2SFFJ(c), inum);
- if (is_bad_inode(inode)) {
- printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u\n", inum);
- /* NB. This will happen again. We need to do something appropriate here. */
+ case INO_STATE_UNCHECKED:
+ case INO_STATE_CHECKING:
+ case INO_STATE_GC:
+ /* Should never happen. We should have finished checking
+ by the time we actually start doing any GC, and since
+ we're holding the alloc_sem, no other garbage collection
+ can happen.
+ */
+ printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
+ ic->ino, ic->state);
up(&c->alloc_sem);
- iput(inode);
- return -EIO;
+ spin_unlock(&c->inocache_lock);
+ BUG();
+
+ case INO_STATE_READING:
+ /* Someone's currently trying to read it. We must wait for
+ them to finish and then go through the full iget() route
+ to do the GC. However, sometimes read_inode() needs to get
+ the alloc_sem() (for marking nodes invalid) so we must
+ drop the alloc_sem before sleeping. */
+
+ up(&c->alloc_sem);
+ D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n",
+ ic->ino, ic->state));
+ sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+ /* And because we dropped the alloc_sem we must start again from the
+ beginning. Ponder chance of livelock here -- we're returning success
+ without actually making any progress.
+
+ Q: What are the chances that the inode is back in INO_STATE_READING
+ again by the time we next enter this function? And that this happens
+ enough times to cause a real delay?
+
+ A: Small enough that I don't care :)
+ */
+ return 0;
+ }
+
+ /* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
+ node intact, and we don't have to muck about with the fragtree etc.
+ because we know it's not in-core. If it _was_ in-core, we go through
+ all the iget() crap anyway */
+
+ if (ic->state == INO_STATE_GC) {
+ spin_unlock(&c->inocache_lock);
+
+ ret = jffs2_garbage_collect_pristine(c, ic, raw);
+
+ spin_lock(&c->inocache_lock);
+ ic->state = INO_STATE_CHECKEDABSENT;
+ wake_up(&c->inocache_wq);
+
+ if (ret != -EBADFD) {
+ spin_unlock(&c->inocache_lock);
+ goto release_sem;
+ }
+
+ /* Fall through if it wanted us to, with inocache_lock held */
+ }
+
+ /* Prevent the fairly unlikely race where the gcblock is
+ entirely obsoleted by the final close of a file which had
+ the only valid nodes in the block, followed by erasure,
+ followed by freeing of the ic because the erased block(s)
+ held _all_ the nodes of that inode.... never been seen but
+ it's vaguely possible. */
+
+ inum = ic->ino;
+ nlink = ic->nlink;
+ spin_unlock(&c->inocache_lock);
+
+ f = jffs2_gc_fetch_inode(c, inum, nlink);
+ if (IS_ERR(f)) {
+ ret = PTR_ERR(f);
+ goto release_sem;
+ }
+ if (!f) {
+ ret = 0;
+ goto release_sem;
+ }
+
+ ret = jffs2_garbage_collect_live(c, jeb, raw, f);
+
+ jffs2_gc_release_inode(c, f);
+
+ release_sem:
+ up(&c->alloc_sem);
+
+ eraseit_lock:
+ /* If we've finished this block, start it erasing */
+ spin_lock(&c->erase_completion_lock);
+
+ eraseit:
+ if (c->gcblock && !c->gcblock->used_size) {
+ D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
+ /* We're GC'ing an empty block? */
+ list_add_tail(&c->gcblock->list, &c->erase_pending_list);
+ c->gcblock = NULL;
+ c->nr_erasing_blocks++;
+ jffs2_erase_pending_trigger(c);
}
+ spin_unlock(&c->erase_completion_lock);
+
+ return ret;
+}
+
+static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
+{
+ struct jffs2_node_frag *frag;
+ struct jffs2_full_dnode *fn = NULL;
+ struct jffs2_full_dirent *fd;
+ uint32_t start = 0, end = 0, nrfrags = 0;
+ int ret = 0;
- f = JFFS2_INODE_INFO(inode);
down(&f->sem);
+
/* Now we have the lock for this inode. Check that it's still the one at the head
of the list. */
- if (raw->flash_offset & 1) {
+ spin_lock(&c->erase_completion_lock);
+
+ if (c->gcblock != jeb) {
+ spin_unlock(&c->erase_completion_lock);
+ D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n"));
+ goto upnout;
+ }
+ if (ref_obsolete(raw)) {
+ spin_unlock(&c->erase_completion_lock);
D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n"));
/* They'll call again */
goto upnout;
}
+ spin_unlock(&c->erase_completion_lock);
+
/* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */
if (f->metadata && f->metadata->raw == raw) {
fn = f->metadata;
- ret = jffs2_garbage_collect_metadata(c, jeb, inode, fn);
+ ret = jffs2_garbage_collect_metadata(c, jeb, f, fn);
goto upnout;
}
-
- for (frag = f->fraglist; frag; frag = frag->next) {
+
+ /* FIXME. Read node and do lookup? */
+ for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
if (frag->node && frag->node->raw == raw) {
fn = frag->node;
end = frag->ofs + frag->size;
@@ -206,17 +506,26 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
}
}
if (fn) {
+ if (ref_flags(raw) == REF_PRISTINE) {
+ ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
+ if (!ret) {
+ /* Urgh. Return it sensibly. */
+ frag->node->raw = f->inocache->nodes;
+ }
+ if (ret != -EBADFD)
+ goto upnout;
+ }
/* We found a datanode. Do the GC */
if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) {
/* It crosses a page boundary. Therefore, it must be a hole. */
- ret = jffs2_garbage_collect_hole(c, jeb, inode, fn, start, end);
+ ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
} else {
/* It could still be a hole. But we GC the page this way anyway */
- ret = jffs2_garbage_collect_dnode(c, jeb, inode, fn, start, end);
+ ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end);
}
goto upnout;
}
-
+
/* Wasn't a dnode. Try dirent */
for (fd = f->dents; fd; fd=fd->next) {
if (fd->raw == raw)
@@ -224,66 +533,226 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
}
if (fd && fd->ino) {
- ret = jffs2_garbage_collect_dirent(c, jeb, inode, fd);
+ ret = jffs2_garbage_collect_dirent(c, jeb, f, fd);
} else if (fd) {
- ret = jffs2_garbage_collect_deletion_dirent(c, jeb, inode, fd);
+ ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
} else {
- printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%lu\n", raw->flash_offset&~3, inode->i_ino);
- if (raw->flash_offset & 1) {
+ printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n",
+ ref_offset(raw), f->inocache->ino);
+ if (ref_obsolete(raw)) {
printk(KERN_WARNING "But it's obsolete so we don't mind too much\n");
} else {
- ret = -EIO;
+ jffs2_dbg_dump_node(c, ref_offset(raw));
+ BUG();
}
}
upnout:
up(&f->sem);
- up(&c->alloc_sem);
- iput(inode);
- eraseit_lock:
- /* If we've finished this block, start it erasing */
- spin_lock_bh(&c->erase_completion_lock);
+ return ret;
+}
- eraseit:
- if (c->gcblock && !c->gcblock->used_size) {
- D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset));
- /* We're GC'ing an empty block? */
- list_add_tail(&c->gcblock->list, &c->erase_pending_list);
- c->gcblock = NULL;
- c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
+ struct jffs2_inode_cache *ic,
+ struct jffs2_raw_node_ref *raw)
+{
+ union jffs2_node_union *node;
+ struct jffs2_raw_node_ref *nraw;
+ size_t retlen;
+ int ret;
+ uint32_t phys_ofs, alloclen;
+ uint32_t crc, rawlen;
+ int retried = 0;
+
+ D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));
+
+ rawlen = ref_totlen(c, c->gcblock, raw);
+
+ /* Ask for a small amount of space (or the totlen if smaller) because we
+ don't want to force wastage of the end of a block if splitting would
+ work. */
+ ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) +
+ JFFS2_MIN_DATA_LEN, rawlen), &phys_ofs, &alloclen, rawlen);
+ /* this is not the exact summary size of it,
+ it is only an upper estimation */
+
+ if (ret)
+ return ret;
+
+ if (alloclen < rawlen) {
+ /* Doesn't fit untouched. We'll go the old route and split it */
+ return -EBADFD;
+ }
+
+ node = kmalloc(rawlen, GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ ret = jffs2_flash_read_safe(c, ref_offset(raw), rawlen, (char*)node);
+ if (ret)
+ goto out_node;
+
+ crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
+ if (je32_to_cpu(node->u.hdr_crc) != crc) {
+ printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
+ goto bail;
}
- spin_unlock_bh(&c->erase_completion_lock);
+ switch(je16_to_cpu(node->u.nodetype)) {
+ case JFFS2_NODETYPE_INODE:
+ crc = crc32(0, node, sizeof(node->i)-8);
+ if (je32_to_cpu(node->i.node_crc) != crc) {
+ printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ref_offset(raw), je32_to_cpu(node->i.node_crc), crc);
+ goto bail;
+ }
+
+ if (je32_to_cpu(node->i.dsize)) {
+ crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
+ if (je32_to_cpu(node->i.data_crc) != crc) {
+ printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ref_offset(raw), je32_to_cpu(node->i.data_crc), crc);
+ goto bail;
+ }
+ }
+ break;
+
+ case JFFS2_NODETYPE_DIRENT:
+ crc = crc32(0, node, sizeof(node->d)-8);
+ if (je32_to_cpu(node->d.node_crc) != crc) {
+ printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ref_offset(raw), je32_to_cpu(node->d.node_crc), crc);
+ goto bail;
+ }
+
+ if (node->d.nsize) {
+ crc = crc32(0, node->d.name, node->d.nsize);
+ if (je32_to_cpu(node->d.name_crc) != crc) {
+ printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent ode at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ref_offset(raw), je32_to_cpu(node->d.name_crc), crc);
+ goto bail;
+ }
+ }
+ break;
+ default:
+ printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
+ ref_offset(raw), je16_to_cpu(node->u.nodetype));
+ goto bail;
+ }
+
+ nraw = jffs2_alloc_raw_node_ref();
+ if (!nraw) {
+ ret = -ENOMEM;
+ goto out_node;
+ }
+
+ /* OK, all the CRCs are good; this node can just be copied as-is. */
+ retry:
+ nraw->flash_offset = phys_ofs;
+ nraw->__totlen = rawlen;
+ nraw->next_phys = NULL;
+
+ ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
+
+ if (ret || (retlen != rawlen)) {
+ printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
+ rawlen, phys_ofs, ret, retlen);
+ if (retlen) {
+ /* Doesn't belong to any inode */
+ nraw->next_in_ino = NULL;
+
+ nraw->flash_offset |= REF_OBSOLETE;
+ jffs2_add_physical_node_ref(c, nraw);
+ jffs2_mark_node_obsolete(c, nraw);
+ } else {
+ printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", nraw->flash_offset);
+ jffs2_free_raw_node_ref(nraw);
+ }
+ if (!retried && (nraw = jffs2_alloc_raw_node_ref())) {
+ /* Try to reallocate space and retry */
+ uint32_t dummy;
+ struct jffs2_eraseblock *jeb = c->blocks[phys_ofs / c->sector_size];
+
+ retried = 1;
+
+ D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n"));
+
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
+
+ ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy, rawlen);
+ /* this is not the exact summary size of it,
+ it is only an upper estimation */
+
+ if (!ret) {
+ D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs));
+
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
+
+ goto retry;
+ }
+ D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+ jffs2_free_raw_node_ref(nraw);
+ }
+
+ jffs2_free_raw_node_ref(nraw);
+ if (!ret)
+ ret = -EIO;
+ goto out_node;
+ }
+ nraw->flash_offset |= REF_PRISTINE;
+ jffs2_add_physical_node_ref(c, nraw);
+
+ /* Link into per-inode list. This is safe because of the ic
+ state being INO_STATE_GC. Note that if we're doing this
+ for an inode which is in-core, the 'nraw' pointer is then
+ going to be fetched from ic->nodes by our caller. */
+ spin_lock(&c->erase_completion_lock);
+ nraw->next_in_ino = ic->nodes;
+ ic->nodes = nraw;
+ spin_unlock(&c->erase_completion_lock);
+
+ jffs2_mark_node_obsolete(c, raw);
+ D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)));
+
+ out_node:
+ kfree(node);
return ret;
+ bail:
+ ret = -EBADFD;
+ goto out_node;
}
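
/* Sketch of the single-retry write pattern used for REF_PRISTINE copies
 * above: if the flash write fails part-way, the partly written region is
 * written off as dirty and the node is retried once at a fresh location.
 * write_fn and flaky_write are assumptions for this standalone model, not
 * the kernel's flash interface. */
#include <stddef.h>
#include <stdio.h>

typedef int (*write_fn)(unsigned off, const void *buf, size_t len, size_t *retlen);

static int model_write_with_retry(write_fn do_write, unsigned off, unsigned alt_off,
				  const void *buf, size_t len)
{
	size_t retlen = 0;
	int retried = 0;

again:
	if (do_write(off, buf, len, &retlen) == 0 && retlen == len)
		return 0;			/* clean copy, keep it */

	if (retlen)
		printf("marking %zu bytes at 0x%x obsolete\n", retlen, off);
	if (!retried) {
		retried = 1;
		off = alt_off;			/* reserve fresh space, retry once */
		goto again;
	}
	return -1;				/* give up; caller falls back */
}

static int flaky_write(unsigned off, const void *buf, size_t len, size_t *retlen)
{
	(void)buf;
	*retlen = (off < 0x1000) ? len / 2 : len;	/* fail at low offsets */
	return (off < 0x1000) ? -1 : 0;
}

int main(void)
{
	char node[64] = "node payload";
	printf("result=%d\n", model_write_with_retry(flaky_write, 0x800, 0x2000,
						     node, sizeof(node)));
	return 0;
}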
-static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct inode *inode, struct jffs2_full_dnode *fn)
+static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_full_dnode *new_fn;
struct jffs2_raw_inode ri;
- unsigned short dev;
+ struct jffs2_node_frag *last_frag;
+ jint16_t dev;
char *mdata = NULL, mdatalen = 0;
- __u32 alloclen, phys_ofs;
+ uint32_t alloclen, phys_ofs, ilen;
int ret;
- if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
+ if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
+ S_ISCHR(JFFS2_F_I_MODE(f)) ) {
/* For these, we don't actually need to read the old node */
- dev = (MAJOR(to_kdev_t(inode->i_rdev)) << 8) |
- MINOR(to_kdev_t(inode->i_rdev));
+ /* FIXME: for minor or major > 255. */
+ dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) |
+ JFFS2_F_I_RDEV_MIN(f)));
mdata = (char *)&dev;
mdatalen = sizeof(dev);
D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen));
- } else if (S_ISLNK(inode->i_mode)) {
+ } else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
mdatalen = fn->size;
mdata = kmalloc(fn->size, GFP_KERNEL);
if (!mdata) {
printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
return -ENOMEM;
}
- ret = jffs2_read_dnode(c, fn, mdata, 0, mdatalen);
+ ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
if (ret) {
printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret);
kfree(mdata);
@@ -292,37 +761,46 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
 D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of symlink target\n", mdatalen));
}
-
- ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen);
+
+ ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen,
+ JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
- printk(KERN_WARNING "jffs2_reserve_space_gc of %d bytes for garbage_collect_metadata failed: %d\n",
+ printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
sizeof(ri)+ mdatalen, ret);
goto out;
}
-
+
+ last_frag = frag_last(&f->fragtree);
+ if (last_frag)
+ /* Fetch the inode length from the fragtree rather than
+ * from i_size since i_size may not have been updated yet */
+ ilen = last_frag->ofs + last_frag->size;
+ else
+ ilen = JFFS2_F_I_SIZE(f);
+
memset(&ri, 0, sizeof(ri));
- ri.magic = JFFS2_MAGIC_BITMASK;
- ri.nodetype = JFFS2_NODETYPE_INODE;
- ri.totlen = sizeof(ri) + mdatalen;
- ri.hdr_crc = crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4);
-
- ri.ino = inode->i_ino;
- ri.version = ++f->highest_version;
- ri.mode = inode->i_mode;
- ri.uid = inode->i_uid;
- ri.gid = inode->i_gid;
- ri.isize = inode->i_size;
- ri.atime = inode->i_atime;
- ri.ctime = inode->i_ctime;
- ri.mtime = inode->i_mtime;
- ri.offset = 0;
- ri.csize = mdatalen;
- ri.dsize = mdatalen;
+ ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+ ri.totlen = cpu_to_je32(sizeof(ri) + mdatalen);
+ ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+ ri.ino = cpu_to_je32(f->inocache->ino);
+ ri.version = cpu_to_je32(++f->highest_version);
+ ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
+ ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
+ ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
+ ri.isize = cpu_to_je32(ilen);
+ ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
+ ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
+ ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+ ri.offset = cpu_to_je32(0);
+ ri.csize = cpu_to_je32(mdatalen);
+ ri.dsize = cpu_to_je32(mdatalen);
ri.compr = JFFS2_COMPR_NONE;
- ri.node_crc = crc32(0, &ri, sizeof(ri)-8);
- ri.data_crc = crc32(0, mdata, mdatalen);
+ ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+ ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
- new_fn = jffs2_write_dnode(inode, &ri, mdata, mdatalen, phys_ofs, NULL);
+ new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, phys_ofs, ALLOC_GC);
if (IS_ERR(new_fn)) {
printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
@@ -333,41 +811,46 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
jffs2_free_full_dnode(fn);
f->metadata = new_fn;
out:
- if (S_ISLNK(inode->i_mode))
+ if (S_ISLNK(JFFS2_F_I_MODE(f)))
kfree(mdata);
return ret;
}
-static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct inode *inode, struct jffs2_full_dirent *fd)
+static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_full_dirent *new_fd;
struct jffs2_raw_dirent rd;
- __u32 alloclen, phys_ofs;
+ uint32_t alloclen, phys_ofs;
int ret;
- rd.magic = JFFS2_MAGIC_BITMASK;
- rd.nodetype = JFFS2_NODETYPE_DIRENT;
+ rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
rd.nsize = strlen(fd->name);
- rd.totlen = sizeof(rd) + rd.nsize;
- rd.hdr_crc = crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4);
+ rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize);
+ rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4));
- rd.pino = inode->i_ino;
- rd.version = ++f->highest_version;
- rd.ino = fd->ino;
- rd.mctime = max(inode->i_mtime, inode->i_ctime);
+ rd.pino = cpu_to_je32(f->inocache->ino);
+ rd.version = cpu_to_je32(++f->highest_version);
+ rd.ino = cpu_to_je32(fd->ino);
+ /* If the times on this inode were set by explicit utime() they can be different,
+ so refrain from splatting them. */
+ if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f))
+ rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+ else
+ rd.mctime = cpu_to_je32(0);
rd.type = fd->type;
- rd.node_crc = crc32(0, &rd, sizeof(rd)-8);
- rd.name_crc = crc32(0, fd->name, rd.nsize);
-
- ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen);
+ rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
+ rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));
+
+ ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen,
+ JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
if (ret) {
- printk(KERN_WARNING "jffs2_reserve_space_gc of %d bytes for garbage_collect_dirent failed: %d\n",
+ printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
sizeof(rd)+rd.nsize, ret);
return ret;
}
- new_fd = jffs2_write_dirent(inode, &rd, fd->name, rd.nsize, phys_ofs, NULL);
+ new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, phys_ofs, ALLOC_GC);
if (IS_ERR(new_fd)) {
printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd));
@@ -377,20 +860,92 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er
return 0;
}
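
/* The dirent GC above only preserves a meaningful mctime when mtime and
 * ctime still agree; if utime() has split them, it writes 0 rather than
 * guess. A one-line model of that decision: */
#include <stdint.h>
#include <stdio.h>

static uint32_t model_dirent_mctime(uint32_t mt, uint32_t ct)
{
	return (mt == ct) ? mt : 0;
}

int main(void)
{
	printf("%u %u\n", model_dirent_mctime(1000, 1000),
	       model_dirent_mctime(1000, 2000));
	return 0;
}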
-static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct inode *inode, struct jffs2_full_dirent *fd)
+static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_full_dirent **fdp = &f->dents;
int found = 0;
- /* FIXME: When we run on NAND flash, we need to work out whether
- this deletion dirent is still needed to actively delete a
- 'real' dirent with the same name that's still somewhere else
- on the flash. For now, we know that we've actually obliterated
- all the older dirents when they became obsolete, so we didn't
- really need to write the deletion to flash in the first place.
- */
+ /* On a medium where we can't actually mark nodes obsolete
+ permanently, such as NAND flash, we need to work out
+ whether this deletion dirent is still needed to actively
+ delete a 'real' dirent with the same name that's still
+ somewhere else on the flash. */
+ if (!jffs2_can_mark_obsolete(c)) {
+ struct jffs2_raw_dirent *rd;
+ struct jffs2_raw_node_ref *raw;
+ int name_len = strlen(fd->name);
+ uint32_t name_crc = crc32(0, fd->name, name_len);
+ uint32_t rawlen = ref_totlen(c, jeb, fd->raw);
+
+ rd = kmalloc(rawlen, GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ /* Prevent the erase code from nicking the obsolete node refs while
+ we're looking at them. I really don't like this extra lock but
+ can't see any alternative. Suggestions on a postcard to... */
+ down(&c->erase_free_sem);
+
+ for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {
+
+ /* We only care about obsolete ones */
+ if (!(ref_obsolete(raw)))
+ continue;
+
+ /* Any dirent with the same name is going to have the same length... */
+ if (ref_totlen(c, NULL, raw) != rawlen)
+ continue;
+
+ /* Doesn't matter if there's one in the same erase block. We're going to
+ delete it too at the same time. */
+ if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
+ continue;
+
+ D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw)));
+
+ /* This is an obsolete node belonging to the same directory, and it's of the right
+ length. We need to take a closer look...*/
+ if (jffs2_flash_read_safe(c, ref_offset(raw), rawlen,
+ (char *)rd))
+ /* If we can't read it, we don't need to continue to obsolete it. Continue */
+ continue;
+
+ if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT)
+ continue;
+
+ /* If the name CRC doesn't match, skip */
+ if (je32_to_cpu(rd->name_crc) != name_crc)
+ continue;
+
+ /* If the name length doesn't match, or it's another deletion dirent, skip */
+ if (rd->nsize != name_len || !je32_to_cpu(rd->ino))
+ continue;
+
+ /* OK, check the actual name now */
+ if (memcmp(rd->name, fd->name, name_len))
+ continue;
+
+ /* OK. The name really does match. There really is still an older node on
+ the flash which our deletion dirent obsoletes. So we have to write out
+ a new deletion dirent to replace it */
+ up(&c->erase_free_sem);
+
+ D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
+ ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino)));
+ kfree(rd);
+
+ return jffs2_garbage_collect_dirent(c, jeb, f, fd);
+ }
+
+ up(&c->erase_free_sem);
+ kfree(rd);
+ }
+
+ /* FIXME: If we're deleting a dirent which contains the current mtime and ctime,
+ we should update the metadata node with those times accordingly */
+
+ /* No need for it any more. Just mark it obsolete and remove it from the list */
while (*fdp) {
if ((*fdp) == fd) {
found = 1;
@@ -400,7 +955,7 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
fdp = &(*fdp)->next;
}
if (!found) {
- printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%lu\n", fd->name, inode->i_ino);
+ printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino);
}
jffs2_mark_node_obsolete(c, fd->raw);
jffs2_free_full_dirent(fd);
@@ -408,93 +963,105 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct
}
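
/* Sketch of the filter the deletion-dirent scan above applies to each
 * obsolete node before doing a byte-for-byte name compare: a dirent node of
 * the same total length, with matching name CRC and length, whose target
 * inode is real (nonzero). Field names and the nodetype value are simplified
 * stand-ins for the raw on-flash structs. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct model_raw_dirent {
	uint16_t nodetype;	/* dirent node or not */
	uint32_t ino;		/* 0 means it is itself a deletion dirent */
	uint8_t  nsize;
	uint32_t name_crc;
	const char *name;
};

#define MODEL_NODETYPE_DIRENT 0xE001	/* value assumed for this model */

static int model_still_obsoletes(const struct model_raw_dirent *rd,
				 const char *name, uint32_t name_crc)
{
	size_t name_len = strlen(name);

	if (rd->nodetype != MODEL_NODETYPE_DIRENT)	return 0;
	if (rd->name_crc != name_crc)			return 0;
	if ((size_t)rd->nsize != name_len || !rd->ino)	return 0;
	return memcmp(rd->name, name, name_len) == 0;
}

int main(void)
{
	struct model_raw_dirent rd = { MODEL_NODETYPE_DIRENT, 42, 3, 0x1234, "foo" };
	printf("%d\n", model_still_obsoletes(&rd, "foo", 0x1234));
	return 0;
}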
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct inode *inode, struct jffs2_full_dnode *fn,
- __u32 start, __u32 end)
+ struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+ uint32_t start, uint32_t end)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_raw_inode ri;
struct jffs2_node_frag *frag;
struct jffs2_full_dnode *new_fn;
- __u32 alloclen, phys_ofs;
+ uint32_t alloclen, phys_ofs, ilen;
int ret;
- D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%lu from offset 0x%x to 0x%x\n",
- inode->i_ino, start, end));
-
+ D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
+ f->inocache->ino, start, end));
+
memset(&ri, 0, sizeof(ri));
if(fn->frags > 1) {
size_t readlen;
- __u32 crc;
- /* It's partially obsoleted by a later write. So we have to
+ uint32_t crc;
+ /* It's partially obsoleted by a later write. So we have to
write it out again with the _same_ version as before */
- ret = c->mtd->read(c->mtd, fn->raw->flash_offset & ~3, sizeof(ri), &readlen, (char *)&ri);
+ ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
if (readlen != sizeof(ri) || ret) {
- printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %d. Data will be lost by writing new hold node\n", ret, readlen);
+ printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen);
goto fill;
}
- if (ri.nodetype != JFFS2_NODETYPE_INODE) {
+ if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
- fn->raw->flash_offset & ~3, ri.nodetype, JFFS2_NODETYPE_INODE);
+ ref_offset(fn->raw),
+ je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
return -EIO;
}
- if (ri.totlen != sizeof(ri)) {
- printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%x\n",
- fn->raw->flash_offset & ~3, ri.totlen, sizeof(ri));
+ if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
+ printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
+ ref_offset(fn->raw),
+ je32_to_cpu(ri.totlen), sizeof(ri));
return -EIO;
}
crc = crc32(0, &ri, sizeof(ri)-8);
- if (crc != ri.node_crc) {
+ if (crc != je32_to_cpu(ri.node_crc)) {
printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
- fn->raw->flash_offset & ~3, ri.node_crc, crc);
+ ref_offset(fn->raw),
+ je32_to_cpu(ri.node_crc), crc);
/* FIXME: We could possibly deal with this by writing new holes for each frag */
- printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%lu will be lost\n",
- start, end, inode->i_ino);
+ printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
+ start, end, f->inocache->ino);
goto fill;
}
if (ri.compr != JFFS2_COMPR_ZERO) {
- printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", fn->raw->flash_offset & ~3);
- printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%lu will be lost\n",
- start, end, inode->i_ino);
+ printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw));
+ printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
+ start, end, f->inocache->ino);
goto fill;
}
} else {
fill:
- ri.magic = JFFS2_MAGIC_BITMASK;
- ri.nodetype = JFFS2_NODETYPE_INODE;
- ri.totlen = sizeof(ri);
- ri.hdr_crc = crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4);
-
- ri.ino = inode->i_ino;
- ri.version = ++f->highest_version;
- ri.offset = start;
- ri.dsize = end - start;
- ri.csize = 0;
+ ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+ ri.totlen = cpu_to_je32(sizeof(ri));
+ ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+ ri.ino = cpu_to_je32(f->inocache->ino);
+ ri.version = cpu_to_je32(++f->highest_version);
+ ri.offset = cpu_to_je32(start);
+ ri.dsize = cpu_to_je32(end - start);
+ ri.csize = cpu_to_je32(0);
ri.compr = JFFS2_COMPR_ZERO;
}
- ri.mode = inode->i_mode;
- ri.uid = inode->i_uid;
- ri.gid = inode->i_gid;
- ri.isize = inode->i_size;
- ri.atime = inode->i_atime;
- ri.ctime = inode->i_ctime;
- ri.mtime = inode->i_mtime;
- ri.data_crc = 0;
- ri.node_crc = crc32(0, &ri, sizeof(ri)-8);
-
- ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen);
+
+ frag = frag_last(&f->fragtree);
+ if (frag)
+ /* Fetch the inode length from the fragtree rather than
+ * from i_size since i_size may not have been updated yet */
+ ilen = frag->ofs + frag->size;
+ else
+ ilen = JFFS2_F_I_SIZE(f);
+
+ ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
+ ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
+ ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
+ ri.isize = cpu_to_je32(ilen);
+ ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
+ ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
+ ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+ ri.data_crc = cpu_to_je32(0);
+ ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+
+ ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen,
+ JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
- printk(KERN_WARNING "jffs2_reserve_space_gc of %d bytes for garbage_collect_hole failed: %d\n",
+ printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
sizeof(ri), ret);
return ret;
}
- new_fn = jffs2_write_dnode(inode, &ri, NULL, 0, phys_ofs, NULL);
+ new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_GC);
if (IS_ERR(new_fn)) {
printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn));
return PTR_ERR(new_fn);
}
- if (ri.version == f->highest_version) {
+ if (je32_to_cpu(ri.version) == f->highest_version) {
jffs2_add_full_dnode_to_inode(c, f, new_fn);
if (f->metadata) {
jffs2_mark_node_obsolete(c, f->metadata->raw);
@@ -504,18 +1071,23 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
return 0;
}
- /*
+ /*
* We should only get here in the case where the node we are
* replacing had more than one frag, so we kept the same version
- * number as before. (Except in case of error -- see 'goto fill;'
+ * number as before. (Except in case of error -- see 'goto fill;'
* above.)
*/
- D1(if(fn->frags <= 1) {
+ D1(if(unlikely(fn->frags <= 1)) {
printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
- fn->frags, ri.version, f->highest_version, ri.ino);
+ fn->frags, je32_to_cpu(ri.version), f->highest_version,
+ je32_to_cpu(ri.ino));
});
- for (frag = f->fraglist; frag; frag = frag->next) {
+ /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
+ mark_ref_normal(new_fn->raw);
+
+ for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
+ frag; frag = frag_next(frag)) {
if (frag->ofs > fn->size + fn->ofs)
break;
if (frag->node == fn) {
@@ -532,59 +1104,156 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n");
BUG();
}
-
+
jffs2_mark_node_obsolete(c, fn->raw);
jffs2_free_full_dnode(fn);
-
+
return 0;
}
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
- struct inode *inode, struct jffs2_full_dnode *fn,
- __u32 start, __u32 end)
+ struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
+ uint32_t start, uint32_t end)
{
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_full_dnode *new_fn;
struct jffs2_raw_inode ri;
- __u32 alloclen, phys_ofs, offset, orig_end;
+ uint32_t alloclen, phys_ofs, offset, orig_end, orig_start;
int ret = 0;
unsigned char *comprbuf = NULL, *writebuf;
- struct page *pg;
+ unsigned long pg;
unsigned char *pg_ptr;
-
memset(&ri, 0, sizeof(ri));
- D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%lu from offset 0x%x to 0x%x\n",
- inode->i_ino, start, end));
+ D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
+ f->inocache->ino, start, end));
orig_end = end;
+ orig_start = start;
+ if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) {
+ /* Attempt to do some merging. But only expand to cover logically
+ adjacent frags if the block containing them is already considered
+ to be dirty. Otherwise we end up with GC just going round in
+ circles dirtying the nodes it already wrote out, especially
+ on NAND where we have small eraseblocks and hence a much higher
+ chance of nodes having to be split to cross boundaries. */
+
+ struct jffs2_node_frag *frag;
+ uint32_t min, max;
+
+ min = start & ~(PAGE_CACHE_SIZE-1);
+ max = min + PAGE_CACHE_SIZE;
+
+ frag = jffs2_lookup_node_frag(&f->fragtree, start);
+
+ /* BUG_ON(!frag) but that'll happen anyway... */
+
+ BUG_ON(frag->ofs != start);
+
+ /* First grow down... */
+ while((frag = frag_prev(frag)) && frag->ofs >= min) {
+
+ /* If the previous frag doesn't even reach the beginning, there's
+ excessive fragmentation. Just merge. */
+ if (frag->ofs > min) {
+ D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n",
+ frag->ofs, frag->ofs+frag->size));
+ start = frag->ofs;
+ continue;
+ }
+ /* OK. This frag holds the first byte of the page. */
+ if (!frag->node || !frag->node->raw) {
+ D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
+ frag->ofs, frag->ofs+frag->size));
+ break;
+ } else {
+
+ /* OK, it's a frag which extends to the beginning of the page. Does it live
+ in a block which is still considered clean? If so, don't obsolete it.
+ If not, cover it anyway. */
+
+ struct jffs2_raw_node_ref *raw = frag->node->raw;
+ struct jffs2_eraseblock *jeb;
+
+ jeb = c->blocks[raw->flash_offset / c->sector_size];
+
+ if (jeb == c->gcblock) {
+ D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+ frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+ start = frag->ofs;
+ break;
+ }
+ if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
+ D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
+ frag->ofs, frag->ofs+frag->size, jeb->offset));
+ break;
+ }
+
+ D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
+ frag->ofs, frag->ofs+frag->size, jeb->offset));
+ start = frag->ofs;
+ break;
+ }
+ }
+
+ /* ... then up */
+
+ /* Find last frag which is actually part of the node we're to GC. */
+ frag = jffs2_lookup_node_frag(&f->fragtree, end-1);
+
+ while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {
+
+		/* If the next frag doesn't even reach the end of the page, there's lots
+		   of fragmentation. Just merge. */
+ if (frag->ofs+frag->size < max) {
+ D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n",
+ frag->ofs, frag->ofs+frag->size));
+ end = frag->ofs + frag->size;
+ continue;
+ }
+
+ if (!frag->node || !frag->node->raw) {
+ D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
+ frag->ofs, frag->ofs+frag->size));
+ break;
+ } else {
+
+			/* OK, it's a frag which extends to the end of the page. Does it live
+ in a block which is still considered clean? If so, don't obsolete it.
+ If not, cover it anyway. */
+
+ struct jffs2_raw_node_ref *raw = frag->node->raw;
+ struct jffs2_eraseblock *jeb;
+
+ jeb = c->blocks[raw->flash_offset / c->sector_size];
+
+ if (jeb == c->gcblock) {
+ D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
+ frag->ofs, frag->ofs+frag->size, ref_offset(raw)));
+ end = frag->ofs + frag->size;
+ break;
+ }
+ if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
+ D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
+ frag->ofs, frag->ofs+frag->size, jeb->offset));
+ break;
+ }
+
+ D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
+ frag->ofs, frag->ofs+frag->size, jeb->offset));
+ end = frag->ofs + frag->size;
+ break;
+ }
+ }
+ D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
+ orig_start, orig_end, start, end));
+
+ D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
+ BUG_ON(end < orig_end);
+ BUG_ON(start > orig_start);
+ }
- /* If we're looking at the last node in the block we're
- garbage-collecting, we allow ourselves to merge as if the
- block was already erasing. We're likely to be GC'ing a
- partial page, and the next block we GC is likely to have
- the other half of this page right at the beginning, which
- means we'd expand it _then_, as nr_erasing_blocks would have
- increased since we checked, and in doing so would obsolete
- the partial node which we'd have written here. Meaning that
- the GC would churn and churn, and just leave dirty blocks in
- it's wake.
- */
- if(c->nr_free_blocks + c->nr_erasing_blocks > JFFS2_RESERVED_BLOCKS_GCMERGE - (fn->raw->next_phys?0:1)) {
- /* Shitloads of space */
- /* FIXME: Integrate this properly with GC calculations */
- start &= ~(PAGE_CACHE_SIZE-1);
- end = min_t(__u32, start + PAGE_CACHE_SIZE, inode->i_size);
- D1(printk(KERN_DEBUG "Plenty of free space, so expanding to write from offset 0x%x to 0x%x\n",
- start, end));
- if (end < orig_end) {
- printk(KERN_WARNING "Eep. jffs2_garbage_collect_dnode extended node to write, but it got smaller: start 0x%x, orig_end 0x%x, end 0x%x\n", start, orig_end, end);
- end = orig_end;
- }
- }
-
/* First, use readpage() to read the appropriate page into the page cache */
/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
* triggered garbage collection in the first place?
@@ -592,63 +1261,59 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
* page OK. We'll actually write it out again in commit_write, which is a little
* suboptimal, but at least we're correct.
*/
- pg = read_cache_page(inode->i_mapping, start >> PAGE_CACHE_SHIFT, (void *)jffs2_do_readpage_unlock, inode);
+ pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
- if (IS_ERR(pg)) {
- printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg));
- return PTR_ERR(pg);
+ if (IS_ERR(pg_ptr)) {
+ printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr));
+ return PTR_ERR(pg_ptr);
}
- pg_ptr = (char *)kmap(pg);
- comprbuf = kmalloc(end - start, GFP_KERNEL);
offset = start;
while(offset < orig_end) {
- __u32 datalen;
- __u32 cdatalen;
- char comprtype = JFFS2_COMPR_NONE;
+ uint32_t datalen;
+ uint32_t cdatalen;
+ uint16_t comprtype = JFFS2_COMPR_NONE;
- ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, &alloclen);
+ ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs,
+ &alloclen, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
- printk(KERN_WARNING "jffs2_reserve_space_gc of %d bytes for garbage_collect_dnode failed: %d\n",
+ printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
sizeof(ri)+ JFFS2_MIN_DATA_LEN, ret);
break;
}
- cdatalen = min(alloclen - sizeof(ri), end - offset);
+ cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
datalen = end - offset;
writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1));
- if (comprbuf) {
- comprtype = jffs2_compress(writebuf, comprbuf, &datalen, &cdatalen);
- }
- if (comprtype) {
- writebuf = comprbuf;
- } else {
- datalen = cdatalen;
- }
- ri.magic = JFFS2_MAGIC_BITMASK;
- ri.nodetype = JFFS2_NODETYPE_INODE;
- ri.totlen = sizeof(ri) + cdatalen;
- ri.hdr_crc = crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4);
-
- ri.ino = inode->i_ino;
- ri.version = ++f->highest_version;
- ri.mode = inode->i_mode;
- ri.uid = inode->i_uid;
- ri.gid = inode->i_gid;
- ri.isize = inode->i_size;
- ri.atime = inode->i_atime;
- ri.ctime = inode->i_ctime;
- ri.mtime = inode->i_mtime;
- ri.offset = offset;
- ri.csize = cdatalen;
- ri.dsize = datalen;
- ri.compr = comprtype;
- ri.node_crc = crc32(0, &ri, sizeof(ri)-8);
- ri.data_crc = crc32(0, writebuf, cdatalen);
-
- new_fn = jffs2_write_dnode(inode, &ri, writebuf, cdatalen, phys_ofs, NULL);
+ comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);
+
+ ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+ ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen);
+ ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
+
+ ri.ino = cpu_to_je32(f->inocache->ino);
+ ri.version = cpu_to_je32(++f->highest_version);
+ ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
+ ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
+ ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
+ ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
+ ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
+ ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
+ ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
+ ri.offset = cpu_to_je32(offset);
+ ri.csize = cpu_to_je32(cdatalen);
+ ri.dsize = cpu_to_je32(datalen);
+ ri.compr = comprtype & 0xff;
+ ri.usercompr = (comprtype >> 8) & 0xff;
+ ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
+ ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
+
+ new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC);
+
+ jffs2_free_comprbuf(comprbuf, writebuf);
if (IS_ERR(new_fn)) {
printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
@@ -663,12 +1328,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
f->metadata = NULL;
}
}
- if (comprbuf) kfree(comprbuf);
- kunmap(pg);
- /* XXX: Does the page get freed automatically? */
- /* AAA: Judging by the unmount getting stuck in __wait_on_page, nope. */
- page_cache_release(pg);
+ jffs2_gc_release_page(c, pg_ptr, &pg);
return ret;
}
-
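
A side note on the garbage-collector rework above: the rewritten jffs2_garbage_collect_dnode() only ever expands the range it rewrites within the single RAM page containing 'start', deriving its min/max window from PAGE_CACHE_SIZE. The standalone sketch below is not taken from the patch; gc_page_bounds is a hypothetical name and the 4096-byte page size is an assumption. It shows only that window arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's PAGE_CACHE_SIZE (commonly 4096). */
#define PAGE_CACHE_SIZE 4096u

/* Hypothetical helper mirroring the clamp used when GC expands a dnode:
   the write range may grow, but never beyond the RAM page holding 'start'. */
static void gc_page_bounds(uint32_t start, uint32_t *min, uint32_t *max)
{
    *min = start & ~(PAGE_CACHE_SIZE - 1);  /* first byte of the page */
    *max = *min + PAGE_CACHE_SIZE;          /* first byte of the next page */
}

int main(void)
{
    uint32_t min, max;

    gc_page_bounds(0x1234, &min, &max);
    printf("expansion window: 0x%08x-0x%08x\n", min, max); /* 0x00001000-0x00002000 */
    return 0;
}

Within that window the patch only swallows neighbouring frags whose block is the current gcblock or already counts as dirty, which is what keeps GC from churning on clean blocks.
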
diff --git a/linux-2.4.x/fs/jffs2/histo.h b/linux-2.4.x/fs/jffs2/histo.h
index 84f184f..22a93a0 100644
--- a/linux-2.4.x/fs/jffs2/histo.h
+++ b/linux-2.4.x/fs/jffs2/histo.h
@@ -1,3 +1,3 @@
/* This file provides the bit-probabilities for the input file */
-#define BIT_DIVIDER 629
+#define BIT_DIVIDER 629
static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */
diff --git a/linux-2.4.x/fs/jffs2/histo_mips.h b/linux-2.4.x/fs/jffs2/histo_mips.h
index 9a44326..fa3dac1 100644
--- a/linux-2.4.x/fs/jffs2/histo_mips.h
+++ b/linux-2.4.x/fs/jffs2/histo_mips.h
@@ -1,2 +1,2 @@
-#define BIT_DIVIDER_MIPS 1043
+#define BIT_DIVIDER_MIPS 1043
static int bits_mips[8] = { 277,249,290,267,229,341,212,241}; /* mips32 */
diff --git a/linux-2.4.x/fs/jffs2/ioctl.c b/linux-2.4.x/fs/jffs2/ioctl.c
index 38a52d3..6909983 100644
--- a/linux-2.4.x/fs/jffs2/ioctl.c
+++ b/linux-2.4.x/fs/jffs2/ioctl.c
@@ -1,47 +1,23 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: ioctl.c,v 1.5 2001/03/15 15:38:24 dwmw2 Exp $
+ * $Id: ioctl.c,v 1.10 2005/11/07 11:14:40 gleixner Exp $
*
*/
#include <linux/fs.h>
-int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+int jffs2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
/* Later, this will provide for lsattr.jffs2 and chattr.jffs2, which
will include compression support etc. */
- return -EINVAL;
+ return -ENOTTY;
}
-
+
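
The one functional change in ioctl.c above is the return value: unsupported ioctl requests now yield -ENOTTY (the conventional "inappropriate ioctl for device" errno) instead of -EINVAL. A minimal user-space sketch of what a caller sees after this change; the path and request number are made up for illustration:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
    /* Hypothetical path; any file on a JFFS2 mount would do. */
    int fd = open("/mnt/jffs2/somefile", O_RDONLY);
    if (fd < 0)
        return 1;

    /* 0xdead is a made-up request number that the filesystem does not implement. */
    if (ioctl(fd, 0xdead, 0) < 0 && errno == ENOTTY)
        printf("ioctl rejected with ENOTTY, as expected\n");

    close(fd);
    return 0;
}
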
diff --git a/linux-2.4.x/fs/jffs2/malloc.c b/linux-2.4.x/fs/jffs2/malloc.c
index 6ce14c9..6ae6952 100644
--- a/linux-2.4.x/fs/jffs2/malloc.c
+++ b/linux-2.4.x/fs/jffs2/malloc.c
@@ -1,52 +1,23 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: malloc.c,v 1.16 2001/03/15 15:38:24 dwmw2 Exp $
+ * $Id: malloc.c,v 1.34 2005/11/29 14:34:38 gleixner Exp $
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
+#include <linux/vmalloc.h>
#include <linux/jffs2.h>
#include "nodelist.h"
-#if 0
-#define JFFS2_SLAB_POISON SLAB_POISON
-#else
-#define JFFS2_SLAB_POISON 0
-#endif
-
/* These are initialised to NULL in the kernel startup code.
If you're porting to other operating systems, beware */
static kmem_cache_t *full_dnode_slab;
@@ -56,58 +27,60 @@ static kmem_cache_t *tmp_dnode_info_slab;
static kmem_cache_t *raw_node_ref_slab;
static kmem_cache_t *node_frag_slab;
static kmem_cache_t *inode_cache_slab;
+static kmem_cache_t *eraseblock_slab;
-void jffs2_free_tmp_dnode_info_list(struct jffs2_tmp_dnode_info *tn)
-{
- struct jffs2_tmp_dnode_info *next;
-
- while (tn) {
- next = tn;
- tn = tn->next;
- jffs2_free_full_dnode(next->fn);
- jffs2_free_tmp_dnode_info(next);
- }
-}
-
-void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
+static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c)
{
- struct jffs2_full_dirent *next;
-
- while (fd) {
- next = fd->next;
- jffs2_free_full_dirent(fd);
- fd = next;
- }
+ return ((c->flash_size / c->sector_size) * sizeof(void*)) > (128 * 1024);
}
int __init jffs2_create_slab_caches(void)
{
- full_dnode_slab = kmem_cache_create("jffs2_full_dnode", sizeof(struct jffs2_full_dnode), 0, JFFS2_SLAB_POISON, NULL, NULL);
+ full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
+ sizeof(struct jffs2_full_dnode),
+ 0, 0, NULL, NULL);
if (!full_dnode_slab)
goto err;
- raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent", sizeof(struct jffs2_raw_dirent), 0, JFFS2_SLAB_POISON, NULL, NULL);
+ raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
+ sizeof(struct jffs2_raw_dirent),
+ 0, 0, NULL, NULL);
if (!raw_dirent_slab)
goto err;
- raw_inode_slab = kmem_cache_create("jffs2_raw_inode", sizeof(struct jffs2_raw_inode), 0, JFFS2_SLAB_POISON, NULL, NULL);
+ raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
+ sizeof(struct jffs2_raw_inode),
+ 0, 0, NULL, NULL);
if (!raw_inode_slab)
goto err;
- tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode", sizeof(struct jffs2_tmp_dnode_info), 0, JFFS2_SLAB_POISON, NULL, NULL);
+ tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
+ sizeof(struct jffs2_tmp_dnode_info),
+ 0, 0, NULL, NULL);
if (!tmp_dnode_info_slab)
goto err;
- raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref", sizeof(struct jffs2_raw_node_ref), 0, JFFS2_SLAB_POISON, NULL, NULL);
+ raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref",
+ sizeof(struct jffs2_raw_node_ref),
+ 0, 0, NULL, NULL);
if (!raw_node_ref_slab)
goto err;
- node_frag_slab = kmem_cache_create("jffs2_node_frag", sizeof(struct jffs2_node_frag), 0, JFFS2_SLAB_POISON, NULL, NULL);
+ node_frag_slab = kmem_cache_create("jffs2_node_frag",
+ sizeof(struct jffs2_node_frag),
+ 0, 0, NULL, NULL);
if (!node_frag_slab)
goto err;
- inode_cache_slab = kmem_cache_create("jffs2_inode_cache", sizeof(struct jffs2_inode_cache), 0, JFFS2_SLAB_POISON, NULL, NULL);
+ eraseblock_slab = kmem_cache_create("jffs2_eraseblock",
+ sizeof(struct jffs2_eraseblock),
+ 0, 0, NULL, NULL);
+ if (!eraseblock_slab)
+ goto err;
+ inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
+ sizeof(struct jffs2_inode_cache),
+ 0, 0, NULL, NULL);
if (inode_cache_slab)
return 0;
err:
@@ -131,90 +104,174 @@ void jffs2_destroy_slab_caches(void)
kmem_cache_destroy(node_frag_slab);
if(inode_cache_slab)
kmem_cache_destroy(inode_cache_slab);
-
+ if (eraseblock_slab)
+ kmem_cache_destroy(eraseblock_slab);
}
struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
{
- return kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
+ struct jffs2_full_dirent *ret;
+ ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
+ return ret;
}
void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
{
+ dbg_memalloc("%p\n", x);
kfree(x);
}
struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
{
- void *ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
+ struct jffs2_full_dnode *ret;
+ ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
return ret;
}
void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
{
+ dbg_memalloc("%p\n", x);
kmem_cache_free(full_dnode_slab, x);
}
struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
{
- return kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
+ struct jffs2_raw_dirent *ret;
+ ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
+ return ret;
}
void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
{
+ dbg_memalloc("%p\n", x);
kmem_cache_free(raw_dirent_slab, x);
}
struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
{
- return kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
+ struct jffs2_raw_inode *ret;
+ ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
+ return ret;
}
void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
{
+ dbg_memalloc("%p\n", x);
kmem_cache_free(raw_inode_slab, x);
}
struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
{
- return kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
+ struct jffs2_tmp_dnode_info *ret;
+ ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
+	dbg_memalloc("%p\n", ret);
+ return ret;
}
void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
{
+ dbg_memalloc("%p\n", x);
kmem_cache_free(tmp_dnode_info_slab, x);
}
struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void)
{
- return kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
+ struct jffs2_raw_node_ref *ret;
+ ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
+ return ret;
}
void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x)
{
+ dbg_memalloc("%p\n", x);
kmem_cache_free(raw_node_ref_slab, x);
}
struct jffs2_node_frag *jffs2_alloc_node_frag(void)
{
- return kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
+ struct jffs2_node_frag *ret;
+ ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
+ return ret;
}
void jffs2_free_node_frag(struct jffs2_node_frag *x)
{
+ dbg_memalloc("%p\n", x);
kmem_cache_free(node_frag_slab, x);
}
struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
- struct jffs2_inode_cache *ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
- D1(printk(KERN_DEBUG "Allocated inocache at %p\n", ret));
+ struct jffs2_inode_cache *ret;
+ ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", ret);
return ret;
}
void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
- D1(printk(KERN_DEBUG "Freeing inocache at %p\n", x));
+ dbg_memalloc("%p\n", x);
kmem_cache_free(inode_cache_slab, x);
}
+int jffs2_alloc_eraseblocks(struct jffs2_sb_info *c)
+{
+ uint32_t i;
+#ifndef __ECOS
+ if (jffs2_blocks_use_vmalloc(c))
+ c->blocks = vmalloc(sizeof(void *) * c->nr_blocks);
+ else
+#endif
+ c->blocks = kmalloc(sizeof(void *) * c->nr_blocks, GFP_KERNEL);
+ if (!c->blocks)
+ return -ENOMEM;
+ memset(c->blocks, 0, sizeof(void *) * c->nr_blocks);
+
+ for (i=0; i<c->nr_blocks; i++) {
+ c->blocks[i] = kmem_cache_alloc(eraseblock_slab, GFP_KERNEL);
+ dbg_memalloc("%p\n", c->blocks[i]);
+ if (!c->blocks[i]) {
+ jffs2_free_eraseblocks(c);
+ return -ENOMEM;
+ }
+ memset(c->blocks[i], 0, sizeof(struct jffs2_eraseblock));
+ }
+
+
+ for (i=0; i<c->nr_blocks; i++) {
+ INIT_LIST_HEAD(&c->blocks[i]->list);
+ INIT_LIST_HEAD(&c->blocks[i]->hash_list);
+ c->blocks[i]->offset = i * c->sector_size;
+ c->blocks[i]->free_size = c->sector_size;
+ c->blocks[i]->first_node = NULL;
+ c->blocks[i]->last_node = NULL;
+ }
+
+ return 0;
+}
+
+void jffs2_free_eraseblocks(struct jffs2_sb_info *c)
+{
+ uint32_t i;
+
+ for (i=0; i<c->nr_blocks; i++) {
+ if (c->blocks[i]) {
+ dbg_memalloc("%p\n", c->blocks[i]);
+ kmem_cache_free(eraseblock_slab, c->blocks[i]);
+ }
+ }
+#ifndef __ECOS
+ if (jffs2_blocks_use_vmalloc(c))
+ vfree(c->blocks);
+ else
+#endif
+ kfree(c->blocks);
+}
+
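
The new jffs2_alloc_eraseblocks()/jffs2_free_eraseblocks() pair above sizes the array of per-eraseblock pointers and picks kmalloc() or vmalloc() via jffs2_blocks_use_vmalloc(): once (flash_size / sector_size) * sizeof(void *) would exceed 128 KiB, a physically contiguous allocation is avoided. A user-space sketch of the same threshold arithmetic; the sample flash geometries are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the cut-off in jffs2_blocks_use_vmalloc(): switch to vmalloc once
   the array of per-eraseblock pointers would exceed 128 KiB. */
static int blocks_use_vmalloc(uint64_t flash_size, uint32_t sector_size)
{
    return ((flash_size / sector_size) * sizeof(void *)) > (128 * 1024);
}

int main(void)
{
    /* 1 GiB NAND with 16 KiB eraseblocks: 65536 pointers -> 512 KiB on a
       64-bit host, so vmalloc (prints 1). */
    printf("%d\n", blocks_use_vmalloc(1ULL << 30, 16 * 1024));

    /* 32 MiB NOR with 64 KiB sectors: 512 pointers -> 4 KiB, so kmalloc
       (prints 0). */
    printf("%d\n", blocks_use_vmalloc(32ULL << 20, 64 * 1024));
    return 0;
}
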
diff --git a/linux-2.4.x/fs/jffs2/nodelist.c b/linux-2.4.x/fs/jffs2/nodelist.c
index 0d6b525..569772f 100644
--- a/linux-2.4.x/fs/jffs2/nodelist.c
+++ b/linux-2.4.x/fs/jffs2/nodelist.c
@@ -1,297 +1,877 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001, 2002 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: nodelist.c,v 1.30.2.5 2002/08/20 21:02:00 dwmw2 Exp $
+ * $Id: nodelist.c,v 1.119 2006/01/13 10:25:28 havasi Exp $
*
*/
#include <linux/kernel.h>
-#include <linux/jffs2.h>
+#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mtd/mtd.h>
+#include <linux/rbtree.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
#include "nodelist.h"
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list)
{
struct jffs2_full_dirent **prev = list;
- D1(printk(KERN_DEBUG "jffs2_add_fd_to_list( %p, %p (->%p))\n", new, list, *list));
+
+ dbg_dentlist("add dirent \"%s\", ino #%u\n", new->name, new->ino);
while ((*prev) && (*prev)->nhash <= new->nhash) {
if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
/* Duplicate. Free one */
if (new->version < (*prev)->version) {
- D1(printk(KERN_DEBUG "Eep! Marking new dirent node obsolete\n"));
- D1(printk(KERN_DEBUG "New dirent is \"%s\"->ino #%u. Old is \"%s\"->ino #%u\n", new->name, new->ino, (*prev)->name, (*prev)->ino));
+				dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n",
+ (*prev)->name, (*prev)->ino);
jffs2_mark_node_obsolete(c, new->raw);
jffs2_free_full_dirent(new);
} else {
- D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) obsolete\n", (*prev)->ino));
+				dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n",
+ (*prev)->name, (*prev)->ino);
new->next = (*prev)->next;
jffs2_mark_node_obsolete(c, ((*prev)->raw));
jffs2_free_full_dirent(*prev);
*prev = new;
}
- goto out;
+ return;
}
prev = &((*prev)->next);
}
new->next = *prev;
*prev = new;
+}
+
+void jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, uint32_t size)
+{
+ struct jffs2_node_frag *frag = jffs2_lookup_node_frag(list, size);
- out:
- D2(while(*list) {
- printk(KERN_DEBUG "Dirent \"%s\" (hash 0x%08x, ino #%u\n", (*list)->name, (*list)->nhash, (*list)->ino);
- list = &(*list)->next;
- });
+ dbg_fragtree("truncating fragtree to 0x%08x bytes\n", size);
+
+ /* We know frag->ofs <= size. That's what lookup does for us */
+ if (frag && frag->ofs != size) {
+ if (frag->ofs+frag->size > size) {
+ frag->size = size - frag->ofs;
+ }
+ frag = frag_next(frag);
+ }
+ while (frag && frag->ofs >= size) {
+ struct jffs2_node_frag *next = frag_next(frag);
+
+ frag_erase(frag, list);
+ jffs2_obsolete_node_frag(c, frag);
+ frag = next;
+ }
+
+ if (size == 0)
+ return;
+
+ /*
+ * If the last fragment starts at the RAM page boundary, it is
+ * REF_PRISTINE irrespective of its size.
+ */
+ frag = frag_last(list);
+ if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) {
+ dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n",
+ frag->ofs, frag->ofs + frag->size);
+ frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE;
+ }
}
-/* Put a new tmp_dnode_info into the list, keeping the list in
- order of increasing version
-*/
-void jffs2_add_tn_to_list(struct jffs2_tmp_dnode_info *tn, struct jffs2_tmp_dnode_info **list)
+void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this)
{
- struct jffs2_tmp_dnode_info **prev = list;
-
- while ((*prev) && (*prev)->version < tn->version) {
- prev = &((*prev)->next);
+ if (this->node) {
+ this->node->frags--;
+ if (!this->node->frags) {
+ /* The node has no valid frags left. It's totally obsoleted */
+ dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) obsolete\n",
+ ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size);
+ jffs2_mark_node_obsolete(c, this->node->raw);
+ jffs2_free_full_dnode(this->node);
+ } else {
+ dbg_fragtree2("marking old node @0x%08x (0x%04x-0x%04x) REF_NORMAL. frags is %d\n",
+ ref_offset(this->node->raw), this->node->ofs, this->node->ofs+this->node->size, this->node->frags);
+ mark_ref_normal(this->node->raw);
+ }
+
}
- tn->next = (*prev);
- *prev = tn;
+ jffs2_free_node_frag(this);
}
-/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
- with this ino, returning the former in order of version */
+static void jffs2_fragtree_insert(struct jffs2_node_frag *newfrag, struct jffs2_node_frag *base)
+{
+ struct rb_node *parent = &base->rb;
+ struct rb_node **link = &parent;
+
+ dbg_fragtree2("insert frag (0x%04x-0x%04x)\n", newfrag->ofs, newfrag->ofs + newfrag->size);
+
+ while (*link) {
+ parent = *link;
+ base = rb_entry(parent, struct jffs2_node_frag, rb);
+
+ if (newfrag->ofs > base->ofs)
+ link = &base->rb.rb_right;
+ else if (newfrag->ofs < base->ofs)
+ link = &base->rb.rb_left;
+ else {
+ JFFS2_ERROR("duplicate frag at %08x (%p,%p)\n", newfrag->ofs, newfrag, base);
+ BUG();
+ }
+ }
+
+ rb_link_node(&newfrag->rb, &base->rb, link);
+}
-int jffs2_get_inode_nodes(struct jffs2_sb_info *c, ino_t ino, struct jffs2_inode_info *f,
- struct jffs2_tmp_dnode_info **tnp, struct jffs2_full_dirent **fdp,
- __u32 *highest_version, __u32 *latest_mctime,
- __u32 *mctime_ver)
+/*
+ * Allocate and initializes a new fragment.
+ */
+static inline struct jffs2_node_frag * new_fragment(struct jffs2_full_dnode *fn, uint32_t ofs, uint32_t size)
{
- struct jffs2_raw_node_ref *ref = f->inocache->nodes;
- struct jffs2_tmp_dnode_info *tn, *ret_tn = NULL;
- struct jffs2_full_dirent *fd, *ret_fd = NULL;
+ struct jffs2_node_frag *newfrag;
- union jffs2_node_union node;
- size_t retlen;
- int err;
+ newfrag = jffs2_alloc_node_frag();
+ if (likely(newfrag)) {
+ newfrag->ofs = ofs;
+ newfrag->size = size;
+ newfrag->node = fn;
+ } else {
+ JFFS2_ERROR("cannot allocate a jffs2_node_frag object\n");
+ }
- *mctime_ver = 0;
+ return newfrag;
+}
- D1(printk(KERN_DEBUG "jffs2_get_inode_nodes(): ino #%lu\n", ino));
- if (!f->inocache->nodes) {
- printk(KERN_WARNING "Eep. no nodes for ino #%lu\n", ino);
+/*
+ * Called when no overlapping fragment exists. Inserts a hole before the new
+ * fragment and inserts the new fragment into the fragtree.
+ */
+static int no_overlapping_node(struct jffs2_sb_info *c, struct rb_root *root,
+ struct jffs2_node_frag *newfrag,
+ struct jffs2_node_frag *this, uint32_t lastend)
+{
+ if (lastend < newfrag->node->ofs) {
+ /* put a hole in before the new fragment */
+ struct jffs2_node_frag *holefrag;
+
+		holefrag = new_fragment(NULL, lastend, newfrag->node->ofs - lastend);
+ if (unlikely(!holefrag)) {
+ jffs2_free_node_frag(newfrag);
+ return -ENOMEM;
+ }
+
+ if (this) {
+ /* By definition, the 'this' node has no right-hand child,
+ because there are no frags with offset greater than it.
+ So that's where we want to put the hole */
+ dbg_fragtree2("add hole frag %#04x-%#04x on the right of the new frag.\n",
+ holefrag->ofs, holefrag->ofs + holefrag->size);
+ rb_link_node(&holefrag->rb, &this->rb, &this->rb.rb_right);
+ } else {
+ dbg_fragtree2("Add hole frag %#04x-%#04x to the root of the tree.\n",
+ holefrag->ofs, holefrag->ofs + holefrag->size);
+ rb_link_node(&holefrag->rb, NULL, &root->rb_node);
+ }
+ rb_insert_color(&holefrag->rb, root);
+ this = holefrag;
}
- for (ref = f->inocache->nodes; ref && ref->next_in_ino; ref = ref->next_in_ino) {
- /* Work out whether it's a data node or a dirent node */
- if (ref->flash_offset & 1) {
- /* FIXME: On NAND flash we may need to read these */
- D1(printk(KERN_DEBUG "node at 0x%08x is obsoleted. Ignoring.\n", ref->flash_offset &~3));
- continue;
+
+ if (this) {
+ /* By definition, the 'this' node has no right-hand child,
+ because there are no frags with offset greater than it.
+ So that's where we want to put new fragment */
+ dbg_fragtree2("add the new node at the right\n");
+ rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);
+ } else {
+ dbg_fragtree2("insert the new node at the root of the tree\n");
+ rb_link_node(&newfrag->rb, NULL, &root->rb_node);
+ }
+ rb_insert_color(&newfrag->rb, root);
+
+ return 0;
+}
+
+/* Doesn't set inode->i_size */
+static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *root, struct jffs2_node_frag *newfrag)
+{
+ struct jffs2_node_frag *this;
+ uint32_t lastend;
+
+ /* Skip all the nodes which are completed before this one starts */
+ this = jffs2_lookup_node_frag(root, newfrag->node->ofs);
+
+ if (this) {
+ dbg_fragtree2("lookup gave frag 0x%04x-0x%04x; phys 0x%08x (*%p)\n",
+ this->ofs, this->ofs+this->size, this->node?(ref_offset(this->node->raw)):0xffffffff, this);
+ lastend = this->ofs + this->size;
+ } else {
+ dbg_fragtree2("lookup gave no frag\n");
+ lastend = 0;
+ }
+
+ /* See if we ran off the end of the fragtree */
+ if (lastend <= newfrag->ofs) {
+ /* We did */
+
+ /* Check if 'this' node was on the same page as the new node.
+ If so, both 'this' and the new node get marked REF_NORMAL so
+ the GC can take a look.
+ */
+ if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) {
+ if (this->node)
+ mark_ref_normal(this->node->raw);
+ mark_ref_normal(newfrag->node->raw);
}
- err = c->mtd->read(c->mtd, (ref->flash_offset & ~3), min(ref->totlen, sizeof(node)), &retlen, (void *)&node);
- if (err) {
- printk(KERN_WARNING "error %d reading node at 0x%08x in get_inode_nodes()\n", err, (ref->flash_offset) & ~3);
- goto free_out;
+
+ return no_overlapping_node(c, root, newfrag, this, lastend);
+ }
+
+ if (this->node)
+ dbg_fragtree2("dealing with frag %u-%u, phys %#08x(%d).\n",
+ this->ofs, this->ofs + this->size,
+ ref_offset(this->node->raw), ref_flags(this->node->raw));
+ else
+ dbg_fragtree2("dealing with hole frag %u-%u.\n",
+ this->ofs, this->ofs + this->size);
+
+ /* OK. 'this' is pointing at the first frag that newfrag->ofs at least partially obsoletes,
+ * - i.e. newfrag->ofs < this->ofs+this->size && newfrag->ofs >= this->ofs
+ */
+ if (newfrag->ofs > this->ofs) {
+ /* This node isn't completely obsoleted. The start of it remains valid */
+
+ /* Mark the new node and the partially covered node REF_NORMAL -- let
+ the GC take a look at them */
+ mark_ref_normal(newfrag->node->raw);
+ if (this->node)
+ mark_ref_normal(this->node->raw);
+
+ if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
+ /* The new node splits 'this' frag into two */
+ struct jffs2_node_frag *newfrag2;
+
+ if (this->node)
+ dbg_fragtree2("split old frag 0x%04x-0x%04x, phys 0x%08x\n",
+ this->ofs, this->ofs+this->size, ref_offset(this->node->raw));
+ else
+ dbg_fragtree2("split old hole frag 0x%04x-0x%04x\n",
+ this->ofs, this->ofs+this->size);
+
+ /* New second frag pointing to this's node */
+ newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
+ this->ofs + this->size - newfrag->ofs - newfrag->size);
+ if (unlikely(!newfrag2))
+ return -ENOMEM;
+ if (this->node)
+ this->node->frags++;
+
+ /* Adjust size of original 'this' */
+ this->size = newfrag->ofs - this->ofs;
+
+ /* Now, we know there's no node with offset
+ greater than this->ofs but smaller than
+ newfrag2->ofs or newfrag->ofs, for obvious
+ reasons. So we can do a tree insert from
+ 'this' to insert newfrag, and a tree insert
+ from newfrag to insert newfrag2. */
+ jffs2_fragtree_insert(newfrag, this);
+ rb_insert_color(&newfrag->rb, root);
+
+ jffs2_fragtree_insert(newfrag2, newfrag);
+ rb_insert_color(&newfrag2->rb, root);
+
+ return 0;
}
-
+ /* New node just reduces 'this' frag in size, doesn't split it */
+ this->size = newfrag->ofs - this->ofs;
- /* Check we've managed to read at least the common node header */
- if (retlen < min(ref->totlen, sizeof(node.u))) {
- printk(KERN_WARNING "short read in get_inode_nodes()\n");
- err = -EIO;
- goto free_out;
+ /* Again, we know it lives down here in the tree */
+ jffs2_fragtree_insert(newfrag, this);
+ rb_insert_color(&newfrag->rb, root);
+ } else {
+ /* New frag starts at the same point as 'this' used to. Replace
+ it in the tree without doing a delete and insertion */
+		dbg_fragtree2("inserting newfrag (*%p),%d-%d in place of 'this' (*%p),%d-%d\n",
+ newfrag, newfrag->ofs, newfrag->ofs+newfrag->size, this, this->ofs, this->ofs+this->size);
+
+ rb_replace_node(&this->rb, &newfrag->rb, root);
+
+ if (newfrag->ofs + newfrag->size >= this->ofs+this->size) {
+ dbg_fragtree2("obsoleting node frag %p (%x-%x)\n", this, this->ofs, this->ofs+this->size);
+ jffs2_obsolete_node_frag(c, this);
+ } else {
+ this->ofs += newfrag->size;
+ this->size -= newfrag->size;
+
+ jffs2_fragtree_insert(this, newfrag);
+ rb_insert_color(&this->rb, root);
+ return 0;
}
-
- switch (node.u.nodetype) {
- case JFFS2_NODETYPE_DIRENT:
- D1(printk(KERN_DEBUG "Node at %08x is a dirent node\n", ref->flash_offset &~3));
- if (retlen < sizeof(node.d)) {
- printk(KERN_WARNING "short read in get_inode_nodes()\n");
- err = -EIO;
- goto free_out;
- }
- if (node.d.version > *highest_version)
- *highest_version = node.d.version;
- if (ref->flash_offset & 1) {
- /* Obsoleted */
- continue;
- }
- fd = jffs2_alloc_full_dirent(node.d.nsize+1);
- if (!fd) {
- err = -ENOMEM;
- goto free_out;
- }
- memset(fd,0,sizeof(struct jffs2_full_dirent) + node.d.nsize+1);
- fd->raw = ref;
- fd->version = node.d.version;
- fd->ino = node.d.ino;
- fd->type = node.d.type;
-
- /* Pick out the mctime of the latest dirent */
- if(fd->version > *mctime_ver) {
- *mctime_ver = fd->version;
- *latest_mctime = node.d.mctime;
- }
+ }
+ /* OK, now we have newfrag added in the correct place in the tree, but
+ frag_next(newfrag) may be a fragment which is overlapped by it
+ */
+ while ((this = frag_next(newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
+ /* 'this' frag is obsoleted completely. */
+ dbg_fragtree2("obsoleting node frag %p (%x-%x) and removing from tree\n",
+ this, this->ofs, this->ofs+this->size);
+ rb_erase(&this->rb, root);
+ jffs2_obsolete_node_frag(c, this);
+ }
+ /* Now we're pointing at the first frag which isn't totally obsoleted by
+ the new frag */
- /* memcpy as much of the name as possible from the raw
- dirent we've already read from the flash
- */
- if (retlen > sizeof(struct jffs2_raw_dirent))
- memcpy(&fd->name[0], &node.d.name[0], min((__u32)node.d.nsize, (retlen-sizeof(struct jffs2_raw_dirent))));
-
- /* Do we need to copy any more of the name directly
- from the flash?
- */
- if (node.d.nsize + sizeof(struct jffs2_raw_dirent) > retlen) {
- int already = retlen - sizeof(struct jffs2_raw_dirent);
-
- err = c->mtd->read(c->mtd, (ref->flash_offset & ~3) + retlen,
- node.d.nsize - already, &retlen, &fd->name[already]);
- if (!err && retlen != node.d.nsize - already)
- err = -EIO;
-
- if (err) {
- printk(KERN_WARNING "Read remainder of name in jffs2_get_inode_nodes(): error %d\n", err);
- jffs2_free_full_dirent(fd);
- goto free_out;
- }
+ if (!this || newfrag->ofs + newfrag->size == this->ofs)
+ return 0;
+
+ /* Still some overlap but we don't need to move it in the tree */
+ this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
+ this->ofs = newfrag->ofs + newfrag->size;
+
+ /* And mark them REF_NORMAL so the GC takes a look at them */
+ if (this->node)
+ mark_ref_normal(this->node->raw);
+ mark_ref_normal(newfrag->node->raw);
+
+ return 0;
+}
+
+/*
+ * Given an inode, probably with existing tree of fragments, add the new node
+ * to the fragment tree.
+ */
+int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
+{
+ int ret;
+ struct jffs2_node_frag *newfrag;
+
+ if (unlikely(!fn->size))
+ return 0;
+
+ newfrag = new_fragment(fn, fn->ofs, fn->size);
+ if (unlikely(!newfrag))
+ return -ENOMEM;
+ newfrag->node->frags = 1;
+
+ dbg_fragtree("adding node %#04x-%#04x @0x%08x on flash, newfrag *%p\n",
+ fn->ofs, fn->ofs+fn->size, ref_offset(fn->raw), newfrag);
+
+ ret = jffs2_add_frag_to_fragtree(c, &f->fragtree, newfrag);
+ if (unlikely(ret))
+ return ret;
+
+ /* If we now share a page with other nodes, mark either previous
+ or next node REF_NORMAL, as appropriate. */
+ if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) {
+ struct jffs2_node_frag *prev = frag_prev(newfrag);
+
+ mark_ref_normal(fn->raw);
+ /* If we don't start at zero there's _always_ a previous */
+ if (prev->node)
+ mark_ref_normal(prev->node->raw);
+ }
+
+ if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) {
+ struct jffs2_node_frag *next = frag_next(newfrag);
+
+ if (next) {
+ mark_ref_normal(fn->raw);
+ if (next->node)
+ mark_ref_normal(next->node->raw);
+ }
+ }
+ jffs2_dbg_fragtree_paranoia_check_nolock(f);
+
+ return 0;
+}
+
+/*
+ * Check the data CRC of the node.
+ *
+ * Returns: 0 if the data CRC is correct;
+ *	    1 if it is incorrect;
+ *	    a negative error code if an error occurred.
+ */
+static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info *tn)
+{
+ struct jffs2_raw_node_ref *ref = tn->fn->raw;
+ int err = 0, pointed = 0;
+ struct jffs2_eraseblock *jeb;
+ unsigned char *buffer;
+ uint32_t crc, ofs, retlen, len;
+
+ BUG_ON(tn->csize == 0);
+
+ if (!jffs2_is_writebuffered(c))
+ goto adj_acc;
+
+ /* Calculate how many bytes were already checked */
+ ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
+ len = ofs % c->wbuf_pagesize;
+ if (likely(len))
+ len = c->wbuf_pagesize - len;
+
+ if (len >= tn->csize) {
+ dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
+ ref_offset(ref), tn->csize, ofs);
+ goto adj_acc;
+ }
+
+ ofs += len;
+ len = tn->csize - len;
+
+ dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
+ ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);
+
+#ifndef __ECOS
+	/* TODO: instead, encapsulate the point() handling in jffs2_flash_read(),
+	 * adding a jffs2_flash_read_end() interface. */
+ if (c->mtd->point) {
+ err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
+ if (!err && retlen < len) {
+ JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize);
+ c->mtd->unpoint(c->mtd, buffer, ofs, len);
+ } else if (err)
+ JFFS2_WARNING("MTD point failed: error code %d.\n", err);
+ else
+			pointed = 1; /* successfully pointed to device */
+ }
+#endif
+
+ if (!pointed) {
+ buffer = kmalloc(len, GFP_KERNEL);
+ if (unlikely(!buffer))
+ return -ENOMEM;
+
+ if (jffs2_flash_read_safe(c, ofs, len, buffer)) {
+ kfree(buffer);
+ return -EIO;
+ }
+ }
+
+ /* Continue calculating CRC */
+ crc = crc32(tn->partial_crc, buffer, len);
+ if(!pointed)
+ kfree(buffer);
+#ifndef __ECOS
+ else
+ c->mtd->unpoint(c->mtd, buffer, ofs, len);
+#endif
+
+ if (crc != tn->data_crc) {
+ JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
+ ofs, tn->data_crc, crc);
+ return 1;
+ }
+
+adj_acc:
+ jeb = c->blocks[ref->flash_offset / c->sector_size];
+ len = ref_totlen(c, jeb, ref);
+
+ /*
+ * Mark the node as having been checked and fix the
+ * accounting accordingly.
+ */
+ spin_lock(&c->erase_completion_lock);
+ jeb->used_size += len;
+ jeb->unchecked_size -= len;
+ c->used_size += len;
+ c->unchecked_size -= len;
+ spin_unlock(&c->erase_completion_lock);
+
+ return 0;
+}
+
+/*
+ * Helper function for jffs2_add_older_frag_to_fragtree().
+ *
+ * Checks the node if we are in the checking stage.
+ */
+static inline int check_node(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn)
+{
+ int ret;
+
+ BUG_ON(ref_obsolete(tn->fn->raw));
+
+ /* We only check the data CRC of unchecked nodes */
+ if (ref_flags(tn->fn->raw) != REF_UNCHECKED)
+ return 0;
+
+ dbg_fragtree2("check node %#04x-%#04x, phys offs %#08x.\n",
+ tn->fn->ofs, tn->fn->ofs + tn->fn->size, ref_offset(tn->fn->raw));
+
+ ret = check_node_data(c, tn);
+ if (unlikely(ret < 0)) {
+ JFFS2_ERROR("check_node_data() returned error: %d.\n",
+ ret);
+ } else if (unlikely(ret > 0)) {
+ dbg_fragtree2("CRC error, mark it obsolete.\n");
+ jffs2_mark_node_obsolete(c, tn->fn->raw);
+ }
+
+ return ret;
+}
+
+/*
+ * Helper function for jffs2_add_older_frag_to_fragtree().
+ *
+ * Called when the new fragment that is being inserted
+ * splits a hole fragment.
+ */
+static int split_hole(struct jffs2_sb_info *c, struct rb_root *root,
+ struct jffs2_node_frag *newfrag, struct jffs2_node_frag *hole)
+{
+ dbg_fragtree2("fragment %#04x-%#04x splits the hole %#04x-%#04x\n",
+ newfrag->ofs, newfrag->ofs + newfrag->size, hole->ofs, hole->ofs + hole->size);
+
+ if (hole->ofs == newfrag->ofs) {
+ /*
+ * Well, the new fragment actually starts at the same offset as
+ * the hole.
+ */
+ if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) {
+ /*
+ * We replace the overlapped left part of the hole by
+ * the new node.
+ */
+
+ dbg_fragtree2("insert fragment %#04x-%#04x and cut the left part of the hole\n",
+ newfrag->ofs, newfrag->ofs + newfrag->size);
+ rb_replace_node(&hole->rb, &newfrag->rb, root);
+
+ hole->ofs += newfrag->size;
+ hole->size -= newfrag->size;
+
+ /*
+ * We know that 'hole' should be the right hand
+ * fragment.
+ */
+ jffs2_fragtree_insert(hole, newfrag);
+ rb_insert_color(&hole->rb, root);
+ } else {
+ /*
+ * Ah, the new fragment is of the same size as the hole.
+			 * Replace the hole with it.
+ */
+ dbg_fragtree2("insert fragment %#04x-%#04x and overwrite hole\n",
+ newfrag->ofs, newfrag->ofs + newfrag->size);
+ rb_replace_node(&hole->rb, &newfrag->rb, root);
+ jffs2_free_node_frag(hole);
+ }
+ } else {
+		/* The new fragment leaves some hole space on the left */
+
+ struct jffs2_node_frag * newfrag2 = NULL;
+
+ if (hole->ofs + hole->size > newfrag->ofs + newfrag->size) {
+			/* The new frag also leaves some space on the right */
+ newfrag2 = new_fragment(NULL, newfrag->ofs +
+ newfrag->size, hole->ofs + hole->size
+ - newfrag->ofs - newfrag->size);
+ if (unlikely(!newfrag2)) {
+ jffs2_free_node_frag(newfrag);
+ return -ENOMEM;
}
- fd->nhash = full_name_hash(fd->name, node.d.nsize);
- fd->next = NULL;
- /* Wheee. We now have a complete jffs2_full_dirent structure, with
- the name in it and everything. Link it into the list
- */
- D1(printk(KERN_DEBUG "Adding fd \"%s\", ino #%u\n", fd->name, fd->ino));
- jffs2_add_fd_to_list(c, fd, &ret_fd);
- break;
-
- case JFFS2_NODETYPE_INODE:
- D1(printk(KERN_DEBUG "Node at %08x is a data node\n", ref->flash_offset &~3));
- if (retlen < sizeof(node.i)) {
- printk(KERN_WARNING "read too short for dnode\n");
- err = -EIO;
- goto free_out;
+ }
+
+ hole->size = newfrag->ofs - hole->ofs;
+		dbg_fragtree2("left the hole %#04x-%#04x on the left and inserted fragment %#04x-%#04x\n",
+ hole->ofs, hole->ofs + hole->size, newfrag->ofs, newfrag->ofs + newfrag->size);
+
+ jffs2_fragtree_insert(newfrag, hole);
+ rb_insert_color(&newfrag->rb, root);
+
+ if (newfrag2) {
+ dbg_fragtree2("left the hole %#04x-%#04x at the right\n",
+ newfrag2->ofs, newfrag2->ofs + newfrag2->size);
+ jffs2_fragtree_insert(newfrag2, newfrag);
+ rb_insert_color(&newfrag2->rb, root);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function is used when we build an inode. It expects the nodes to be
+ * passed in decreasing version order. The whole point of this is to improve
+ * inode checking on NAND: we check a node's data CRC only when the node is not
+ * obsoleted. Previously, the add_frag_to_fragtree() function was used; nodes
+ * were passed to it in increasing version order and the CRCs of all nodes
+ * were checked.
+ *
+ * Note: tn->fn->size shouldn't be zero.
+ *
+ * Returns 0 if the node was inserted
+ * 1 if it wasn't inserted (since it is obsolete)
+ *	   < 0 if an error occurred
+ */
+int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct jffs2_tmp_dnode_info *tn)
+{
+ struct jffs2_node_frag *this, *newfrag;
+ uint32_t lastend;
+ struct jffs2_full_dnode *fn = tn->fn;
+ struct rb_root *root = &f->fragtree;
+ uint32_t fn_size = fn->size, fn_ofs = fn->ofs;
+ int err, checked = 0;
+ int ref_flag;
+
+ dbg_fragtree("insert fragment %#04x-%#04x, ver %u\n", fn_ofs, fn_ofs + fn_size, tn->version);
+
+ /* Skip all the nodes which are completed before this one starts */
+ this = jffs2_lookup_node_frag(root, fn_ofs);
+ if (this)
+ dbg_fragtree2("'this' found %#04x-%#04x (%s)\n", this->ofs, this->ofs + this->size, this->node ? "data" : "hole");
+
+ if (this)
+ lastend = this->ofs + this->size;
+ else
+ lastend = 0;
+
+ /* Detect the preliminary type of node */
+ if (fn->size >= PAGE_CACHE_SIZE)
+ ref_flag = REF_PRISTINE;
+ else
+ ref_flag = REF_NORMAL;
+
+ /* See if we ran off the end of the root */
+ if (lastend <= fn_ofs) {
+ /* We did */
+
+ /*
+ * We are going to insert the new node into the
+ * fragment tree, so check it.
+ */
+ err = check_node(c, f, tn);
+ if (err != 0)
+ return err;
+
+ fn->frags = 1;
+
+ newfrag = new_fragment(fn, fn_ofs, fn_size);
+ if (unlikely(!newfrag))
+ return -ENOMEM;
+
+ err = no_overlapping_node(c, root, newfrag, this, lastend);
+ if (unlikely(err != 0)) {
+ jffs2_free_node_frag(newfrag);
+ return err;
+ }
+
+ goto out_ok;
+ }
+
+ fn->frags = 0;
+
+ while (1) {
+ /*
+ * Here we have:
+ * fn_ofs < this->ofs + this->size && fn_ofs >= this->ofs.
+ *
+		 * Remember, 'this' has a higher version; any non-hole node
+		 * which is already in the fragtree is newer than the newly
+		 * inserted one.
+ */
+ if (!this->node) {
+ /*
+ * 'this' is the hole fragment, so at least the
+ * beginning of the new fragment is valid.
+ */
+
+ /*
+ * We are going to insert the new node into the
+ * fragment tree, so check it.
+ */
+ if (!checked) {
+ err = check_node(c, f, tn);
+ if (unlikely(err != 0))
+ return err;
+ checked = 1;
}
- if (node.i.version > *highest_version)
- *highest_version = node.i.version;
- D1(printk(KERN_DEBUG "version %d, highest_version now %d\n", node.i.version, *highest_version));
-
- if (ref->flash_offset & 1) {
- D1(printk(KERN_DEBUG "obsoleted\n"));
- /* Obsoleted */
- continue;
+
+ if (this->ofs + this->size >= fn_ofs + fn_size) {
+				/* We split the hole into two parts */
+
+ fn->frags += 1;
+ newfrag = new_fragment(fn, fn_ofs, fn_size);
+ if (unlikely(!newfrag))
+ return -ENOMEM;
+
+ err = split_hole(c, root, newfrag, this);
+ if (unlikely(err))
+ return err;
+ goto out_ok;
}
- tn = jffs2_alloc_tmp_dnode_info();
- if (!tn) {
- D1(printk(KERN_DEBUG "alloc tn failed\n"));
- err = -ENOMEM;
- goto free_out;
+
+ /*
+ * The beginning of the new fragment is valid since it
+ * overlaps the hole node.
+ */
+
+ ref_flag = REF_NORMAL;
+
+ fn->frags += 1;
+ newfrag = new_fragment(fn, fn_ofs,
+ this->ofs + this->size - fn_ofs);
+ if (unlikely(!newfrag))
+ return -ENOMEM;
+
+ if (fn_ofs == this->ofs) {
+ /*
+ * The new node starts at the same offset as
+				 * the hole and supersedes the hole.
+ */
+ dbg_fragtree2("add the new fragment instead of hole %#04x-%#04x, refcnt %d\n",
+ fn_ofs, fn_ofs + this->ofs + this->size - fn_ofs, fn->frags);
+
+ rb_replace_node(&this->rb, &newfrag->rb, root);
+ jffs2_free_node_frag(this);
+ } else {
+ /*
+ * The hole becomes shorter as its right part
+				 * is superseded by the new fragment.
+ */
+ dbg_fragtree2("reduce size of hole %#04x-%#04x to %#04x-%#04x\n",
+ this->ofs, this->ofs + this->size, this->ofs, this->ofs + this->size - newfrag->size);
+
+ dbg_fragtree2("add new fragment %#04x-%#04x, refcnt %d\n", fn_ofs,
+ fn_ofs + this->ofs + this->size - fn_ofs, fn->frags);
+
+ this->size -= newfrag->size;
+ jffs2_fragtree_insert(newfrag, this);
+ rb_insert_color(&newfrag->rb, root);
}
- tn->fn = jffs2_alloc_full_dnode();
- if (!tn->fn) {
- D1(printk(KERN_DEBUG "alloc fn failed\n"));
- err = -ENOMEM;
- jffs2_free_tmp_dnode_info(tn);
- goto free_out;
+ fn_ofs += newfrag->size;
+ fn_size -= newfrag->size;
+ this = rb_entry(rb_next(&newfrag->rb),
+ struct jffs2_node_frag, rb);
+
+ dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n",
+ this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)");
+ }
+
+ /*
+ * 'This' node is not the hole so it obsoletes the new fragment
+ * either fully or partially.
+ */
+ if (this->ofs + this->size >= fn_ofs + fn_size) {
+ /* The new node is obsolete, drop it */
+ if (fn->frags == 0) {
+ dbg_fragtree2("%#04x-%#04x is obsolete, mark it obsolete\n", fn_ofs, fn_ofs + fn_size);
+ ref_flag = REF_OBSOLETE;
}
- tn->version = node.i.version;
- tn->fn->ofs = node.i.offset;
- /* There was a bug where we wrote hole nodes out with
- csize/dsize swapped. Deal with it */
- if (node.i.compr == JFFS2_COMPR_ZERO && !node.i.dsize && node.i.csize)
- tn->fn->size = node.i.csize;
- else // normal case...
- tn->fn->size = node.i.dsize;
- tn->fn->raw = ref;
- D1(printk(KERN_DEBUG "dnode @%08x: ver %u, offset %04x, dsize %04x\n", ref->flash_offset &~3, node.i.version, node.i.offset, node.i.dsize));
- jffs2_add_tn_to_list(tn, &ret_tn);
- break;
-
- default:
- switch(node.u.nodetype & JFFS2_COMPAT_MASK) {
- case JFFS2_FEATURE_INCOMPAT:
- printk(KERN_NOTICE "Unknown INCOMPAT nodetype %04X at %08X\n", node.u.nodetype, ref->flash_offset & ~3);
- break;
- case JFFS2_FEATURE_ROCOMPAT:
- printk(KERN_NOTICE "Unknown ROCOMPAT nodetype %04X at %08X\n", node.u.nodetype, ref->flash_offset & ~3);
- break;
- case JFFS2_FEATURE_RWCOMPAT_COPY:
- printk(KERN_NOTICE "Unknown RWCOMPAT_COPY nodetype %04X at %08X\n", node.u.nodetype, ref->flash_offset & ~3);
- break;
- case JFFS2_FEATURE_RWCOMPAT_DELETE:
- printk(KERN_NOTICE "Unknown RWCOMPAT_DELETE nodetype %04X at %08X\n", node.u.nodetype, ref->flash_offset & ~3);
- break;
+ goto out_ok;
+ } else {
+ struct jffs2_node_frag *new_this;
+
+ /* 'This' node obsoletes the beginning of the new node */
+ dbg_fragtree2("the beginning %#04x-%#04x is obsolete\n", fn_ofs, this->ofs + this->size);
+
+ ref_flag = REF_NORMAL;
+
+ fn_size -= this->ofs + this->size - fn_ofs;
+ fn_ofs = this->ofs + this->size;
+ dbg_fragtree2("now considering %#04x-%#04x\n", fn_ofs, fn_ofs + fn_size);
+
+ new_this = rb_entry(rb_next(&this->rb), struct jffs2_node_frag, rb);
+ if (!new_this) {
+ /*
+ * There is no next fragment. Add the rest of
+ * the new node as the right-hand child.
+ */
+ if (!checked) {
+ err = check_node(c, f, tn);
+ if (unlikely(err != 0))
+ return err;
+ checked = 1;
+ }
+
+ fn->frags += 1;
+ newfrag = new_fragment(fn, fn_ofs, fn_size);
+ if (unlikely(!newfrag))
+ return -ENOMEM;
+
+ dbg_fragtree2("there are no more fragments, insert %#04x-%#04x\n",
+ newfrag->ofs, newfrag->ofs + newfrag->size);
+ rb_link_node(&newfrag->rb, &this->rb, &this->rb.rb_right);
+ rb_insert_color(&newfrag->rb, root);
+ goto out_ok;
+ } else {
+ this = new_this;
+ dbg_fragtree2("switch to the next 'this' fragment: %#04x-%#04x %s\n",
+ this->ofs, this->ofs + this->size, this->node ? "(data)" : "(hole)");
}
}
}
- *tnp = ret_tn;
- *fdp = ret_fd;
+
+out_ok:
+ BUG_ON(fn->size < PAGE_CACHE_SIZE && ref_flag == REF_PRISTINE);
+
+ if (ref_flag == REF_OBSOLETE) {
+ dbg_fragtree2("the node is obsolete now\n");
+ /* jffs2_mark_node_obsolete() will adjust space accounting */
+ jffs2_mark_node_obsolete(c, fn->raw);
+ return 1;
+ }
+
+ dbg_fragtree2("the node is \"%s\" now\n", ref_flag == REF_NORMAL ? "REF_NORMAL" : "REF_PRISTINE");
+
+	/* Space accounting was adjusted by check_node_data() */
+ spin_lock(&c->erase_completion_lock);
+ fn->raw->flash_offset = ref_offset(fn->raw) | ref_flag;
+ spin_unlock(&c->erase_completion_lock);
return 0;
+}
- free_out:
- jffs2_free_tmp_dnode_info_list(ret_tn);
- jffs2_free_full_dirent_list(ret_fd);
- return err;
+void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state)
+{
+ spin_lock(&c->inocache_lock);
+ ic->state = state;
+ wake_up(&c->inocache_wq);
+ spin_unlock(&c->inocache_lock);
}
-struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, int ino)
+/* During mount, this needs no locking. During normal operation, its
+ callers want to do other stuff while still holding the inocache_lock.
+ Rather than introducing special case get_ino_cache functions or
+ callbacks, we just let the caller do the locking itself. */
+
+struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
struct jffs2_inode_cache *ret;
- D2(printk(KERN_DEBUG "jffs2_get_ino_cache(): ino %u\n", ino));
- spin_lock (&c->inocache_lock);
ret = c->inocache_list[ino % INOCACHE_HASHSIZE];
while (ret && ret->ino < ino) {
ret = ret->next;
}
- spin_unlock(&c->inocache_lock);
-
if (ret && ret->ino != ino)
ret = NULL;
- D2(printk(KERN_DEBUG "jffs2_get_ino_cache found %p for ino %u\n", ret, ino));
return ret;
}
void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new)
{
struct jffs2_inode_cache **prev;
- D2(printk(KERN_DEBUG "jffs2_add_ino_cache: Add %p (ino #%u)\n", new, new->ino));
+
spin_lock(&c->inocache_lock);
-
+ if (!new->ino)
+ new->ino = ++c->highest_ino;
+
+ dbg_inocache("add %p (ino #%u)\n", new, new->ino);
+
prev = &c->inocache_list[new->ino % INOCACHE_HASHSIZE];
while ((*prev) && (*prev)->ino < new->ino) {
@@ -299,23 +879,34 @@ void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new
}
new->next = *prev;
*prev = new;
+
spin_unlock(&c->inocache_lock);
}
void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
{
struct jffs2_inode_cache **prev;
- D2(printk(KERN_DEBUG "jffs2_del_ino_cache: Del %p (ino #%u)\n", old, old->ino));
+
+ dbg_inocache("del %p (ino #%u)\n", old, old->ino);
spin_lock(&c->inocache_lock);
-
+
prev = &c->inocache_list[old->ino % INOCACHE_HASHSIZE];
-
+
while ((*prev) && (*prev)->ino < old->ino) {
prev = &(*prev)->next;
}
if ((*prev) == old) {
*prev = old->next;
}
+
+ /* Free it now unless it's in READING or CLEARING state, which
+ are the transitions upon read_inode() and clear_inode(). The
+ rest of the time we know nobody else is looking at it, and
+ if it's held by read_inode() or clear_inode() they'll free it
+ for themselves. */
+ if (old->state != INO_STATE_READING && old->state != INO_STATE_CLEARING)
+ jffs2_free_inode_cache(old);
+
spin_unlock(&c->inocache_lock);
}
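/*
 * A stand-alone sketch (illustrative only, not from the patch): the inode
 * cache manipulated by jffs2_add_ino_cache()/jffs2_del_ino_cache() above is
 * a hash table of singly linked lists, each bucket kept sorted by inode
 * number.  The bucket walk can be modelled in user space like this; all
 * names below (ino_entry, ino_add, ino_get) are hypothetical.
 */
#include <stdint.h>

#define HASHSIZE 128

struct ino_entry {
	struct ino_entry *next;
	uint32_t ino;
};

static struct ino_entry *buckets[HASHSIZE];

static void ino_add(struct ino_entry *new)
{
	struct ino_entry **prev = &buckets[new->ino % HASHSIZE];

	/* Walk to the first entry with ino >= new->ino, then splice in */
	while (*prev && (*prev)->ino < new->ino)
		prev = &(*prev)->next;
	new->next = *prev;
	*prev = new;
}

static struct ino_entry *ino_get(uint32_t ino)
{
	struct ino_entry *ret = buckets[ino % HASHSIZE];

	/* Stop at the first entry whose ino is >= the one we want */
	while (ret && ret->ino < ino)
		ret = ret->next;
	return (ret && ret->ino == ino) ? ret : NULL;
}

int main(void)
{
	struct ino_entry a = { .ino = 42 }, b = { .ino = 7 };

	ino_add(&a);
	ino_add(&b);
	return (ino_get(42) == &a && ino_get(7) == &b && !ino_get(5)) ? 0 : 1;
}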
@@ -323,12 +914,11 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c)
{
int i;
struct jffs2_inode_cache *this, *next;
-
+
for (i=0; i<INOCACHE_HASHSIZE; i++) {
this = c->inocache_list[i];
while (this) {
next = this->next;
- D2(printk(KERN_DEBUG "jffs2_free_ino_caches: Freeing ino #%u at %p\n", this->ino, this));
jffs2_free_inode_cache(this);
this = next;
}
@@ -342,13 +932,97 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
struct jffs2_raw_node_ref *this, *next;
for (i=0; i<c->nr_blocks; i++) {
- this = c->blocks[i].first_node;
+ this = c->blocks[i]->first_node;
while(this) {
next = this->next_phys;
jffs2_free_raw_node_ref(this);
this = next;
}
- c->blocks[i].first_node = c->blocks[i].last_node = NULL;
+ c->blocks[i]->first_node = c->blocks[i]->last_node = NULL;
+ }
+}
+
+struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
+{
+ /* The common case in lookup is that there will be a node
+ which precisely matches. So we go looking for that first */
+ struct rb_node *next;
+ struct jffs2_node_frag *prev = NULL;
+ struct jffs2_node_frag *frag = NULL;
+
+ dbg_fragtree2("root %p, offset %d\n", fragtree, offset);
+
+ next = fragtree->rb_node;
+
+ while(next) {
+ frag = rb_entry(next, struct jffs2_node_frag, rb);
+
+ if (frag->ofs + frag->size <= offset) {
+ /* Remember the closest smaller match on the way down */
+ if (!prev || frag->ofs > prev->ofs)
+ prev = frag;
+ next = frag->rb.rb_right;
+ } else if (frag->ofs > offset) {
+ next = frag->rb.rb_left;
+ } else {
+ return frag;
+ }
+ }
+
+ /* Exact match not found. Go back up looking at each parent,
+ and return the closest smaller one */
+
+ if (prev)
+ dbg_fragtree2("no match. Returning frag %#04x-%#04x, closest previous\n",
+ prev->ofs, prev->ofs+prev->size);
+ else
+ dbg_fragtree2("returning NULL, empty fragtree\n");
+
+ return prev;
+}
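/*
 * A stand-alone sketch (illustrative only, not from the patch):
 * jffs2_lookup_node_frag() returns the fragment covering 'offset', or
 * failing that the closest fragment that starts before it.  The same
 * three-way decision rule, applied to a sorted array standing in for the
 * red-black tree; the names (struct frag, frag_lookup) are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

struct frag { uint32_t ofs, size; };

static const struct frag *frag_lookup(const struct frag *v, size_t n,
				      uint32_t offset)
{
	const struct frag *prev = NULL;
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const struct frag *f = &v[mid];

		if (f->ofs + f->size <= offset) {
			prev = f;	/* closest smaller match so far */
			lo = mid + 1;	/* keep looking to the right */
		} else if (f->ofs > offset) {
			hi = mid;	/* keep looking to the left */
		} else {
			return f;	/* this fragment covers 'offset' */
		}
	}
	return prev;			/* NULL only for an empty map */
}

int main(void)
{
	static const struct frag map[] = {
		{ 0x0000, 0x1000 }, { 0x1000, 0x0800 }, { 0x2000, 0x1000 },
	};
	/* 0x1900 falls in the gap after the second fragment */
	return (frag_lookup(map, 3, 0x1900) == &map[1]) ? 0 : 1;
}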
+
+/* Pass 'c' argument to indicate that nodes should be marked obsolete as
+ they're killed. */
+void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
+{
+ struct jffs2_node_frag *frag;
+ struct jffs2_node_frag *parent;
+
+ if (!root->rb_node)
+ return;
+
+ dbg_fragtree("killing\n");
+
+ frag = (rb_entry(root->rb_node, struct jffs2_node_frag, rb));
+ while(frag) {
+ if (frag->rb.rb_left) {
+ frag = frag_left(frag);
+ continue;
+ }
+ if (frag->rb.rb_right) {
+ frag = frag_right(frag);
+ continue;
+ }
+
+ if (frag->node && !(--frag->node->frags)) {
+ /* Not a hole, and it's the final remaining frag
+ of this node. Free the node */
+ if (c)
+ jffs2_mark_node_obsolete(c, frag->node->raw);
+
+ jffs2_free_full_dnode(frag->node);
+ }
+ parent = frag_parent(frag);
+ if (parent) {
+ if (frag_left(parent) == frag)
+ parent->rb.rb_left = NULL;
+ else
+ parent->rb.rb_right = NULL;
+ }
+
+ jffs2_free_node_frag(frag);
+ frag = parent;
+
+ cond_resched();
}
}
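/*
 * A stand-alone sketch (illustrative only, not from the patch):
 * jffs2_kill_fragtree() above tears the tree down iteratively in
 * post-order, following parent pointers instead of recursing, so the
 * kernel stack stays bounded however deep the fragtree is.  The same
 * walk on a hypothetical minimal binary tree:
 */
#include <stdlib.h>

struct tnode {
	struct tnode *left, *right, *parent;
};

static void kill_tree(struct tnode *root)
{
	struct tnode *n = root, *parent;

	while (n) {
		if (n->left) {			/* descend left first ... */
			n = n->left;
			continue;
		}
		if (n->right) {			/* ... then right */
			n = n->right;
			continue;
		}
		/* Leaf: unlink it from its parent, free it, resume above */
		parent = n->parent;
		if (parent) {
			if (parent->left == n)
				parent->left = NULL;
			else
				parent->right = NULL;
		}
		free(n);
		n = parent;
	}
}

int main(void)
{
	struct tnode *root = calloc(1, sizeof(*root));
	struct tnode *leaf = calloc(1, sizeof(*leaf));

	root->left = leaf;
	leaf->parent = root;
	kill_tree(root);
	return 0;
}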
-
diff --git a/linux-2.4.x/fs/jffs2/nodelist.h b/linux-2.4.x/fs/jffs2/nodelist.h
index ac073ed..4a763c5 100644
--- a/linux-2.4.x/fs/jffs2/nodelist.h
+++ b/linux-2.4.x/fs/jffs2/nodelist.h
@@ -1,66 +1,74 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: nodelist.h,v 1.46.2.3 2002/10/11 09:04:44 dwmw2 Exp $
+ * $Id: nodelist.h,v 1.146 2005/11/18 07:27:45 forrest Exp $
*
*/
+#ifndef __JFFS2_NODELIST_H__
+#define __JFFS2_NODELIST_H__
+
#include <linux/config.h>
#include <linux/fs.h>
-
+#include <linux/types.h>
+#include <linux/jffs2.h>
#include <linux/jffs2_fs_sb.h>
#include <linux/jffs2_fs_i.h>
+#include "summary.h"
-#ifndef CONFIG_JFFS2_FS_DEBUG
-#define CONFIG_JFFS2_FS_DEBUG 2
-#endif
-
-#if CONFIG_JFFS2_FS_DEBUG > 0
-#define D1(x) x
+#ifdef __ECOS
+#include "os-ecos.h"
#else
-#define D1(x)
+#include <linux/mtd/compatmac.h> /* For compatibility with older kernels */
+#include "os-linux.h"
#endif
-#if CONFIG_JFFS2_FS_DEBUG > 1
-#define D2(x) x
+#define JFFS2_NATIVE_ENDIAN
+
+/* Note we handle mode bits conversion from JFFS2 (i.e. Linux) to/from
+ whatever OS we're actually running on here too. */
+
+#if defined(JFFS2_NATIVE_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){x})
+#define cpu_to_je32(x) ((jint32_t){x})
+#define cpu_to_jemode(x) ((jmode_t){os_to_jffs2_mode(x)})
+
+#define je16_to_cpu(x) ((x).v16)
+#define je32_to_cpu(x) ((x).v32)
+#define jemode_to_cpu(x) (jffs2_to_os_mode((x).m))
+#elif defined(JFFS2_BIG_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){cpu_to_be16(x)})
+#define cpu_to_je32(x) ((jint32_t){cpu_to_be32(x)})
+#define cpu_to_jemode(x) ((jmode_t){cpu_to_be32(os_to_jffs2_mode(x))})
+
+#define je16_to_cpu(x) (be16_to_cpu(x.v16))
+#define je32_to_cpu(x) (be32_to_cpu(x.v32))
+#define jemode_to_cpu(x) (be32_to_cpu(jffs2_to_os_mode((x).m)))
+#elif defined(JFFS2_LITTLE_ENDIAN)
+#define cpu_to_je16(x) ((jint16_t){cpu_to_le16(x)})
+#define cpu_to_je32(x) ((jint32_t){cpu_to_le32(x)})
+#define cpu_to_jemode(x) ((jmode_t){cpu_to_le32(os_to_jffs2_mode(x))})
+
+#define je16_to_cpu(x) (le16_to_cpu(x.v16))
+#define je32_to_cpu(x) (le32_to_cpu(x.v32))
+#define jemode_to_cpu(x) (le32_to_cpu(jffs2_to_os_mode((x).m)))
#else
-#define D2(x)
+#error wibble
#endif
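/*
 * A stand-alone sketch (illustrative only, not from the patch): the
 * je16/je32 wrappers above work because jint16_t/jint32_t are
 * single-member structs, so on-media values cannot be mixed with host
 * integers without an explicit conversion.  A user-space model of the
 * native-endian case, with hypothetical type and macro names:
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t v32; } my_jint32_t;

#define my_cpu_to_je32(x) ((my_jint32_t){ (x) })
#define my_je32_to_cpu(x) ((x).v32)

int main(void)
{
	my_jint32_t on_media = my_cpu_to_je32(0xdeadbeef);

	/* 'on_media + 1' would not compile; the conversion is forced: */
	printf("%#x\n", (unsigned)(my_je32_to_cpu(on_media) + 1));
	return 0;
}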
+/* The minimal node header size */
+#define JFFS2_MIN_NODE_HEADER sizeof(struct jffs2_raw_dirent)
+
/*
This is all we need to keep in-core for each raw node during normal
operation. As and when we do read_inode on a particular inode, we can
- scan the nodes which are listed for it and build up a proper map of
+ scan the nodes which are listed for it and build up a proper map of
which nodes are currently valid. JFFSv1 always used to keep that whole
map in core for each inode.
*/
@@ -71,213 +79,294 @@ struct jffs2_raw_node_ref
for this inode instead. The inode_cache will have NULL in the first
word so you know when you've got there :) */
struct jffs2_raw_node_ref *next_phys;
- // __u32 ino;
- __u32 flash_offset;
- __u32 totlen;
-// __u16 nodetype;
-
- /* flash_offset & 3 always has to be zero, because nodes are
- always aligned at 4 bytes. So we have a couple of extra bits
- to play with. So we set the least significant bit to 1 to
- signify that the node is obsoleted by later nodes.
- */
+ uint32_t flash_offset;
+ uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */
};
-/*
- Used for keeping track of deletion nodes &c, which can only be marked
- as obsolete when the node which they mark as deleted has actually been
- removed from the flash.
-*/
-struct jffs2_raw_node_ref_list {
- struct jffs2_raw_node_ref *rew;
- struct jffs2_raw_node_ref_list *next;
-};
+ /* flash_offset & 3 always has to be zero, because nodes are
+ always aligned at 4 bytes. So we have a couple of extra bits
+ to play with, which indicate the node's status; see below: */
+#define REF_UNCHECKED 0 /* We haven't yet checked the CRC or built its inode */
+#define REF_OBSOLETE 1 /* Obsolete, can be completely ignored */
+#define REF_PRISTINE 2 /* Completely clean. GC without looking */
+#define REF_NORMAL 3 /* Possibly overlapped. Read the page and write again on GC */
+#define ref_flags(ref) ((ref)->flash_offset & 3)
+#define ref_offset(ref) ((ref)->flash_offset & ~3)
+#define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE)
+#define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0)
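/*
 * A stand-alone sketch (illustrative only, not from the patch): nodes are
 * written at 4-byte-aligned offsets, so the two low bits of flash_offset
 * are free to carry the REF_* state above.  The pack/unpack arithmetic
 * behind ref_offset()/ref_flags()/mark_ref_normal(), with made-up values:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t flash_offset = 0x00024a80 | 2;		/* offset | REF_PRISTINE */

	assert((flash_offset & ~3) == 0x00024a80);	/* ref_offset()  */
	assert((flash_offset &  3) == 2);		/* ref_flags()   */

	/* mark_ref_normal(): keep the offset, replace the state bits */
	flash_offset = (flash_offset & ~3) | 3;		/* now REF_NORMAL */
	assert((flash_offset & 3) == 3);
	return 0;
}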
/* For each inode in the filesystem, we need to keep a record of
nlink, because it would be a PITA to scan the whole directory tree
at read_inode() time to calculate it, and to keep sufficient information
- in the raw_node_ref (basically both parent and child inode number for
+ in the raw_node_ref (basically both parent and child inode number for
dirent nodes) would take more space than this does. We also keep
a pointer to the first physical node which is part of this inode, too.
*/
struct jffs2_inode_cache {
- struct jffs2_scan_info *scan; /* Used during scan to hold
- temporary lists of nodes, and later must be set to
+ struct jffs2_full_dirent *scan_dents; /* Used during scan to hold
+ temporary lists of dirents, and later must be set to
NULL to mark the end of the raw_node_ref->next_in_ino
chain. */
struct jffs2_inode_cache *next;
struct jffs2_raw_node_ref *nodes;
- __u32 ino;
+ uint32_t ino;
int nlink;
+ int state;
};
-struct jffs2_scan_info {
- struct jffs2_full_dirent *dents;
- struct jffs2_tmp_dnode_info *tmpnodes;
-};
+/* Inode states for 'state' above. We need the 'GC' state to prevent
+ someone from doing a read_inode() while we're moving a 'REF_PRISTINE'
+ node without going through all the iget() nonsense */
+#define INO_STATE_UNCHECKED 0 /* CRC checks not yet done */
+#define INO_STATE_CHECKING 1 /* CRC checks in progress */
+#define INO_STATE_PRESENT 2 /* In core */
+#define INO_STATE_CHECKEDABSENT 3 /* Checked, cleared again */
+#define INO_STATE_GC 4 /* GCing a 'pristine' node */
+#define INO_STATE_READING 5 /* In read_inode() */
+#define INO_STATE_CLEARING 6 /* In clear_inode() */
+
+#define INOCACHE_HASHSIZE 128
+
/*
- Larger representation of a raw node, kept in-core only when the
+ Larger representation of a raw node, kept in-core only when the
struct inode for this particular ino is instantiated.
*/
struct jffs2_full_dnode
{
struct jffs2_raw_node_ref *raw;
- __u32 ofs; /* Don't really need this, but optimisation */
- __u32 size;
- __u32 frags; /* Number of fragments which currently refer
- to this node. When this reaches zero,
- the node is obsolete.
- */
+ uint32_t ofs; /* The offset to which the data of this node belongs */
+ uint32_t size;
+ uint32_t frags; /* Number of fragments which currently refer
+ to this node. When this reaches zero,
+ the node is obsolete. */
};
-/*
+/*
Even larger representation of a raw node, kept in-core only while
we're actually building up the original map of which nodes go where,
in read_inode()
*/
struct jffs2_tmp_dnode_info
{
- struct jffs2_tmp_dnode_info *next;
+ struct rb_node rb;
struct jffs2_full_dnode *fn;
- __u32 version;
-};
+ uint32_t version;
+ uint32_t data_crc;
+ uint32_t partial_crc;
+ uint32_t csize;
+};
struct jffs2_full_dirent
{
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *next;
- __u32 version;
- __u32 ino; /* == zero for unlink */
+ uint32_t version;
+ uint32_t ino; /* == zero for unlink */
unsigned int nhash;
unsigned char type;
unsigned char name[0];
};
+
/*
- Fragments - used to build a map of which raw node to obtain
+ Fragments - used to build a map of which raw node to obtain
data from for each part of the ino
*/
struct jffs2_node_frag
{
- struct jffs2_node_frag *next;
+ struct rb_node rb;
struct jffs2_full_dnode *node; /* NULL for holes */
- __u32 size;
- __u32 ofs; /* Don't really need this, but optimisation */
+ uint32_t size;
+ uint32_t ofs; /* The offset to which this fragment belongs */
};
struct jffs2_eraseblock
{
struct list_head list;
- int bad_count;
- __u32 offset; /* of this block in the MTD */
-
- __u32 used_size;
- __u32 dirty_size;
- __u32 free_size; /* Note that sector_size - free_size
+ struct list_head hash_list;
+ uint16_t bad_count;
+ uint16_t flags;
+ uint32_t offset; /* of this block in the MTD */
+
+ uint32_t unchecked_size;
+ uint32_t used_size;
+ uint32_t dirty_size;
+ uint32_t wasted_size;
+ uint32_t free_size; /* Note that sector_size - free_size
is the address of the first free space */
struct jffs2_raw_node_ref *first_node;
struct jffs2_raw_node_ref *last_node;
struct jffs2_raw_node_ref *gc_node; /* Next node to be garbage collected */
- /* For deletia. When a dirent node in this eraseblock is
- deleted by a node elsewhere, that other node can only
- be marked as obsolete when this block is actually erased.
- So we keep a list of the nodes to mark as obsolete when
- the erase is completed.
- */
- // MAYBE struct jffs2_raw_node_ref_list *deletia;
+ uint32_t erase_count;
};
-#define ACCT_SANITY_CHECK(c, jeb) do { \
- if (jeb->used_size + jeb->dirty_size + jeb->free_size != c->sector_size) { \
- printk(KERN_NOTICE "Eeep. Space accounting for block at 0x%08x is screwed\n", jeb->offset); \
- printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x != total %08x\n", \
- jeb->free_size, jeb->dirty_size, jeb->used_size, c->sector_size); \
- BUG(); \
- } \
- if (c->used_size + c->dirty_size + c->free_size + c->erasing_size + c->bad_size != c->flash_size) { \
- printk(KERN_NOTICE "Eeep. Space accounting superblock info is screwed\n"); \
- printk(KERN_NOTICE "free 0x%08x + dirty 0x%08x + used %08x + erasing %08x + bad %08x != total %08x\n", \
- c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size, c->flash_size); \
- BUG(); \
- } \
-} while(0)
-
-#define ACCT_PARANOIA_CHECK(jeb) do { \
- __u32 my_used_size = 0; \
- struct jffs2_raw_node_ref *ref2 = jeb->first_node; \
- while (ref2) { \
- if (!(ref2->flash_offset & 1)) \
- my_used_size += ref2->totlen; \
- ref2 = ref2->next_phys; \
- } \
- if (my_used_size != jeb->used_size) { \
- printk(KERN_NOTICE "Calculated used size %08x != stored used size %08x\n", my_used_size, jeb->used_size); \
- BUG(); \
- } \
- } while(0)
+#define EBFLAGS_SET_EBH(jeb) (jeb->flags |= 1)
+#define EBFLAGS_CLR_EBH(jeb) (jeb->flags &= ~1)
+#define EBFLAGS_HAS_EBH(jeb) ((jeb->flags & 1) == 1)
+
+/* Calculate totlen from surrounding nodes or eraseblock */
+static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_node_ref *ref)
+{
+ uint32_t ref_end;
+
+ if (ref->next_phys)
+ ref_end = ref_offset(ref->next_phys);
+ else {
+ if (!jeb)
+ jeb = c->blocks[ref->flash_offset / c->sector_size];
+
+ /* Last node in block. Use free_space */
+ BUG_ON(ref != jeb->last_node);
+ ref_end = jeb->offset + c->sector_size - jeb->free_size;
+ }
+ return ref_end - ref_offset(ref);
+}
+
+static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
+ struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_node_ref *ref)
+{
+ uint32_t ret;
+
+#if CONFIG_JFFS2_FS_DEBUG > 0
+ if (jeb && jeb != c->blocks[ref->flash_offset / c->sector_size]) {
+ printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n",
+ jeb->offset, c->blocks[ref->flash_offset / c->sector_size]->offset, ref_offset(ref));
+ BUG();
+ }
+#endif
+
+#if 1
+ ret = ref->__totlen;
+#else
+ /* This doesn't actually work yet */
+ ret = __ref_totlen(c, jeb, ref);
+ if (ret != ref->__totlen) {
+ printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
+ ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
+ ret, ref->__totlen);
+ if (!jeb)
+ jeb = c->blocks[ref->flash_offset / c->sector_size];
+ jffs2_dbg_dump_node_refs_nolock(c, jeb);
+ BUG();
+ }
+#endif
+ return ret;
+}
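/*
 * A stand-alone sketch (illustrative only, not from the patch):
 * __ref_totlen() above derives a node's length from its neighbours rather
 * than storing it: the node ends where the next node starts, or, for the
 * last node in a block, where the block's free space begins.  With made-up
 * numbers (64KiB block at 0x40000, 0x1000 bytes still free, last node at
 * 0x4e280) the arithmetic works out as follows:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t block_ofs = 0x40000, sector_size = 0x10000, free_size = 0x1000;
	uint32_t node_ofs = 0x4e280;

	/* First free byte of the block = end of the last node */
	uint32_t ref_end = block_ofs + sector_size - free_size;

	assert(ref_end == 0x4f000);
	assert(ref_end - node_ofs == 0xd80);	/* the node's totlen */
	return 0;
}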
#define ALLOC_NORMAL 0 /* Normal allocation */
#define ALLOC_DELETION 1 /* Deletion node. Best to allow it */
#define ALLOC_GC 2 /* Space requested for GC. Give it or die */
+#define ALLOC_NORETRY 3 /* For jffs2_write_dnode: On failure, return -EAGAIN instead of retrying */
-#define JFFS2_RESERVED_BLOCKS_BASE 3 /* Number of free blocks there must be before we... */
-#define JFFS2_RESERVED_BLOCKS_WRITE (JFFS2_RESERVED_BLOCKS_BASE + 2) /* ... allow a normal filesystem write */
-#define JFFS2_RESERVED_BLOCKS_DELETION (JFFS2_RESERVED_BLOCKS_BASE + 1) /* ... allow a normal filesystem deletion */
-#define JFFS2_RESERVED_BLOCKS_GCTRIGGER (JFFS2_RESERVED_BLOCKS_BASE + 3) /* ... wake up the GC thread */
-#define JFFS2_RESERVED_BLOCKS_GCBAD (JFFS2_RESERVED_BLOCKS_BASE + 1) /* ... pick a block from the bad_list to GC */
-#define JFFS2_RESERVED_BLOCKS_GCMERGE (JFFS2_RESERVED_BLOCKS_BASE) /* ... merge pages when garbage collecting */
+/* How much dirty space before it goes on the very_dirty_list */
+#define VERYDIRTY(c, size) ((size) >= ((c)->sector_size / 2))
+/* Check if dirty space is more than 255 bytes */
+#define ISDIRTY(size) ((size) > sizeof (struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
#define PAD(x) (((x)+3)&~3)
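/*
 * A stand-alone sketch (illustrative only, not from the patch): PAD()
 * rounds a length up to the next 4-byte boundary, matching the node
 * alignment that the REF_* flag bits rely on.  For example:
 */
#include <assert.h>

#define PAD(x) (((x)+3)&~3)

int main(void)
{
	assert(PAD(0x41) == 0x44);	/* rounds up       */
	assert(PAD(0x44) == 0x44);	/* already aligned */
	assert(PAD(1) == 4 && PAD(0) == 0);
	return 0;
}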
-static inline int jffs2_raw_ref_to_inum(struct jffs2_raw_node_ref *raw)
+static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw)
{
while(raw->next_in_ino) {
raw = raw->next_in_ino;
}
- return ((struct jffs2_inode_cache *)raw)->ino;
+ return ((struct jffs2_inode_cache *)raw);
}
+static inline struct jffs2_node_frag *frag_first(struct rb_root *root)
+{
+ struct rb_node *node = root->rb_node;
+
+ if (!node)
+ return NULL;
+ while(node->rb_left)
+ node = node->rb_left;
+ return rb_entry(node, struct jffs2_node_frag, rb);
+}
+
+static inline struct jffs2_node_frag *frag_last(struct rb_root *root)
+{
+ struct rb_node *node = root->rb_node;
+
+ if (!node)
+ return NULL;
+ while(node->rb_right)
+ node = node->rb_right;
+ return rb_entry(node, struct jffs2_node_frag, rb);
+}
+
+static inline void record_erase_count(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+ c->total_erase_count += jeb->erase_count;
+ c->nr_blocks_with_ebh++;
+ if (jeb->erase_count > c->max_erase_count) {
+ c->max_erase_count = jeb->erase_count;
+ }
+}
+
+#define rb_parent(rb) ((rb)->rb_parent)
+#define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
+#define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
+#define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
+#define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
+#define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
+#define frag_erase(frag, list) rb_erase(&frag->rb, list);
+
/* nodelist.c */
-D1(void jffs2_print_frag_list(struct jffs2_inode_info *f));
void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new, struct jffs2_full_dirent **list);
-void jffs2_add_tn_to_list(struct jffs2_tmp_dnode_info *tn, struct jffs2_tmp_dnode_info **list);
-int jffs2_get_inode_nodes(struct jffs2_sb_info *c, ino_t ino, struct jffs2_inode_info *f,
- struct jffs2_tmp_dnode_info **tnp, struct jffs2_full_dirent **fdp,
- __u32 *highest_version, __u32 *latest_mctime,
- __u32 *mctime_ver);
-struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, int ino);
+void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state);
+struct jffs2_inode_cache *jffs2_get_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
void jffs2_add_ino_cache (struct jffs2_sb_info *c, struct jffs2_inode_cache *new);
void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old);
void jffs2_free_ino_caches(struct jffs2_sb_info *c);
void jffs2_free_raw_node_refs(struct jffs2_sb_info *c);
+struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset);
+void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c_delete);
+struct rb_node *rb_next(struct rb_node *);
+struct rb_node *rb_prev(struct rb_node *);
+void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root);
+void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this);
+int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
+void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
+int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn);
/* nodemgmt.c */
-int jffs2_reserve_space(struct jffs2_sb_info *c, __u32 minsize, __u32 *ofs, __u32 *len, int prio);
-int jffs2_reserve_space_gc(struct jffs2_sb_info *c, __u32 minsize, __u32 *ofs, __u32 *len);
-int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new, __u32 len, int dirty);
+int jffs2_thread_should_wake(struct jffs2_sb_info *c);
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+ uint32_t *len, int prio, uint32_t sumsize);
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+ uint32_t *len, uint32_t sumsize);
+int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new);
void jffs2_complete_reservation(struct jffs2_sb_info *c);
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw);
/* write.c */
-struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri);
-struct jffs2_full_dnode *jffs2_write_dnode(struct inode *inode, struct jffs2_raw_inode *ri, const unsigned char *data, __u32 datalen, __u32 flash_ofs, __u32 *writelen);
-struct jffs2_full_dirent *jffs2_write_dirent(struct inode *inode, struct jffs2_raw_dirent *rd, const unsigned char *name, __u32 namelen, __u32 flash_ofs, __u32 *writelen);
+int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri);
+
+struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode);
+struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode);
+int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct jffs2_raw_inode *ri, unsigned char *buf,
+ uint32_t offset, uint32_t writelen, uint32_t *retlen);
+int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen);
+int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f, uint32_t time);
+int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time);
+
/* readinode.c */
-void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct jffs2_node_frag **list, __u32 size);
-int jffs2_add_full_dnode_to_fraglist(struct jffs2_sb_info *c, struct jffs2_node_frag **list, struct jffs2_full_dnode *fn);
-int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
-void jffs2_read_inode (struct inode *);
-void jffs2_clear_inode (struct inode *);
+int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ uint32_t ino, struct jffs2_raw_inode *latest_node);
+int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
+void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
/* malloc.c */
-void jffs2_free_tmp_dnode_info_list(struct jffs2_tmp_dnode_info *tn);
-void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd);
-
int jffs2_create_slab_caches(void);
void jffs2_destroy_slab_caches(void);
@@ -297,58 +386,45 @@ struct jffs2_node_frag *jffs2_alloc_node_frag(void);
void jffs2_free_node_frag(struct jffs2_node_frag *);
struct jffs2_inode_cache *jffs2_alloc_inode_cache(void);
void jffs2_free_inode_cache(struct jffs2_inode_cache *);
-
+int jffs2_alloc_eraseblocks(struct jffs2_sb_info *c);
+void jffs2_free_eraseblocks(struct jffs2_sb_info *c);
/* gc.c */
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c);
-/* background.c */
-int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
-void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
-void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c);
-
-/* dir.c */
-extern struct file_operations jffs2_dir_operations;
-extern struct inode_operations jffs2_dir_inode_operations;
-
-/* file.c */
-extern struct file_operations jffs2_file_operations;
-extern struct inode_operations jffs2_file_inode_operations;
-extern struct address_space_operations jffs2_file_address_operations;
-int jffs2_null_fsync(struct file *, struct dentry *, int);
-int jffs2_setattr (struct dentry *dentry, struct iattr *iattr);
-int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg);
-int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg);
-int jffs2_readpage (struct file *, struct page *);
-int jffs2_prepare_write (struct file *, struct page *, unsigned, unsigned);
-int jffs2_commit_write (struct file *, struct page *, unsigned, unsigned);
-
-/* ioctl.c */
-int jffs2_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
-
/* read.c */
-int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_full_dnode *fd, unsigned char *buf, int ofs, int len);
-
-/* compr.c */
-unsigned char jffs2_compress(unsigned char *data_in, unsigned char *cpage_out,
- __u32 *datalen, __u32 *cdatalen);
-int jffs2_decompress(unsigned char comprtype, unsigned char *cdata_in,
- unsigned char *data_out, __u32 cdatalen, __u32 datalen);
+int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct jffs2_full_dnode *fd, unsigned char *buf,
+ int ofs, int len);
+int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ unsigned char *buf, uint32_t offset, uint32_t len);
+char *jffs2_getlink(struct jffs2_sb_info *c, struct jffs2_inode_info *f);
/* scan.c */
int jffs2_scan_medium(struct jffs2_sb_info *c);
+void jffs2_rotate_lists(struct jffs2_sb_info *c);
+struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
+int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
/* build.c */
-int jffs2_build_filesystem(struct jffs2_sb_info *c);
-
-/* symlink.c */
-extern struct inode_operations jffs2_symlink_inode_operations;
+int jffs2_do_mount_fs(struct jffs2_sb_info *c);
/* erase.c */
-void jffs2_erase_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
-void jffs2_erase_pending_blocks(struct jffs2_sb_info *c);
-void jffs2_mark_erased_blocks(struct jffs2_sb_info *c);
-void jffs2_erase_pending_trigger(struct jffs2_sb_info *c);
-
-/* compr_zlib.c */
-int jffs2_zlib_init(void);
-void jffs2_zlib_exit(void);
+void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count);
+
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+/* wbuf.c */
+int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino);
+int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c);
+int jffs2_check_nand_cleanmarker_ebh(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *data_len);
+int jffs2_write_nand_ebh(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+#endif
+
+/* wear_leveling.c */
+void jffs2_add_to_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag);
+void jffs2_remove_from_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag);
+struct jffs2_eraseblock *jffs2_get_free_block(struct jffs2_sb_info *c);
+struct jffs2_eraseblock *jffs2_get_used_block(struct jffs2_sb_info *c);
+
+#include "debug.h"
+
+#endif /* __JFFS2_NODELIST_H__ */
diff --git a/linux-2.4.x/fs/jffs2/nodemgmt.c b/linux-2.4.x/fs/jffs2/nodemgmt.c
index 066decf..4687663 100644
--- a/linux-2.4.x/fs/jffs2/nodemgmt.c
+++ b/linux-2.4.x/fs/jffs2/nodemgmt.c
@@ -1,46 +1,23 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: nodemgmt.c,v 1.45.2.1 2002/02/23 14:13:34 dwmw2 Exp $
+ * $Id: nodemgmt.c,v 1.131 2005/11/18 07:27:45 forrest Exp $
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/jffs2.h>
#include <linux/mtd/mtd.h>
-#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
+#include "debug.h"
/**
* jffs2_reserve_space - request physical space to write nodes to flash
@@ -62,146 +39,332 @@
* for the requested allocation.
*/
-static int jffs2_do_reserve_space(struct jffs2_sb_info *c, __u32 minsize, __u32 *ofs, __u32 *len);
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
+ uint32_t *ofs, uint32_t *len, uint32_t sumsize);
-int jffs2_reserve_space(struct jffs2_sb_info *c, __u32 minsize, __u32 *ofs, __u32 *len, int prio)
+int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+ uint32_t *len, int prio, uint32_t sumsize)
{
int ret = -EAGAIN;
- int blocksneeded = JFFS2_RESERVED_BLOCKS_WRITE;
+ int blocksneeded = c->resv_blocks_write;
/* align it */
minsize = PAD(minsize);
- if (prio == ALLOC_DELETION)
- blocksneeded = JFFS2_RESERVED_BLOCKS_DELETION;
-
D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
down(&c->alloc_sem);
D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
- spin_lock_bh(&c->erase_completion_lock);
+ spin_lock(&c->erase_completion_lock);
- /* this needs a little more thought */
+ /* this needs a little more thought (true <tglx> :)) */
while(ret == -EAGAIN) {
while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
int ret;
+ uint32_t dirty, avail;
+
+			/* Calculate the real dirty size.
+			 * dirty_size includes blocks on the erase_pending_list;
+			 * those blocks are also counted in c->nr_erasing_blocks.
+			 * Once a block is actually erased, it is no longer counted as dirty_size,
+			 * but it is still counted in c->nr_erasing_blocks, so we add the erasing
+			 * size and then subtract c->nr_erasing_blocks * c->sector_size again.
+			 * Blocks on the erasable_list are counted in dirty_size, but not in
+			 * c->nr_erasing_blocks. This helps us force GC and eventually pick a
+			 * clean block to spread the load.
+			 * We add unchecked_size here, as we hope to find some usable space there.
+			 * This affects the sum only once, as GC first finishes checking the nodes.
+			 */
+ dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
+ if (dirty < c->nospc_dirty_size) {
+ if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
+ D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
+ break;
+ }
+				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
+					  dirty, c->unchecked_size, c->nospc_dirty_size));
+
+ spin_unlock(&c->erase_completion_lock);
+ up(&c->alloc_sem);
+ return -ENOSPC;
+ }
- up(&c->alloc_sem);
- if (c->dirty_size < c->sector_size) {
- D1(printk(KERN_DEBUG "Short on space, but total dirty size 0x%08x < sector size 0x%08x, so -ENOSPC\n", c->dirty_size, c->sector_size));
- spin_unlock_bh(&c->erase_completion_lock);
+			/* Calculate the possibly available space. "Possibly available" means that
+			 * we don't know whether the unchecked size contains obsoleted nodes, which
+			 * could give us some more usable space. This affects the sum only once, as
+			 * GC first finishes checking the nodes.
+			 * Return -ENOSPC if the maximum possibly available space is less than or
+			 * equal to blocksneeded * sector_size.
+			 * This prevents endless GC looping on a filesystem that is nearly full,
+			 * even if the check above passes.
+			 */
+ avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
+ if ( (avail / c->sector_size) <= blocksneeded) {
+ if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
+ D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
+ break;
+ }
+
+ D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
+ avail, blocksneeded * c->sector_size));
+ spin_unlock(&c->erase_completion_lock);
+ up(&c->alloc_sem);
return -ENOSPC;
}
- D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
- c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->used_size, c->erasing_size, c->bad_size,
- c->free_size + c->dirty_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
- spin_unlock_bh(&c->erase_completion_lock);
-
+
+ up(&c->alloc_sem);
+
+ D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
+ c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
+ c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
+ spin_unlock(&c->erase_completion_lock);
+
ret = jffs2_garbage_collect_pass(c);
if (ret)
return ret;
- if (current->need_resched)
- schedule();
+ cond_resched();
if (signal_pending(current))
return -EINTR;
down(&c->alloc_sem);
- spin_lock_bh(&c->erase_completion_lock);
+ spin_lock(&c->erase_completion_lock);
}
- ret = jffs2_do_reserve_space(c, minsize, ofs, len);
+ ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
if (ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
}
}
- spin_unlock_bh(&c->erase_completion_lock);
+ spin_unlock(&c->erase_completion_lock);
if (ret)
up(&c->alloc_sem);
return ret;
}
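/*
 * A stand-alone sketch (illustrative only, not from the patch): a worked
 * example of the two space checks in jffs2_reserve_space() above, with
 * made-up sizes.  Blocks queued for erasing are already counted in
 * nr_erasing_blocks, so their size is backed out of the dirty total
 * again; unchecked space is treated optimistically in both sums.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t sector_size       = 0x10000;	/* 64 KiB eraseblocks      */
	uint32_t dirty_size        = 0x30000;	/* includes pending blocks */
	uint32_t erasing_size      = 0x20000;
	uint32_t unchecked_size    = 0x8000;
	uint32_t free_size         = 0x18000;
	uint32_t nr_erasing_blocks = 2;

	uint32_t dirty = dirty_size + erasing_size
			 - nr_erasing_blocks * sector_size + unchecked_size;
	uint32_t avail = free_size + dirty_size + erasing_size + unchecked_size;

	assert(dirty == 0x38000);		/* compared with nospc_dirty_size  */
	assert(avail / sector_size == 7);	/* compared with resv_blocks_write */
	return 0;
}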
-int jffs2_reserve_space_gc(struct jffs2_sb_info *c, __u32 minsize, __u32 *ofs, __u32 *len)
+int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
+ uint32_t *len, uint32_t sumsize)
{
int ret = -EAGAIN;
minsize = PAD(minsize);
D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
- spin_lock_bh(&c->erase_completion_lock);
+ spin_lock(&c->erase_completion_lock);
while(ret == -EAGAIN) {
- ret = jffs2_do_reserve_space(c, minsize, ofs, len);
+ ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
if (ret) {
D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
}
}
- spin_unlock_bh(&c->erase_completion_lock);
+ spin_unlock(&c->erase_completion_lock);
return ret;
}
+
+/* Classify nextblock (clean, dirty or very dirty) and force the selection of a new one */
+
+static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+
+ /* Check, if we have a dirty block now, or if it was dirty already */
+ if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
+ c->dirty_size += jeb->wasted_size;
+ c->wasted_size -= jeb->wasted_size;
+ jeb->dirty_size += jeb->wasted_size;
+ jeb->wasted_size = 0;
+ if (VERYDIRTY(c, jeb->dirty_size)) {
+ D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ list_add_tail(&jeb->list, &c->very_dirty_list);
+ } else {
+ D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ list_add_tail(&jeb->list, &c->dirty_list);
+ }
+ } else {
+ D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ list_add_tail(&jeb->list, &c->clean_list);
+ }
+ jffs2_add_to_hash_table(c, jeb, 1);
+ c->nextblock = NULL;
+
+}
+
+/* Select a new jeb for nextblock */
+
+static int jffs2_find_nextblock(struct jffs2_sb_info *c)
+{
+
+ /* Take the next block off the 'free' list */
+
+ if (list_empty(&c->free_list)) {
+
+ if (!c->nr_erasing_blocks &&
+ !list_empty(&c->erasable_list)) {
+ struct jffs2_eraseblock *ejeb;
+
+ ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
+ list_del(&ejeb->list);
+ list_add_tail(&ejeb->list, &c->erase_pending_list);
+ c->nr_erasing_blocks++;
+ jffs2_erase_pending_trigger(c);
+ D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
+ ejeb->offset));
+ }
+
+ if (!c->nr_erasing_blocks &&
+ !list_empty(&c->erasable_pending_wbuf_list)) {
+ D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
+ /* c->nextblock is NULL, no update to c->nextblock allowed */
+ spin_unlock(&c->erase_completion_lock);
+ jffs2_flush_wbuf_pad(c);
+ spin_lock(&c->erase_completion_lock);
+ /* Have another go. It'll be on the erasable_list now */
+ return -EAGAIN;
+ }
+
+ if (!c->nr_erasing_blocks) {
+ /* Ouch. We're in GC, or we wouldn't have got here.
+ And there's no space left. At all. */
+ printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
+ c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
+ list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
+ return -ENOSPC;
+ }
+
+ spin_unlock(&c->erase_completion_lock);
+ /* Don't wait for it; just erase one right now */
+ jffs2_erase_pending_blocks(c, 1);
+ spin_lock(&c->erase_completion_lock);
+
+ /* An erase may have failed, decreasing the
+ amount of free space available. So we must
+ restart from the beginning */
+ return -EAGAIN;
+ }
+
+ c->nextblock = jffs2_get_free_block(c);
+
+ jffs2_sum_reset_collected(c->summary); /* reset collected summary */
+
+ D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));
+
+ return 0;
+}
+
+/* Check whether the eraseblock has the expected free size */
+static int has_expected_free_size(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+ if (!EBFLAGS_HAS_EBH(jeb) && jeb->free_size != c->sector_size - c->cleanmarker_size)
+ return 0;
+
+ if (EBFLAGS_HAS_EBH(jeb) && c->ebh_size && jeb->free_size != c->sector_size - ref_totlen(c, jeb, jeb->first_node))
+ return 0;
+
+ if (EBFLAGS_HAS_EBH(jeb) && !c->ebh_size && jeb->free_size != c->sector_size)
+ return 0;
+
+ return 1;
+}
+
+static int need_mark_first_node_obsolete(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+ if (!EBFLAGS_HAS_EBH(jeb) && c->cleanmarker_size
+ && jeb->used_size == c->cleanmarker_size && !jeb->first_node->next_in_ino)
+ return 1;
+ return 0;
+}
+
+
/* Called with alloc sem _and_ erase_completion_lock */
-static int jffs2_do_reserve_space(struct jffs2_sb_info *c, __u32 minsize, __u32 *ofs, __u32 *len)
+static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize)
{
struct jffs2_eraseblock *jeb = c->nextblock;
-
+ uint32_t reserved_size; /* for summary information at the end of the jeb */
+ int ret;
+
restart:
- if (jeb && minsize > jeb->free_size) {
- /* Skip the end of this block and file it as having some dirty space */
- c->dirty_size += jeb->free_size;
- c->free_size -= jeb->free_size;
- jeb->dirty_size += jeb->free_size;
- jeb->free_size = 0;
- D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
- jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
- list_add_tail(&jeb->list, &c->dirty_list);
- c->nextblock = jeb = NULL;
- }
-
- if (!jeb) {
- struct list_head *next;
- /* Take the next block off the 'free' list */
-
- if (list_empty(&c->free_list)) {
-
- DECLARE_WAITQUEUE(wait, current);
-
- if (!c->nr_erasing_blocks) {
-// if (list_empty(&c->erasing_list) && list_empty(&c->erase_pending_list) && list_empty(c->erase_complete_list)) {
- /* Ouch. We're in GC, or we wouldn't have got here.
- And there's no space left. At all. */
- printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasingempty: %s, erasependingempty: %s)\n",
- c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
- return -ENOSPC;
+ reserved_size = 0;
+
+ if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
+ /* NOSUM_SIZE means not to generate summary */
+
+ if (jeb) {
+ reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
+ dbg_summary("minsize=%d , jeb->free=%d ,"
+ "summary->size=%d , sumsize=%d\n",
+ minsize, jeb->free_size,
+ c->summary->sum_size, sumsize);
+ }
+
+		/* Is there enough space for writing out the current node, or do we have to
+		   write out the summary information now, close this jeb and select a new nextblock? */
+ if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
+ JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {
+
+ /* Has summary been disabled for this jeb? */
+ if (jffs2_sum_is_disabled(c->summary)) {
+ sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
+ goto restart;
}
- /* Make sure this can't deadlock. Someone has to start the erases
- of erase_pending blocks */
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&c->erase_wait, &wait);
- D1(printk(KERN_DEBUG "Waiting for erases to complete. erasing_blocks is %d. (erasingempty: %s, erasependingempty: %s)\n",
- c->nr_erasing_blocks, list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"));
- if (!list_empty(&c->erase_pending_list)) {
- D1(printk(KERN_DEBUG "Triggering pending erases\n"));
- jffs2_erase_pending_trigger(c);
+
+ /* Writing out the collected summary information */
+ dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
+ ret = jffs2_sum_write_sumnode(c);
+
+ if (ret)
+ return ret;
+
+ if (jffs2_sum_is_disabled(c->summary)) {
+				/* jffs2_sum_write_sumnode() couldn't write out the summary information;
+				   disable summary for this jeb and free the collected information
+				 */
+ sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
+ goto restart;
}
- spin_unlock_bh(&c->erase_completion_lock);
- schedule();
- remove_wait_queue(&c->erase_wait, &wait);
- spin_lock_bh(&c->erase_completion_lock);
- if (signal_pending(current)) {
- return -EINTR;
+
+ jffs2_close_nextblock(c, jeb);
+ jeb = NULL;
+			/* always keep a valid value in reserved_size */
+ reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
+ }
+ } else {
+ if (jeb && minsize > jeb->free_size) {
+ /* Skip the end of this block and file it as having some dirty space */
+ /* If there's a pending write to it, flush now */
+
+ if (jffs2_wbuf_dirty(c)) {
+ spin_unlock(&c->erase_completion_lock);
+ D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
+ jffs2_flush_wbuf_pad(c);
+ spin_lock(&c->erase_completion_lock);
+ jeb = c->nextblock;
+ goto restart;
}
- /* An erase may have failed, decreasing the
- amount of free space available. So we must
- restart from the beginning */
- return -EAGAIN;
+
+ c->wasted_size += jeb->free_size;
+ c->free_size -= jeb->free_size;
+ jeb->wasted_size += jeb->free_size;
+ jeb->free_size = 0;
+
+ jffs2_close_nextblock(c, jeb);
+ jeb = NULL;
}
+ }
- next = c->free_list.next;
- list_del(next);
- c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
- c->nr_free_blocks--;
- if (jeb->free_size != c->sector_size - sizeof(struct jffs2_unknown_node)) {
+ if (!jeb) {
+
+ ret = jffs2_find_nextblock(c);
+ if (ret)
+ return ret;
+
+ jeb = c->nextblock;
+
+ if (!has_expected_free_size(c, jeb))
+ {
printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
goto restart;
}
@@ -209,7 +372,20 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, __u32 minsize, __u32
/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
enough space */
*ofs = jeb->offset + (c->sector_size - jeb->free_size);
- *len = jeb->free_size;
+ *len = jeb->free_size - reserved_size;
+
+ if (need_mark_first_node_obsolete(c, jeb)) {
+		/* The only node in it beforehand was a CLEANMARKER node (we think).
+		   So mark it obsolete now that there's going to be another node
+		   in the block. This will reduce used_size to zero but we've
+ already set c->nextblock so that jffs2_mark_node_obsolete()
+ won't try to refile it to the dirty_list.
+ */
+ spin_unlock(&c->erase_completion_lock);
+ jffs2_mark_node_obsolete(c, jeb->first_node);
+ spin_lock(&c->erase_completion_lock);
+ }
+
D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
return 0;
}
@@ -217,57 +393,72 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, __u32 minsize, __u32
/**
* jffs2_add_physical_node_ref - add a physical node reference to the list
* @c: superblock info
- * @ofs: physical location of this physical node
+ * @new: new node reference to add
* @len: length of this physical node
- * @ino: inode number with which this physical node is associated
+ * @dirty: dirty flag for new node
*
- * Should only be used to report nodes for which space has been allocated
+ * Should only be used to report nodes for which space has been allocated
* by jffs2_reserve_space.
*
* Must be called with the alloc_sem held.
*/
-
-int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new, __u32 len, int dirty)
+
+int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
struct jffs2_eraseblock *jeb;
+ uint32_t len;
+
+ jeb = c->blocks[new->flash_offset / c->sector_size];
+ len = ref_totlen(c, jeb, new);
- len = PAD(len);
- jeb = &c->blocks[(new->flash_offset & ~3) / c->sector_size];
- D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x, size 0x%x\n", new->flash_offset & ~3, len));
+ D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
- if (jeb != c->nextblock || (new->flash_offset & ~3) != jeb->offset + (c->sector_size - jeb->free_size)) {
+ /* we could get some obsolete nodes after nextblock was refiled
+ in wbuf.c */
+ if ((c->nextblock || !ref_obsolete(new))
+ &&(jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
printk(KERN_WARNING "argh. node added in wrong place\n");
jffs2_free_raw_node_ref(new);
return -EINVAL;
}
#endif
+ spin_lock(&c->erase_completion_lock);
+
if (!jeb->first_node)
jeb->first_node = new;
if (jeb->last_node)
jeb->last_node->next_phys = new;
jeb->last_node = new;
- spin_lock_bh(&c->erase_completion_lock);
jeb->free_size -= len;
c->free_size -= len;
- if (dirty) {
- new->flash_offset |= 1;
+ if (ref_obsolete(new)) {
jeb->dirty_size += len;
c->dirty_size += len;
} else {
jeb->used_size += len;
c->used_size += len;
}
- spin_unlock_bh(&c->erase_completion_lock);
- if (!jeb->free_size && !jeb->dirty_size) {
+
+ if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
+ if (jffs2_wbuf_dirty(c)) {
+ /* Flush the last write in the block if it's outstanding */
+ spin_unlock(&c->erase_completion_lock);
+ jffs2_flush_wbuf_pad(c);
+ spin_lock(&c->erase_completion_lock);
+ }
+
list_add_tail(&jeb->list, &c->clean_list);
+ jffs2_add_to_hash_table(c, jeb, 1);
c->nextblock = NULL;
}
- ACCT_SANITY_CHECK(c,jeb);
- ACCT_PARANOIA_CHECK(jeb);
+ jffs2_dbg_acct_sanity_check_nolock(c,jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+
+ spin_unlock(&c->erase_completion_lock);
return 0;
}
@@ -280,20 +471,34 @@ void jffs2_complete_reservation(struct jffs2_sb_info *c)
up(&c->alloc_sem);
}
+static inline int on_list(struct list_head *obj, struct list_head *head)
+{
+ struct list_head *this;
+
+ list_for_each(this, head) {
+ if (this == obj) {
+ D1(printk("%p is on list at %p\n", obj, head));
+ return 1;
+
+ }
+ }
+ return 0;
+}
+
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
struct jffs2_eraseblock *jeb;
int blocknr;
struct jffs2_unknown_node n;
- int ret;
- ssize_t retlen;
+ int ret, addedsize;
+ size_t retlen;
if(!ref) {
printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
return;
}
- if (ref->flash_offset & 1) {
- D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref->flash_offset &~3));
+ if (ref_obsolete(ref)) {
+ D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
return;
}
blocknr = ref->flash_offset / c->sector_size;
@@ -301,92 +506,282 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
BUG();
}
- jeb = &c->blocks[blocknr];
- if (jeb->used_size < ref->totlen) {
- printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
- ref->totlen, blocknr, ref->flash_offset, jeb->used_size);
- BUG();
+ jeb = c->blocks[blocknr];
+
+ if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
+ !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
+ /* Hm. This may confuse static lock analysis. If any of the above
+ three conditions is false, we're going to return from this
+ function without actually obliterating any nodes or freeing
+ any jffs2_raw_node_refs. So we don't need to stop erases from
+ happening, or protect against people holding an obsolete
+ jffs2_raw_node_ref without the erase_completion_lock. */
+ down(&c->erase_free_sem);
}
- spin_lock_bh(&c->erase_completion_lock);
- jeb->used_size -= ref->totlen;
- jeb->dirty_size += ref->totlen;
- c->used_size -= ref->totlen;
- c->dirty_size += ref->totlen;
- ref->flash_offset |= 1;
-
- ACCT_SANITY_CHECK(c, jeb);
+ spin_lock(&c->erase_completion_lock);
+
+ if (ref_flags(ref) == REF_UNCHECKED) {
+ D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
+ printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
+ ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
+ BUG();
+ })
+ D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
+ jeb->unchecked_size -= ref_totlen(c, jeb, ref);
+ c->unchecked_size -= ref_totlen(c, jeb, ref);
+ } else {
+ D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
+ printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
+ ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
+ BUG();
+ })
+ D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
+ jeb->used_size -= ref_totlen(c, jeb, ref);
+ c->used_size -= ref_totlen(c, jeb, ref);
+ }
- ACCT_PARANOIA_CHECK(jeb);
+	// Take care that wasted size is taken into account
+ if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
+ D1(printk(KERN_DEBUG "Dirtying\n"));
+ addedsize = ref_totlen(c, jeb, ref);
+ jeb->dirty_size += ref_totlen(c, jeb, ref);
+ c->dirty_size += ref_totlen(c, jeb, ref);
+
+ /* Convert wasted space to dirty, if not a bad block */
+ if (jeb->wasted_size) {
+ if (on_list(&jeb->list, &c->bad_used_list)) {
+ D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
+ jeb->offset));
+ addedsize = 0; /* To fool the refiling code later */
+ } else {
+ D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
+ jeb->wasted_size, jeb->offset));
+ addedsize += jeb->wasted_size;
+ jeb->dirty_size += jeb->wasted_size;
+ c->dirty_size += jeb->wasted_size;
+ c->wasted_size -= jeb->wasted_size;
+ jeb->wasted_size = 0;
+ }
+ }
+ } else {
+ D1(printk(KERN_DEBUG "Wasting\n"));
+ addedsize = 0;
+ jeb->wasted_size += ref_totlen(c, jeb, ref);
+ c->wasted_size += ref_totlen(c, jeb, ref);
+ }
+ ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
+
+ jffs2_dbg_acct_sanity_check_nolock(c, jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
- if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
- /* Mount in progress. Don't muck about with the block
+ if (c->flags & JFFS2_SB_FLAG_SCANNING) {
+ /* Flash scanning is in progress. Don't muck about with the block
lists because they're not ready yet, and don't actually
- obliterate nodes that look obsolete. If they weren't
+ obliterate nodes that look obsolete. If they weren't
marked obsolete on the flash at the time they _became_
obsolete, there was probably a reason for that. */
- spin_unlock_bh(&c->erase_completion_lock);
+ spin_unlock(&c->erase_completion_lock);
+ /* We didn't lock the erase_free_sem */
return;
}
+
if (jeb == c->nextblock) {
D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
- } else if (jeb == c->gcblock) {
- D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
-#if 0 /* We no longer do this here. It can screw the wear levelling. If you have a lot of static
- data and a few blocks free, and you just create new files and keep deleting/overwriting
- them, then you'd keep erasing and reusing those blocks without ever moving stuff around.
- So we leave completely obsoleted blocks on the dirty_list and let the GC delete them
- when it finds them there. That way, we still get the 'once in a while, take a clean block'
- to spread out the flash usage */
- } else if (!jeb->used_size) {
- D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
- list_del(&jeb->list);
- D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
- list_add_tail(&jeb->list, &c->erase_pending_list);
- c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
- // OFNI_BS_2SFFJ(c)->s_dirt = 1;
+ } else if (!jeb->used_size && !jeb->unchecked_size) {
+ if (jeb == c->gcblock) {
+ D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
+ c->gcblock = NULL;
+ } else {
+ D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
+ list_del(&jeb->list);
+ jffs2_remove_from_hash_table(c, jeb, 1);
+ }
+ if (jffs2_wbuf_dirty(c)) {
+ D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
+ list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
+ } else {
+ if (jiffies & 127) {
+ /* Most of the time, we just erase it immediately. Otherwise we
+ spend ages scanning it on mount, etc. */
+ D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+ list_add_tail(&jeb->list, &c->erase_pending_list);
+ c->nr_erasing_blocks++;
+ jffs2_erase_pending_trigger(c);
+ } else {
+ /* Sometimes, however, we leave it elsewhere so it doesn't get
+ immediately reused, and we spread the load a bit. */
+ D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+ list_add_tail(&jeb->list, &c->erasable_list);
+ }
+ }
D1(printk(KERN_DEBUG "Done OK\n"));
-#endif
- } else if (jeb->dirty_size == ref->totlen) {
+ } else if (jeb == c->gcblock) {
+ D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
+ } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
list_del(&jeb->list);
D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
list_add_tail(&jeb->list, &c->dirty_list);
+ } else if (VERYDIRTY(c, jeb->dirty_size) &&
+ !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
+ D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
+ list_del(&jeb->list);
+ D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
+ list_add_tail(&jeb->list, &c->very_dirty_list);
+ } else {
+ D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
+ jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
}
- spin_unlock_bh(&c->erase_completion_lock);
- if (c->mtd->type != MTD_NORFLASH && c->mtd->type != MTD_RAM)
- return;
- if (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+ spin_unlock(&c->erase_completion_lock);
+
+ if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
+ (c->flags & JFFS2_SB_FLAG_BUILDING)) {
+ /* We didn't lock the erase_free_sem */
return;
+ }
+
+ /* The erase_free_sem is locked, and has been since before we marked the node obsolete
+ and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
+ the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
+ by jffs2_free_all_node_refs() in erase.c. Which is nice. */
- D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref->flash_offset &~3));
- ret = c->mtd->read(c->mtd, ref->flash_offset &~3, sizeof(n), &retlen, (char *)&n);
+ D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
+ ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
if (ret) {
- printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref->flash_offset &~3, ret);
- return;
+ printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
+ goto out_erase_sem;
}
if (retlen != sizeof(n)) {
- printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %d\n", ref->flash_offset &~3, retlen);
- return;
+ printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
+ goto out_erase_sem;
}
- if (PAD(n.totlen) != PAD(ref->totlen)) {
- printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen in node ref (0x%08x)\n", n.totlen, ref->totlen);
- return;
+ if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
+ printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
+ goto out_erase_sem;
}
- if (!(n.nodetype & JFFS2_NODE_ACCURATE)) {
- D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x\n", ref->flash_offset &~3, n.nodetype));
- return;
+ if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
+ D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
+ goto out_erase_sem;
}
- n.nodetype &= ~JFFS2_NODE_ACCURATE;
- ret = c->mtd->write(c->mtd, ref->flash_offset&~3, sizeof(n), &retlen, (char *)&n);
+ /* XXX FIXME: This is ugly now */
+ n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
+ ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
if (ret) {
- printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref->flash_offset &~3, ret);
- return;
+ printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
+ goto out_erase_sem;
}
if (retlen != sizeof(n)) {
- printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %d\n", ref->flash_offset &~3, retlen);
- return;
+ printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
+ goto out_erase_sem;
+ }
+
+ /* Nodes which have been marked obsolete no longer need to be
+ associated with any inode. Remove them from the per-inode list.
+
+ Note we can't do this for NAND at the moment because we need
+ obsolete dirent nodes to stay on the lists, because of the
+ horridness in jffs2_garbage_collect_deletion_dirent(). Also
+ because we delete the inocache, and on NAND we need that to
+ stay around until all the nodes are actually erased, in order
+ to stop us from giving the same inode number to another newly
+ created inode. */
+ if (ref->next_in_ino) {
+ struct jffs2_inode_cache *ic;
+ struct jffs2_raw_node_ref **p;
+
+ spin_lock(&c->erase_completion_lock);
+
+ ic = jffs2_raw_ref_to_ic(ref);
+ for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
+ ;
+
+ *p = ref->next_in_ino;
+ ref->next_in_ino = NULL;
+
+ if (ic->nodes == (void *)ic && ic->nlink == 0)
+ jffs2_del_ino_cache(c, ic);
+
+ spin_unlock(&c->erase_completion_lock);
+ }
+
+
+ /* Merge with the next node in the physical list, if there is one
+ and if it's also obsolete and if it doesn't belong to any inode */
+ if (ref->next_phys && ref_obsolete(ref->next_phys) &&
+ !ref->next_phys->next_in_ino) {
+ struct jffs2_raw_node_ref *n = ref->next_phys;
+
+ spin_lock(&c->erase_completion_lock);
+
+ ref->__totlen += n->__totlen;
+ ref->next_phys = n->next_phys;
+ if (jeb->last_node == n) jeb->last_node = ref;
+ if (jeb->gc_node == n) {
+ /* gc will be happy continuing gc on this node */
+ jeb->gc_node=ref;
+ }
+ spin_unlock(&c->erase_completion_lock);
+
+ jffs2_free_raw_node_ref(n);
+ }
+
+ /* Also merge with the previous node in the list, if there is one
+ and that one is obsolete */
+	if (ref != jeb->first_node) {
+ struct jffs2_raw_node_ref *p = jeb->first_node;
+
+ spin_lock(&c->erase_completion_lock);
+
+ while (p->next_phys != ref)
+ p = p->next_phys;
+
+ if (ref_obsolete(p) && !ref->next_in_ino) {
+ p->__totlen += ref->__totlen;
+ if (jeb->last_node == ref) {
+ jeb->last_node = p;
+ }
+ if (jeb->gc_node == ref) {
+ /* gc will be happy continuing gc on this node */
+ jeb->gc_node=p;
+ }
+ p->next_phys = ref->next_phys;
+ jffs2_free_raw_node_ref(ref);
+ }
+ spin_unlock(&c->erase_completion_lock);
}
+ out_erase_sem:
+ up(&c->erase_free_sem);
+}
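
Editorial note: the ref_offset()/ref_flags()/ref_obsolete() helpers used throughout the function above replace the old open-coded `ref->flash_offset & ~3` and `& 1` tests. Because raw node refs sit at 4-byte-aligned flash offsets, the two low bits of flash_offset are free to carry the node state (REF_UNCHECKED, REF_OBSOLETE, REF_NORMAL, as seen above). A minimal sketch of that packing, for illustration only -- the real macros live in nodelist.h:

	/* Illustration only, not the macros JFFS2 actually uses. */
	#define SKETCH_REF_OFFSET(ref)        ((ref)->flash_offset & ~3)  /* aligned flash offset */
	#define SKETCH_REF_FLAGS(ref)         ((ref)->flash_offset & 3)   /* two-bit node state   */
	#define SKETCH_SET_FLAGS(ref, state) \
		((ref)->flash_offset = SKETCH_REF_OFFSET(ref) | (state))
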
+
+int jffs2_thread_should_wake(struct jffs2_sb_info *c)
+{
+ int ret = 0;
+ uint32_t dirty;
+
+ if (c->unchecked_size) {
+ D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
+ c->unchecked_size, c->checked_ino));
+ return 1;
+ }
+
+	/* dirty_size contains blocks on erase_pending_list;
+	 * those blocks are counted in c->nr_erasing_blocks.
+	 * Once a block is actually erased, it is no longer counted as dirty_size,
+	 * but it is still counted in c->nr_erasing_blocks, so we add c->erasing_size
+	 * and subtract c->nr_erasing_blocks * c->sector_size again.
+	 * Blocks on erasable_list are counted in dirty_size, but not in c->nr_erasing_blocks.
+	 * This helps us to force GC and eventually pick a clean block to spread the load.
+	 */
+ dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
+
+ if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
+ (dirty > c->nospc_dirty_size))
+ ret = 1;
+
+ D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
+ c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
+
+ return ret;
}
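
Editorial note: jffs2_thread_should_wake() is the predicate the background garbage-collector thread polls between passes. A rough, hypothetical outline of such a loop is below; the real one in background.c also handles signals, thread setup and mount flags, all omitted here, and jffs2_garbage_collect_pass() is assumed to be the usual per-pass entry point.

	static void gc_loop_sketch(struct jffs2_sb_info *c)
	{
		for (;;) {
			if (!jffs2_thread_should_wake(c)) {
				/* nothing worth doing yet -- sleep until woken */
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				continue;
			}
			if (jffs2_garbage_collect_pass(c))
				break;	/* error or nothing left to collect */
		}
	}
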
diff --git a/linux-2.4.x/fs/jffs2/os-linux.h b/linux-2.4.x/fs/jffs2/os-linux.h
new file mode 100644
index 0000000..a754501
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/os-linux.h
@@ -0,0 +1,219 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2002-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: os-linux.h,v 1.68 2006/02/09 16:13:35 dwmw2 Exp $
+ *
+ */
+
+#ifndef __JFFS2_OS_LINUX_H__
+#define __JFFS2_OS_LINUX_H__
+
+/* JFFS2 uses Linux mode bits natively -- no need for conversion */
+#define os_to_jffs2_mode(x) (x)
+#define jffs2_to_os_mode(x) (x)
+
+struct kstatfs;
+struct kvec;
+
+#define JFFS2_INODE_INFO(i) (list_entry(i, struct jffs2_inode_info, vfs_inode))
+#define OFNI_EDONI_2SFFJ(f) (&(f)->vfs_inode)
+#define JFFS2_SB_INFO(sb) (sb->s_fs_info)
+#define OFNI_BS_2SFFJ(c) ((struct super_block *)c->os_priv)
+
+
+#define JFFS2_F_I_SIZE(f) (OFNI_EDONI_2SFFJ(f)->i_size)
+#define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode)
+#define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid)
+#define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid)
+
+#define JFFS2_F_I_RDEV_MIN(f) (iminor(OFNI_EDONI_2SFFJ(f)))
+#define JFFS2_F_I_RDEV_MAJ(f) (imajor(OFNI_EDONI_2SFFJ(f)))
+
+#define ITIME(sec) ((struct timespec){sec, 0})
+#define I_SEC(tv) ((tv).tv_sec)
+#define JFFS2_F_I_CTIME(f) (OFNI_EDONI_2SFFJ(f)->i_ctime.tv_sec)
+#define JFFS2_F_I_MTIME(f) (OFNI_EDONI_2SFFJ(f)->i_mtime.tv_sec)
+#define JFFS2_F_I_ATIME(f) (OFNI_EDONI_2SFFJ(f)->i_atime.tv_sec)
+
+#define sleep_on_spinunlock(wq, s) \
+ do { \
+ DECLARE_WAITQUEUE(__wait, current); \
+ add_wait_queue((wq), &__wait); \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ spin_unlock(s); \
+ schedule(); \
+ remove_wait_queue((wq), &__wait); \
+ } while(0)
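
Editorial note: sleep_on_spinunlock() lets a caller test a condition under a spinlock and go to sleep without a wake-up racing in between the unlock and the schedule(). A usage sketch follows; the wait queue and the predicate are illustrative, not taken from this patch. Note the macro returns with the spinlock already dropped, so the lock must be re-taken before re-testing.

	spin_lock(&c->erase_completion_lock);
	while (!some_condition(c)) {	/* hypothetical predicate */
		sleep_on_spinunlock(&c->erase_wait, &c->erase_completion_lock);
		/* back here with the spinlock released */
		spin_lock(&c->erase_completion_lock);
	}
	spin_unlock(&c->erase_completion_lock);
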
+
+static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
+{
+ f->highest_version = 0;
+ f->fragtree = RB_ROOT;
+ f->metadata = NULL;
+ f->dents = NULL;
+ f->target = NULL;
+ f->flags = 0;
+ f->usercompr = 0;
+}
+
+
+#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+
+#define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) )
+#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
+
+
+#ifdef CONFIG_JFFS2_SUMMARY
+#define jffs2_can_mark_obsolete(c) (0)
+#else
+#define jffs2_can_mark_obsolete(c) (1)
+#endif
+
+#define jffs2_is_writebuffered(c) (0)
+#define jffs2_ebh_oob(c) (0)
+#define jffs2_write_nand_ebh(c,jeb) (-EIO)
+
+#define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf)
+#define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; })
+#define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; })
+#define jffs2_write_nand_badblock(c,jeb,bad_offset) (1)
+#define jffs2_nand_flash_setup(c) (0)
+#define jffs2_nand_flash_cleanup(c) do {} while(0)
+#define jffs2_wbuf_dirty(c) (0)
+#define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e)
+#define jffs2_wbuf_timeout NULL
+#define jffs2_wbuf_process NULL
+#define jffs2_nor_ecc(c) (0)
+#define jffs2_dataflash(c) (0)
+#define jffs2_nor_wbuf_flash(c) (0)
+#define jffs2_block_mtd(c) (0)
+#define jffs2_nor_ecc_flash_setup(c) (0)
+#define jffs2_nor_ecc_flash_cleanup(c) do {} while (0)
+#define jffs2_dataflash_setup(c) (0)
+#define jffs2_dataflash_cleanup(c) do {} while (0)
+#define jffs2_nor_wbuf_flash_setup(c) (0)
+#define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0)
+#define jffs2_block_mtd_setup(c) (0)
+#define jffs2_block_mtd_cleanup(c) do {} while (0)
+
+#else /* NAND and/or ECC'd NOR support present */
+
+#define jffs2_is_writebuffered(c) (c->wbuf != NULL)
+
+#ifdef CONFIG_JFFS2_SUMMARY
+#define jffs2_can_mark_obsolete(c) (0)
+#else
+#define jffs2_can_mark_obsolete(c) \
+ ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & (MTD_ECC|MTD_PROGRAM_REGIONS))) || \
+ c->mtd->type == MTD_RAM)
+#endif
+
+#define jffs2_ebh_oob(c) (c->mtd->type == MTD_NANDFLASH)
+
+#define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf))
+#define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len)
+
+/* wbuf.c */
+int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino);
+int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf);
+int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf);
+int jffs2_flash_read_safe(struct jffs2_sb_info *c, uint32_t ofs, int len, u_char *buf);
+int jffs2_check_oob_empty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,uint32_t data_len);
+int jffs2_check_nand_cleanmarker_ebh(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *data_len);
+int jffs2_write_nand_ebh(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
+void jffs2_wbuf_timeout(unsigned long data);
+void jffs2_wbuf_process(void *data);
+int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino);
+int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c);
+int jffs2_nand_flash_setup(struct jffs2_sb_info *c);
+void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c);
+
+#define jffs2_nor_ecc(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_ECC))
+int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c);
+void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c);
+
+#define jffs2_dataflash(c) (c->mtd->type == MTD_DATAFLASH)
+int jffs2_dataflash_setup(struct jffs2_sb_info *c);
+void jffs2_dataflash_cleanup(struct jffs2_sb_info *c);
+
+#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_PROGRAM_REGIONS))
+int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
+void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
+
+#define jffs2_block_mtd(c) (c->mtd->type == MTD_BLOCK)
+int jffs2_block_mtd_setup(struct jffs2_sb_info *c);
+void jffs2_block_mtd_cleanup(struct jffs2_sb_info *c);
+#endif /* WRITEBUFFER */
+
+/* erase.c */
+static inline void jffs2_erase_pending_trigger(struct jffs2_sb_info *c)
+{
+ OFNI_BS_2SFFJ(c)->s_dirt = 1;
+}
+
+/* background.c */
+int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
+void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
+void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c);
+
+/* dir.c */
+extern struct file_operations jffs2_dir_operations;
+extern struct inode_operations jffs2_dir_inode_operations;
+
+/* file.c */
+extern struct file_operations jffs2_file_operations;
+extern struct inode_operations jffs2_file_inode_operations;
+extern struct address_space_operations jffs2_file_address_operations;
+int jffs2_fsync(struct file *, struct dentry *, int);
+int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg);
+
+/* ioctl.c */
+int jffs2_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+
+/* symlink.c */
+extern struct inode_operations jffs2_symlink_inode_operations;
+
+/* fs.c */
+int jffs2_setattr (struct dentry *, struct iattr *);
+void jffs2_read_inode (struct inode *);
+void jffs2_clear_inode (struct inode *);
+void jffs2_dirty_inode(struct inode *inode);
+struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
+ struct jffs2_raw_inode *ri);
+int jffs2_statfs (struct super_block *, struct kstatfs *);
+void jffs2_write_super (struct super_block *);
+int jffs2_remount_fs (struct super_block *, int *, char *);
+int jffs2_do_fill_super(struct super_block *sb, void *data, int silent);
+void jffs2_gc_release_inode(struct jffs2_sb_info *c,
+ struct jffs2_inode_info *f);
+struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
+ int inum, int nlink);
+
+unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
+ struct jffs2_inode_info *f,
+ unsigned long offset,
+ unsigned long *priv);
+void jffs2_gc_release_page(struct jffs2_sb_info *c,
+ unsigned char *pg,
+ unsigned long *priv);
+void jffs2_flash_cleanup(struct jffs2_sb_info *c);
+
+
+/* writev.c */
+int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
+ size_t *retlen, const u_char *buf);
+
+#endif /* __JFFS2_OS_LINUX_H__ */
+
+
diff --git a/linux-2.4.x/fs/jffs2/pushpull.h b/linux-2.4.x/fs/jffs2/pushpull.h
index dba10f3..c0c2a91 100644
--- a/linux-2.4.x/fs/jffs2/pushpull.h
+++ b/linux-2.4.x/fs/jffs2/pushpull.h
@@ -1,42 +1,21 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: pushpull.h,v 1.5 2001/09/23 10:04:15 rmk Exp $
+ * $Id: pushpull.h,v 1.10 2004/11/16 20:36:11 dwmw2 Exp $
*
*/
#ifndef __PUSHPULL_H__
#define __PUSHPULL_H__
+
+#include <linux/errno.h>
+
struct pushpull {
unsigned char *buf;
unsigned int buflen;
@@ -44,9 +23,36 @@ struct pushpull {
unsigned int reserve;
};
-void init_pushpull(struct pushpull *, char *, unsigned, unsigned, unsigned);
-int pushbit(struct pushpull *pp, int bit, int use_reserved);
-int pushedbits(struct pushpull *pp);
+
+static inline void init_pushpull(struct pushpull *pp, char *buf, unsigned buflen, unsigned ofs, unsigned reserve)
+{
+ pp->buf = buf;
+ pp->buflen = buflen;
+ pp->ofs = ofs;
+ pp->reserve = reserve;
+}
+
+static inline int pushbit(struct pushpull *pp, int bit, int use_reserved)
+{
+ if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) {
+ return -ENOSPC;
+ }
+
+ if (bit) {
+ pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs &7)));
+ }
+ else {
+ pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs &7)));
+ }
+ pp->ofs++;
+
+ return 0;
+}
+
+static inline int pushedbits(struct pushpull *pp)
+{
+ return pp->ofs;
+}
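
Editorial note: a small self-contained use of the inline helpers above, for reference. Bits are packed MSB-first, the length passed to init_pushpull() is in bits, and pushedbits() reports how many have been written. The buffer and sizes below are made up for the example.

	static int pushpull_demo(void)
	{
		unsigned char buf[4] = { 0, };
		struct pushpull pp;
		int i;

		init_pushpull(&pp, (char *)buf, sizeof(buf) * 8, 0, 0);
		for (i = 0; i < 5; i++) {
			/* writes 0,1,0,1,0 into bits 7..3 of buf[0] */
			if (pushbit(&pp, i & 1, 0))
				return -ENOSPC;
		}
		return pushedbits(&pp);		/* 5 */
	}
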
static inline int pullbit(struct pushpull *pp)
{
diff --git a/linux-2.4.x/fs/jffs2/rbtree.c b/linux-2.4.x/fs/jffs2/rbtree.c
new file mode 100644
index 0000000..de0bb6a
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/rbtree.c
@@ -0,0 +1,363 @@
+/*
+ Red Black Trees
+ (C) 1999 Andrea Arcangeli <andrea@suse.de>
+ (C) 2002 David Woodhouse <dwmw2@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ $Id: rbtree.c,v 1.4 2005/11/07 11:14:41 gleixner Exp $
+*/
+
+#ifdef __ECOS /* This file is _not_ under the eCos licence; it is pure GPL. */
+#error "Licence problem. eCos has its own rbtree code."
+#endif
+
+#include <linux/version.h>
+#include <linux/rbtree.h>
+
+/* This wasn't present till 2.4.11, wasn't exported till 2.4.19 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,11) || \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19) && defined(MODULE))
+static void __rb_rotate_left(struct rb_node * node, struct rb_root * root)
+{
+ struct rb_node * right = node->rb_right;
+
+ if ((node->rb_right = right->rb_left))
+ right->rb_left->rb_parent = node;
+ right->rb_left = node;
+
+ if ((right->rb_parent = node->rb_parent))
+ {
+ if (node == node->rb_parent->rb_left)
+ node->rb_parent->rb_left = right;
+ else
+ node->rb_parent->rb_right = right;
+ }
+ else
+ root->rb_node = right;
+ node->rb_parent = right;
+}
+
+static void __rb_rotate_right(struct rb_node * node, struct rb_root * root)
+{
+ struct rb_node * left = node->rb_left;
+
+ if ((node->rb_left = left->rb_right))
+ left->rb_right->rb_parent = node;
+ left->rb_right = node;
+
+ if ((left->rb_parent = node->rb_parent))
+ {
+ if (node == node->rb_parent->rb_right)
+ node->rb_parent->rb_right = left;
+ else
+ node->rb_parent->rb_left = left;
+ }
+ else
+ root->rb_node = left;
+ node->rb_parent = left;
+}
+
+void rb_insert_color(struct rb_node * node, struct rb_root * root)
+{
+ struct rb_node * parent, * gparent;
+
+ while ((parent = node->rb_parent) && parent->rb_color == RB_RED)
+ {
+ gparent = parent->rb_parent;
+
+ if (parent == gparent->rb_left)
+ {
+ {
+ register struct rb_node * uncle = gparent->rb_right;
+ if (uncle && uncle->rb_color == RB_RED)
+ {
+ uncle->rb_color = RB_BLACK;
+ parent->rb_color = RB_BLACK;
+ gparent->rb_color = RB_RED;
+ node = gparent;
+ continue;
+ }
+ }
+
+ if (parent->rb_right == node)
+ {
+ register struct rb_node * tmp;
+ __rb_rotate_left(parent, root);
+ tmp = parent;
+ parent = node;
+ node = tmp;
+ }
+
+ parent->rb_color = RB_BLACK;
+ gparent->rb_color = RB_RED;
+ __rb_rotate_right(gparent, root);
+ } else {
+ {
+ register struct rb_node * uncle = gparent->rb_left;
+ if (uncle && uncle->rb_color == RB_RED)
+ {
+ uncle->rb_color = RB_BLACK;
+ parent->rb_color = RB_BLACK;
+ gparent->rb_color = RB_RED;
+ node = gparent;
+ continue;
+ }
+ }
+
+ if (parent->rb_left == node)
+ {
+ register struct rb_node * tmp;
+ __rb_rotate_right(parent, root);
+ tmp = parent;
+ parent = node;
+ node = tmp;
+ }
+
+ parent->rb_color = RB_BLACK;
+ gparent->rb_color = RB_RED;
+ __rb_rotate_left(gparent, root);
+ }
+ }
+
+ root->rb_node->rb_color = RB_BLACK;
+}
+
+static void __rb_erase_color(struct rb_node * node, struct rb_node * parent,
+ struct rb_root * root)
+{
+ struct rb_node * other;
+
+ while ((!node || node->rb_color == RB_BLACK) && node != root->rb_node)
+ {
+ if (parent->rb_left == node)
+ {
+ other = parent->rb_right;
+ if (other->rb_color == RB_RED)
+ {
+ other->rb_color = RB_BLACK;
+ parent->rb_color = RB_RED;
+ __rb_rotate_left(parent, root);
+ other = parent->rb_right;
+ }
+ if ((!other->rb_left ||
+ other->rb_left->rb_color == RB_BLACK)
+ && (!other->rb_right ||
+ other->rb_right->rb_color == RB_BLACK))
+ {
+ other->rb_color = RB_RED;
+ node = parent;
+ parent = node->rb_parent;
+ }
+ else
+ {
+ if (!other->rb_right ||
+ other->rb_right->rb_color == RB_BLACK)
+ {
+ register struct rb_node * o_left;
+ if ((o_left = other->rb_left))
+ o_left->rb_color = RB_BLACK;
+ other->rb_color = RB_RED;
+ __rb_rotate_right(other, root);
+ other = parent->rb_right;
+ }
+ other->rb_color = parent->rb_color;
+ parent->rb_color = RB_BLACK;
+ if (other->rb_right)
+ other->rb_right->rb_color = RB_BLACK;
+ __rb_rotate_left(parent, root);
+ node = root->rb_node;
+ break;
+ }
+ }
+ else
+ {
+ other = parent->rb_left;
+ if (other->rb_color == RB_RED)
+ {
+ other->rb_color = RB_BLACK;
+ parent->rb_color = RB_RED;
+ __rb_rotate_right(parent, root);
+ other = parent->rb_left;
+ }
+ if ((!other->rb_left ||
+ other->rb_left->rb_color == RB_BLACK)
+ && (!other->rb_right ||
+ other->rb_right->rb_color == RB_BLACK))
+ {
+ other->rb_color = RB_RED;
+ node = parent;
+ parent = node->rb_parent;
+ }
+ else
+ {
+ if (!other->rb_left ||
+ other->rb_left->rb_color == RB_BLACK)
+ {
+ register struct rb_node * o_right;
+ if ((o_right = other->rb_right))
+ o_right->rb_color = RB_BLACK;
+ other->rb_color = RB_RED;
+ __rb_rotate_left(other, root);
+ other = parent->rb_left;
+ }
+ other->rb_color = parent->rb_color;
+ parent->rb_color = RB_BLACK;
+ if (other->rb_left)
+ other->rb_left->rb_color = RB_BLACK;
+ __rb_rotate_right(parent, root);
+ node = root->rb_node;
+ break;
+ }
+ }
+ }
+ if (node)
+ node->rb_color = RB_BLACK;
+}
+
+void rb_erase(struct rb_node * node, struct rb_root * root)
+{
+ struct rb_node * child, * parent;
+ int color;
+
+ if (!node->rb_left)
+ child = node->rb_right;
+ else if (!node->rb_right)
+ child = node->rb_left;
+ else
+ {
+ struct rb_node * old = node, * left;
+
+ node = node->rb_right;
+ while ((left = node->rb_left))
+ node = left;
+ child = node->rb_right;
+ parent = node->rb_parent;
+ color = node->rb_color;
+
+ if (child)
+ child->rb_parent = parent;
+ if (parent)
+ {
+ if (parent->rb_left == node)
+ parent->rb_left = child;
+ else
+ parent->rb_right = child;
+ }
+ else
+ root->rb_node = child;
+
+ if (node->rb_parent == old)
+ parent = node;
+ node->rb_parent = old->rb_parent;
+ node->rb_color = old->rb_color;
+ node->rb_right = old->rb_right;
+ node->rb_left = old->rb_left;
+
+ if (old->rb_parent)
+ {
+ if (old->rb_parent->rb_left == old)
+ old->rb_parent->rb_left = node;
+ else
+ old->rb_parent->rb_right = node;
+ } else
+ root->rb_node = node;
+
+ old->rb_left->rb_parent = node;
+ if (old->rb_right)
+ old->rb_right->rb_parent = node;
+ goto color;
+ }
+
+ parent = node->rb_parent;
+ color = node->rb_color;
+
+ if (child)
+ child->rb_parent = parent;
+ if (parent)
+ {
+ if (parent->rb_left == node)
+ parent->rb_left = child;
+ else
+ parent->rb_right = child;
+ }
+ else
+ root->rb_node = child;
+
+ color:
+ if (color == RB_BLACK)
+ __rb_erase_color(child, parent, root);
+}
+#endif /* Before 2.4.11 */
+
+ /* These routines haven't made it into 2.4 (yet) */
+struct rb_node *rb_next(struct rb_node *node)
+{
+ /* If we have a right-hand child, go down and then left as far
+ as we can. */
+ if (node->rb_right) {
+ node = node->rb_right;
+ while (node->rb_left)
+ node=node->rb_left;
+ return node;
+ }
+
+ /* No right-hand children. Everything down and left is
+ smaller than us, so any 'next' node must be in the general
+ direction of our parent. Go up the tree; any time the
+ ancestor is a right-hand child of its parent, keep going
+ up. First time it's a left-hand child of its parent, said
+ parent is our 'next' node. */
+ while (node->rb_parent && node == node->rb_parent->rb_right)
+ node = node->rb_parent;
+
+ return node->rb_parent;
+}
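
Editorial note: with rb_next() available, an ascending in-order walk of any rb_root is straightforward (JFFS2's frag_next() is built on it). A minimal sketch, assuming 'root' is already populated and 'struct my_type' is whatever holds the rb_node:

	struct rb_node *node = root->rb_node;

	if (node) {
		while (node->rb_left)
			node = node->rb_left;		/* leftmost == smallest key */
		for (; node; node = rb_next(node)) {
			/* visit rb_entry(node, struct my_type, rb) here */
		}
	}
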
+
+struct rb_node *rb_prev(struct rb_node *node)
+{
+ if (node->rb_left) {
+ node = node->rb_left;
+ while (node->rb_right)
+ node=node->rb_right;
+ return node;
+ }
+ while (node->rb_parent && node == node->rb_parent->rb_left)
+ node = node->rb_parent;
+
+ return node->rb_parent;
+}
+
+void rb_replace_node(struct rb_node *victim, struct rb_node *new, struct rb_root *root)
+{
+ struct rb_node *parent = victim->rb_parent;
+
+ /* Set the surrounding nodes to point to the replacement */
+ if (parent) {
+ if (victim == parent->rb_left)
+ parent->rb_left = new;
+ else
+ parent->rb_right = new;
+ } else {
+ root->rb_node = new;
+ }
+ if (victim->rb_left)
+ victim->rb_left->rb_parent = new;
+ if (victim->rb_right)
+ victim->rb_right->rb_parent = new;
+
+ /* Copy the pointers/colour from the victim to the replacement */
+ *new = *victim;
+}
diff --git a/linux-2.4.x/fs/jffs2/read.c b/linux-2.4.x/fs/jffs2/read.c
index 2f0f10d..ea7b21a 100644
--- a/linux-2.4.x/fs/jffs2/read.c
+++ b/linux-2.4.x/fs/jffs2/read.c
@@ -1,52 +1,32 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: read.c,v 1.13.2.1 2002/02/01 23:32:33 dwmw2 Exp $
+ * $Id: read.c,v 1.44 2005/11/13 18:22:07 gleixner Exp $
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/jffs2.h>
+#include <linux/crc32.h>
+#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
+#include <linux/compiler.h>
#include "nodelist.h"
-#include "crc32.h"
+#include "compr.h"
-int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_full_dnode *fd, unsigned char *buf, int ofs, int len)
+int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct jffs2_full_dnode *fd, unsigned char *buf,
+ int ofs, int len)
{
struct jffs2_raw_inode *ri;
size_t readlen;
- __u32 crc;
+ uint32_t crc;
unsigned char *decomprbuf = NULL;
unsigned char *readbuf = NULL;
int ret = 0;
@@ -55,40 +35,46 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_full_dnode *fd, unsig
if (!ri)
return -ENOMEM;
- ret = c->mtd->read(c->mtd, fd->raw->flash_offset & ~3, sizeof(*ri), &readlen, (char *)ri);
+ ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri);
if (ret) {
jffs2_free_raw_inode(ri);
- printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", fd->raw->flash_offset & ~3, ret);
+ printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret);
return ret;
}
if (readlen != sizeof(*ri)) {
jffs2_free_raw_inode(ri);
- printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%x bytes, got 0x%x\n",
- fd->raw->flash_offset & ~3, sizeof(*ri), readlen);
+ printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n",
+ ref_offset(fd->raw), sizeof(*ri), readlen);
return -EIO;
}
crc = crc32(0, ri, sizeof(*ri)-8);
- D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n", fd->raw->flash_offset & ~3, ri->node_crc, crc, ri->dsize, ri->csize, ri->offset, buf));
- if (crc != ri->node_crc) {
- printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n", ri->node_crc, crc, fd->raw->flash_offset & ~3);
+ D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n",
+ ref_offset(fd->raw), je32_to_cpu(ri->node_crc),
+ crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize),
+ je32_to_cpu(ri->offset), buf));
+ if (crc != je32_to_cpu(ri->node_crc)) {
+ printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n",
+ je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw));
ret = -EIO;
goto out_ri;
}
/* There was a bug where we wrote hole nodes out with csize/dsize
swapped. Deal with it */
- if (ri->compr == JFFS2_COMPR_ZERO && !ri->dsize && ri->csize) {
+ if (ri->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(ri->dsize) &&
+ je32_to_cpu(ri->csize)) {
ri->dsize = ri->csize;
- ri->csize = 0;
+ ri->csize = cpu_to_je32(0);
}
- D1(if(ofs + len > ri->dsize) {
- printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n", len, ofs, ri->dsize);
+ D1(if(ofs + len > je32_to_cpu(ri->dsize)) {
+ printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n",
+ len, ofs, je32_to_cpu(ri->dsize));
ret = -EINVAL;
goto out_ri;
});
-
+
if (ri->compr == JFFS2_COMPR_ZERO) {
memset(buf, 0, len);
goto out_ri;
@@ -96,22 +82,22 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_full_dnode *fd, unsig
/* Cases:
Reading whole node and it's uncompressed - read directly to buffer provided, check CRC.
- Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided
- Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy
+ Reading whole node and it's compressed - read into comprbuf, check CRC and decompress to buffer provided
+ Reading partial node and it's uncompressed - read into readbuf, check CRC, and copy
Reading partial node and it's compressed - read into readbuf, check checksum, decompress to decomprbuf and copy
*/
- if (ri->compr == JFFS2_COMPR_NONE && len == ri->dsize) {
+ if (ri->compr == JFFS2_COMPR_NONE && len == je32_to_cpu(ri->dsize)) {
readbuf = buf;
} else {
- readbuf = kmalloc(ri->csize, GFP_KERNEL);
+ readbuf = kmalloc(je32_to_cpu(ri->csize), GFP_KERNEL);
if (!readbuf) {
ret = -ENOMEM;
goto out_ri;
}
}
if (ri->compr != JFFS2_COMPR_NONE) {
- if (len < ri->dsize) {
- decomprbuf = kmalloc(ri->dsize, GFP_KERNEL);
+ if (len < je32_to_cpu(ri->dsize)) {
+ decomprbuf = kmalloc(je32_to_cpu(ri->dsize), GFP_KERNEL);
if (!decomprbuf) {
ret = -ENOMEM;
goto out_readbuf;
@@ -123,31 +109,33 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_full_dnode *fd, unsig
decomprbuf = readbuf;
}
- D2(printk(KERN_DEBUG "Read %d bytes to %p\n", ri->csize, readbuf));
- ret = c->mtd->read(c->mtd, (fd->raw->flash_offset &~3) + sizeof(*ri), ri->csize, &readlen, readbuf);
+ D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize),
+ readbuf));
- if (!ret && readlen != ri->csize)
- ret = -EIO;
+ ret = jffs2_flash_read_safe(c, (ref_offset(fd->raw)) + sizeof(*ri),
+ je32_to_cpu(ri->csize), readbuf);
if (ret)
goto out_decomprbuf;
- crc = crc32(0, readbuf, ri->csize);
- if (crc != ri->data_crc) {
- printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n", ri->data_crc, crc, fd->raw->flash_offset & ~3);
+ crc = crc32(0, readbuf, je32_to_cpu(ri->csize));
+ if (crc != je32_to_cpu(ri->data_crc)) {
+ printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n",
+ je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw));
ret = -EIO;
goto out_decomprbuf;
}
D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc));
if (ri->compr != JFFS2_COMPR_NONE) {
- D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n", ri->csize, readbuf, ri->dsize, decomprbuf));
- ret = jffs2_decompress(ri->compr, readbuf, decomprbuf, ri->csize, ri->dsize);
+ D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n",
+ je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf));
+ ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize));
if (ret) {
printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret);
goto out_decomprbuf;
}
}
- if (len < ri->dsize) {
+ if (len < je32_to_cpu(ri->dsize)) {
memcpy(buf, decomprbuf+ofs, len);
}
out_decomprbuf:
@@ -161,3 +149,65 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_full_dnode *fd, unsig
return ret;
}
+
+int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ unsigned char *buf, uint32_t offset, uint32_t len)
+{
+ uint32_t end = offset + len;
+ struct jffs2_node_frag *frag;
+ int ret;
+
+ D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n",
+ f->inocache->ino, offset, offset+len));
+
+ frag = jffs2_lookup_node_frag(&f->fragtree, offset);
+
+ /* XXX FIXME: Where a single physical node actually shows up in two
+ frags, we read it twice. Don't do that. */
+ /* Now we're pointing at the first frag which overlaps our page */
+ while(offset < end) {
+ D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
+ if (unlikely(!frag || frag->ofs > offset)) {
+ uint32_t holesize = end - offset;
+ if (frag) {
+ D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
+ holesize = min(holesize, frag->ofs - offset);
+ }
+ D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
+ memset(buf, 0, holesize);
+ buf += holesize;
+ offset += holesize;
+ continue;
+ } else if (unlikely(!frag->node)) {
+ uint32_t holeend = min(end, frag->ofs + frag->size);
+ D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
+ memset(buf, 0, holeend - offset);
+ buf += holeend - offset;
+ offset = holeend;
+ frag = frag_next(frag);
+ continue;
+ } else {
+ uint32_t readlen;
+ uint32_t fragofs; /* offset within the frag to start reading */
+
+ fragofs = offset - frag->ofs;
+ readlen = min(frag->size - fragofs, end - offset);
+ D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n",
+ frag->ofs+fragofs, frag->ofs+fragofs+readlen,
+ ref_offset(frag->node->raw), ref_flags(frag->node->raw)));
+ ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen);
+ D2(printk(KERN_DEBUG "node read done\n"));
+ if (ret) {
+ D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret));
+ memset(buf, 0, readlen);
+ return ret;
+ }
+ buf += readlen;
+ offset += readlen;
+ frag = frag_next(frag);
+ D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
+ }
+ }
+ return 0;
+}
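
Editorial note: jffs2_read_inode_range() is what the page-reading path builds on -- hand it a buffer, a file offset and a length, and it walks the fragtree, reading real nodes and zero-filling holes. A hypothetical caller, roughly what a readpage implementation would do; the function name and use of the page-cache constants here are assumptions, not taken from this hunk.

	static int readpage_sketch(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
				   unsigned char *pg_buf, unsigned long page_index)
	{
		/* one page worth of file data; holes come back as zeroes */
		return jffs2_read_inode_range(c, f, pg_buf,
					      page_index << PAGE_CACHE_SHIFT,
					      PAGE_CACHE_SIZE);
	}
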
+
diff --git a/linux-2.4.x/fs/jffs2/readinode.c b/linux-2.4.x/fs/jffs2/readinode.c
index c2ddfdc..e8218ff 100644
--- a/linux-2.4.x/fs/jffs2/readinode.c
+++ b/linux-2.4.x/fs/jffs2/readinode.c
@@ -1,522 +1,966 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: readinode.c,v 1.58.2.6 2002/10/10 13:18:38 dwmw2 Exp $
+ * $Id: readinode.c,v 1.147 2006/04/09 22:01:36 dwmw2 Exp $
*
*/
-/* Given an inode, probably with existing list of fragments, add the new node
- * to the fragment list.
- */
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/fs.h>
+#include <linux/crc32.h>
+#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
-#include <linux/jffs2.h>
+#include <linux/compiler.h>
#include "nodelist.h"
-#include "crc32.h"
-
-D1(void jffs2_print_frag_list(struct jffs2_inode_info *f)
+/*
+ * Put a new tmp_dnode_info into the temporary RB-tree, keeping the nodes in
+ * order of increasing version.
+ */
+static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
{
- struct jffs2_node_frag *this = f->fraglist;
-
- while(this) {
- if (this->node)
- printk(KERN_DEBUG "frag %04x-%04x: 0x%08x on flash (*%p->%p)\n", this->ofs, this->ofs+this->size, this->node->raw->flash_offset &~3, this, this->next);
- else
- printk(KERN_DEBUG "frag %04x-%04x: hole (*%p->%p)\n", this->ofs, this->ofs+this->size, this, this->next);
- this = this->next;
+ struct rb_node **p = &list->rb_node;
+ struct rb_node * parent = NULL;
+ struct jffs2_tmp_dnode_info *this;
+
+ while (*p) {
+ parent = *p;
+ this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);
+
+ /* There may actually be a collision here, but it doesn't
+ actually matter. As long as the two nodes with the same
+ version are together, it's all fine. */
+ if (tn->version > this->version)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
}
- if (f->metadata) {
- printk(KERN_DEBUG "metadata at 0x%08x\n", f->metadata->raw->flash_offset &~3);
- }
-})
+ rb_link_node(&tn->rb, parent, p);
+ rb_insert_color(&tn->rb, list);
+}
-int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
+static void jffs2_free_tmp_dnode_info_list(struct rb_root *list)
{
- int ret;
- D1(printk(KERN_DEBUG "jffs2_add_full_dnode_to_inode(ino #%u, f %p, fn %p)\n", f->inocache->ino, f, fn));
+ struct rb_node *this;
+ struct jffs2_tmp_dnode_info *tn;
- ret = jffs2_add_full_dnode_to_fraglist(c, &f->fraglist, fn);
+ this = list->rb_node;
- D2(jffs2_print_frag_list(f));
- return ret;
-}
+ /* Now at bottom of tree */
+ while (this) {
+ if (this->rb_left)
+ this = this->rb_left;
+ else if (this->rb_right)
+ this = this->rb_right;
+ else {
+ tn = rb_entry(this, struct jffs2_tmp_dnode_info, rb);
-static void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *this)
-{
- if (this->node) {
- this->node->frags--;
- if (!this->node->frags) {
- /* The node has no valid frags left. It's totally obsoleted */
- D2(printk(KERN_DEBUG "Marking old node @0x%08x (0x%04x-0x%04x) obsolete\n",
- this->node->raw->flash_offset &~3, this->node->ofs, this->node->ofs+this->node->size));
- jffs2_mark_node_obsolete(c, this->node->raw);
- jffs2_free_full_dnode(this->node);
- } else {
- D2(printk(KERN_DEBUG "Not marking old node @0x%08x (0x%04x-0x%04x) obsolete. frags is %d\n",
- this->node->raw->flash_offset &~3, this->node->ofs, this->node->ofs+this->node->size,
- this->node->frags));
+ this = this->rb_parent;
+
+ jffs2_free_full_dnode(tn->fn);
+ jffs2_free_tmp_dnode_info(tn);
+
+ if (!this)
+ break;
+
+ if (this->rb_left == &tn->rb)
+ this->rb_left = NULL;
+ else if (this->rb_right == &tn->rb)
+ this->rb_right = NULL;
+ else BUG();
}
-
}
- jffs2_free_node_frag(this);
+ list->rb_node = NULL;
}
-/* Doesn't set inode->i_size */
-int jffs2_add_full_dnode_to_fraglist(struct jffs2_sb_info *c, struct jffs2_node_frag **list, struct jffs2_full_dnode *fn)
+static void jffs2_free_full_dirent_list(struct jffs2_full_dirent *fd)
{
-
- struct jffs2_node_frag *this, **prev, *old;
- struct jffs2_node_frag *newfrag, *newfrag2;
- __u32 lastend = 0;
+ struct jffs2_full_dirent *next;
-
- newfrag = jffs2_alloc_node_frag();
- if (!newfrag) {
- return -ENOMEM;
+ while (fd) {
+ next = fd->next;
+ jffs2_free_full_dirent(fd);
+ fd = next;
}
+}
- D2(if (fn->raw)
- printk(KERN_DEBUG "adding node %04x-%04x @0x%08x on flash, newfrag *%p\n", fn->ofs, fn->ofs+fn->size, fn->raw->flash_offset &~3, newfrag);
- else
- printk(KERN_DEBUG "adding hole node %04x-%04x on flash, newfrag *%p\n", fn->ofs, fn->ofs+fn->size, newfrag));
-
- prev = list;
- this = *list;
+/* Returns first valid node after 'ref'. May return 'ref' */
+static struct jffs2_raw_node_ref *jffs2_first_valid_node(struct jffs2_raw_node_ref *ref)
+{
+ while (ref && ref->next_in_ino) {
+ if (!ref_obsolete(ref))
+ return ref;
+ dbg_noderef("node at 0x%08x is obsoleted. Ignoring.\n", ref_offset(ref));
+ ref = ref->next_in_ino;
+ }
+ return NULL;
+}
- if (!fn->size) {
- jffs2_free_node_frag(newfrag);
- return 0;
+/*
+ * Helper function for jffs2_get_inode_nodes().
+ * It is called every time a directory entry node is found.
+ *
+ * Returns: 0 on success;
+ * 1 if the node should be marked obsolete;
+ * negative error code on failure.
+ */
+static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
+ struct jffs2_raw_dirent *rd, uint32_t read, struct jffs2_full_dirent **fdp,
+ uint32_t *latest_mctime, uint32_t *mctime_ver)
+{
+ struct jffs2_full_dirent *fd;
+
+ /* The direntry nodes are checked during the flash scanning */
+ BUG_ON(ref_flags(ref) == REF_UNCHECKED);
+ /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
+ BUG_ON(ref_obsolete(ref));
+
+ /* Sanity check */
+ if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
+ JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
+ ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
+ return 1;
}
- newfrag->ofs = fn->ofs;
- newfrag->size = fn->size;
- newfrag->node = fn;
- newfrag->node->frags = 1;
- newfrag->next = (void *)0xdeadbeef;
+ fd = jffs2_alloc_full_dirent(rd->nsize + 1);
+ if (unlikely(!fd))
+ return -ENOMEM;
- /* Skip all the nodes which are completed before this one starts */
- while(this && fn->ofs >= this->ofs+this->size) {
- lastend = this->ofs + this->size;
+ fd->raw = ref;
+ fd->version = je32_to_cpu(rd->version);
+ fd->ino = je32_to_cpu(rd->ino);
+ fd->type = rd->type;
- D2(printk(KERN_DEBUG "j_a_f_d_t_f: skipping frag 0x%04x-0x%04x; phys 0x%08x (*%p->%p)\n",
- this->ofs, this->ofs+this->size, this->node?(this->node->raw->flash_offset &~3):0xffffffff, this, this->next));
- prev = &this->next;
- this = this->next;
+ /* Pick out the mctime of the latest dirent */
+ if(fd->version > *mctime_ver && je32_to_cpu(rd->mctime)) {
+ *mctime_ver = fd->version;
+ *latest_mctime = je32_to_cpu(rd->mctime);
}
- /* See if we ran off the end of the list */
- if (!this) {
- /* We did */
- if (lastend < fn->ofs) {
- /* ... and we need to put a hole in before the new node */
- struct jffs2_node_frag *holefrag = jffs2_alloc_node_frag();
- if (!holefrag)
- return -ENOMEM;
- holefrag->ofs = lastend;
- holefrag->size = fn->ofs - lastend;
- holefrag->next = NULL;
- holefrag->node = NULL;
- *prev = holefrag;
- prev = &holefrag->next;
+ /*
+ * Copy as much of the name as possible from the raw
+ * dirent we've already read from the flash.
+ */
+ if (read > sizeof(*rd))
+ memcpy(&fd->name[0], &rd->name[0],
+ min_t(uint32_t, rd->nsize, (read - sizeof(*rd)) ));
+
+ /* Do we need to copy any more of the name directly from the flash? */
+ if (rd->nsize + sizeof(*rd) > read) {
+ /* FIXME: point() */
+ int err;
+ int already = read - sizeof(*rd);
+
+ err = jffs2_flash_read(c, (ref_offset(ref)) + read,
+ rd->nsize - already, &read, &fd->name[already]);
+ if (unlikely(read != rd->nsize - already) && likely(!err))
+ return -EIO;
+
+ if (unlikely(err)) {
+ JFFS2_ERROR("read remainder of name: error %d\n", err);
+ jffs2_free_full_dirent(fd);
+ return -EIO;
}
- newfrag->next = NULL;
- *prev = newfrag;
- return 0;
}
- D2(printk(KERN_DEBUG "j_a_f_d_t_f: dealing with frag 0x%04x-0x%04x; phys 0x%08x (*%p->%p)\n",
- this->ofs, this->ofs+this->size, this->node?(this->node->raw->flash_offset &~3):0xffffffff, this, this->next));
+ fd->nhash = full_name_hash(fd->name, rd->nsize);
+ fd->next = NULL;
+ fd->name[rd->nsize] = '\0';
- /* OK. 'this' is pointing at the first frag that fn->ofs at least partially obsoletes,
- * - i.e. fn->ofs < this->ofs+this->size && fn->ofs >= this->ofs
+ /*
+ * Wheee. We now have a complete jffs2_full_dirent structure, with
+ * the name in it and everything. Link it into the list
*/
- if (fn->ofs > this->ofs) {
- /* This node isn't completely obsoleted. The start of it remains valid */
- if (this->ofs + this->size > fn->ofs + fn->size) {
- /* The new node splits 'this' frag into two */
- newfrag2 = jffs2_alloc_node_frag();
- if (!newfrag2) {
- jffs2_free_node_frag(newfrag);
- return -ENOMEM;
+ jffs2_add_fd_to_list(c, fd, fdp);
+
+ return 0;
+}
+
+/*
+ * Helper function for jffs2_get_inode_nodes().
+ * It is called every time an inode node is found.
+ *
+ * Returns: 0 on success;
+ * 1 if the node should be marked obsolete;
+ * negative error code on failure.
+ */
+static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
+ struct jffs2_raw_inode *rd, struct rb_root *tnp, int rdlen,
+ uint32_t *latest_mctime, uint32_t *mctime_ver)
+{
+ struct jffs2_tmp_dnode_info *tn;
+ uint32_t len, csize;
+ int ret = 1;
+
+ /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
+ BUG_ON(ref_obsolete(ref));
+
+ tn = jffs2_alloc_tmp_dnode_info();
+ if (!tn) {
+ JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn));
+ return -ENOMEM;
+ }
+
+ tn->partial_crc = 0;
+ csize = je32_to_cpu(rd->csize);
+
+ /* If we've never checked the CRCs on this node, check them now */
+ if (ref_flags(ref) == REF_UNCHECKED) {
+ uint32_t crc;
+
+ crc = crc32(0, rd, sizeof(*rd) - 8);
+ if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
+ JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
+ ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
+ goto free_out;
+ }
+
+ /* Sanity checks */
+ if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
+ unlikely(PAD(je32_to_cpu(rd->csize) + sizeof(*rd)) != PAD(je32_to_cpu(rd->totlen)))) {
+ JFFS2_WARNING("inode node header CRC is corrupted at %#08x\n", ref_offset(ref));
+ jffs2_dbg_dump_node(c, ref_offset(ref));
+ goto free_out;
+ }
+
+ if (jffs2_is_writebuffered(c) && csize != 0) {
+ /* At this point we are supposed to check the data CRC
+ * of our unchecked node. But thus far, we do not
+ * know whether the node is valid or obsolete. To
+ * figure this out, we need to walk all the nodes of
+ * the inode and build the inode fragtree. We don't
+ * want to spend time checking data of nodes which may
+ * later be found to be obsolete. So we put off the full
+ * data CRC checking until we have read all the inode
+ * nodes and have started building the fragtree.
+ *
+ * The fragtree is being built starting with nodes
+ * having the highest version number, so we'll be able
+ * to detect whether a node is valid (i.e., it is not
+ * overlapped by a node with higher version) or not.
+			 * And we'll be able to check only those nodes which
+ * are not obsolete.
+ *
+ * Of course, this optimization only makes sense in case
+			 * of NAND flashes (or other flashes with
+ * !jffs2_can_mark_obsolete()), since on NOR flashes
+ * nodes are marked obsolete physically.
+ *
+ * Since NAND flashes (or other flashes with
+ * jffs2_is_writebuffered(c)) are anyway read by
+ * fractions of c->wbuf_pagesize, and we have just read
+ * the node header, it is likely that the starting part
+ * of the node data is also read when we read the
+			 * header. So we go ahead and check the CRC of the
+			 * starting part of the node's data now, and check
+ * the second part later (in jffs2_check_node_data()).
+ * Of course, we will not need to re-read and re-check
+ * the NAND page which we have just read. This is why we
+ * read the whole NAND page at jffs2_get_inode_nodes(),
+ * while we needed only the node header.
+ */
+ unsigned char *buf;
+
+ /* 'buf' will point to the start of data */
+ buf = (unsigned char *)rd + sizeof(*rd);
+ /* len will be the read data length */
+ len = min_t(uint32_t, rdlen - sizeof(*rd), csize);
+ tn->partial_crc = crc32(0, buf, len);
+
+ dbg_readinode("Calculates CRC (%#08x) for %d bytes, csize %d\n", tn->partial_crc, len, csize);
+
+ /* If we actually calculated the whole data CRC
+ * and it is wrong, drop the node. */
+ if (len >= csize && unlikely(tn->partial_crc != je32_to_cpu(rd->data_crc))) {
+ JFFS2_NOTICE("wrong data CRC in data node at 0x%08x: read %#08x, calculated %#08x.\n",
+ ref_offset(ref), tn->partial_crc, je32_to_cpu(rd->data_crc));
+ goto free_out;
}
- D1(printk(KERN_DEBUG "split old frag 0x%04x-0x%04x -->", this->ofs, this->ofs+this->size);
- if (this->node)
- printk("phys 0x%08x\n", this->node->raw->flash_offset &~3);
- else
- printk("hole\n");
- )
- newfrag2->ofs = fn->ofs + fn->size;
- newfrag2->size = (this->ofs+this->size) - newfrag2->ofs;
- newfrag2->next = this->next;
- newfrag2->node = this->node;
- if (this->node)
- this->node->frags++;
- newfrag->next = newfrag2;
- this->next = newfrag;
- this->size = newfrag->ofs - this->ofs;
- return 0;
+
+ } else if (csize == 0) {
+ /*
+ * We checked the header CRC. If the node has no data, adjust
+ * the space accounting now. For other nodes this will be done
+ * later either when the node is marked obsolete or when its
+ * data is checked.
+ */
+ struct jffs2_eraseblock *jeb;
+
+ dbg_readinode("the node has no data.\n");
+ jeb = c->blocks[ref->flash_offset / c->sector_size];
+ len = ref_totlen(c, jeb, ref);
+
+ spin_lock(&c->erase_completion_lock);
+ jeb->used_size += len;
+ jeb->unchecked_size -= len;
+ c->used_size += len;
+ c->unchecked_size -= len;
+ ref->flash_offset = ref_offset(ref) | REF_NORMAL;
+ spin_unlock(&c->erase_completion_lock);
}
- /* New node just reduces 'this' frag in size, doesn't split it */
- this->size = fn->ofs - this->ofs;
- newfrag->next = this->next;
- this->next = newfrag;
- this = newfrag->next;
- } else {
- D2(printk(KERN_DEBUG "Inserting newfrag (*%p) in before 'this' (*%p)\n", newfrag, this));
- *prev = newfrag;
- newfrag->next = this;
- }
- /* OK, now we have newfrag added in the correct place in the list, but
- newfrag->next points to a fragment which may be overlapping it
- */
- while (this && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
- /* 'this' frag is obsoleted. */
- old = this;
- this = old->next;
- jffs2_obsolete_node_frag(c, old);
}
- /* Now we're pointing at the first frag which isn't totally obsoleted by
- the new frag */
- newfrag->next = this;
- if (!this || newfrag->ofs + newfrag->size == this->ofs) {
- return 0;
+ tn->fn = jffs2_alloc_full_dnode();
+ if (!tn->fn) {
+ JFFS2_ERROR("alloc fn failed\n");
+ ret = -ENOMEM;
+ goto free_out;
}
- /* Still some overlap */
- this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
- this->ofs = newfrag->ofs + newfrag->size;
+
+ tn->version = je32_to_cpu(rd->version);
+ tn->fn->ofs = je32_to_cpu(rd->offset);
+ tn->data_crc = je32_to_cpu(rd->data_crc);
+ tn->csize = csize;
+ tn->fn->raw = ref;
+
+ /* There was a bug where we wrote hole nodes out with
+ csize/dsize swapped. Deal with it */
+ if (rd->compr == JFFS2_COMPR_ZERO && !je32_to_cpu(rd->dsize) && csize)
+ tn->fn->size = csize;
+ else // normal case...
+ tn->fn->size = je32_to_cpu(rd->dsize);
+
+ dbg_readinode("dnode @%08x: ver %u, offset %#04x, dsize %#04x, csize %#04x\n",
+ ref_offset(ref), je32_to_cpu(rd->version), je32_to_cpu(rd->offset), je32_to_cpu(rd->dsize), csize);
+
+ jffs2_add_tn_to_tree(tn, tnp);
+
return 0;
+
+free_out:
+ jffs2_free_tmp_dnode_info(tn);
+ return ret;
}
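
    The long comment in read_dnode() above describes deferring the full data-CRC
    check on write-buffered flash: only the part of the node data that already
    sits in the wbuf page(s) just read is CRC'd now, and the remainder is verified
    later by jffs2_check_node_data(). Below is a minimal userspace sketch of that
    arithmetic, assuming zlib's crc32(); the function and parameter names are
    hypothetical, not JFFS2 APIs.

    #include <stdint.h>
    #include <stddef.h>
    #include <zlib.h>            /* crc32() -- stand-in for the kernel's crc32 */

    /* Sketch: CRC whatever part of the node data already sits in the read
     * buffer.  Returns 1 if the full data CRC matched, -1 if it did not,
     * and 0 if only a partial CRC could be computed (decide later, as
     * jffs2_check_node_data() does in the real code). */
    static int partial_data_crc(const unsigned char *hdr, size_t hdr_len,
                                size_t rdlen, size_t csize,
                                uint32_t expected_crc, uint32_t *partial)
    {
        const unsigned char *data = hdr + hdr_len;
        size_t avail = rdlen > hdr_len ? rdlen - hdr_len : 0;
        size_t len = avail < csize ? avail : csize;

        *partial = (uint32_t)crc32(0, data, (unsigned int)len);
        if (len >= csize)                      /* all of the data was resident */
            return *partial == expected_crc ? 1 : -1;
        return 0;                              /* partial CRC stored, check the rest later */
    }

    The 0 case corresponds to tn->partial_crc being kept around so that the tail
    of the data is checked only if the node turns out not to be obsoleted by a
    newer one.
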
-void jffs2_truncate_fraglist (struct jffs2_sb_info *c, struct jffs2_node_frag **list, __u32 size)
+/*
+ * Helper function for jffs2_get_inode_nodes().
+ * It is called every time an unknown node is found.
+ *
+ * Returns: 0 on success;
+ * 1 if the node should be marked obsolete;
+ * negative error code on failure.
+ */
+static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref, struct jffs2_unknown_node *un)
{
- D1(printk(KERN_DEBUG "Truncating fraglist to 0x%08x bytes\n", size));
-
- while (*list) {
- if ((*list)->ofs >= size) {
- struct jffs2_node_frag *this = *list;
- *list = this->next;
- D1(printk(KERN_DEBUG "Removing frag 0x%08x-0x%08x\n", this->ofs, this->ofs+this->size));
- jffs2_obsolete_node_frag(c, this);
- continue;
- } else if ((*list)->ofs + (*list)->size > size) {
- D1(printk(KERN_DEBUG "Truncating frag 0x%08x-0x%08x\n", (*list)->ofs, (*list)->ofs + (*list)->size));
- (*list)->size = size - (*list)->ofs;
+ /* We don't mark unknown nodes as REF_UNCHECKED */
+ BUG_ON(ref_flags(ref) == REF_UNCHECKED);
+
+ un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
+
+ if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) {
+ /* Hmmm. This should have been caught at scan time. */
+ JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref));
+ jffs2_dbg_dump_node(c, ref_offset(ref));
+ return 1;
+ } else {
+ switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
+
+ case JFFS2_FEATURE_INCOMPAT:
+ JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ /* EEP */
+ BUG();
+ break;
+
+ case JFFS2_FEATURE_ROCOMPAT:
+ JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
+ break;
+
+ case JFFS2_FEATURE_RWCOMPAT_COPY:
+ JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ break;
+
+ case JFFS2_FEATURE_RWCOMPAT_DELETE:
+ JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
+ je16_to_cpu(un->nodetype), ref_offset(ref));
+ return 1;
}
- list = &(*list)->next;
}
+
+ return 0;
}
-/* Scan the list of all nodes present for this ino, build map of versions, etc. */
+/*
+ * Helper function for jffs2_get_inode_nodes().
+ * The function detects whether more data should be read and reads it if yes.
+ *
+ * Returns: 0 on success;
+ * negative error code on failure.
+ */
+static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
+ int right_size, int *rdlen, unsigned char *buf, unsigned char *bufstart)
+{
+ int right_len, len;
+ uint32_t offs;
+
+ if (jffs2_is_writebuffered(c)) {
+ right_len = c->wbuf_pagesize - (bufstart - buf);
+ if (right_size + (int)(bufstart - buf) > c->wbuf_pagesize)
+ right_len += c->wbuf_pagesize;
+ } else
+ right_len = right_size;
+
+ if (*rdlen == right_len)
+ return 0;
+
+ /* We need to read more data */
+ offs = ref_offset(ref) + *rdlen;
+ if (jffs2_is_writebuffered(c)) {
+ bufstart = buf + c->wbuf_pagesize;
+ len = c->wbuf_pagesize;
+ } else {
+ bufstart = buf + *rdlen;
+ len = right_size - *rdlen;
+ }
+
+ dbg_readinode("read more %d bytes\n", len);
+
+ if (jffs2_flash_read_safe(c, offs, len, bufstart))
+ return -EIO;
-void jffs2_read_inode (struct inode *inode)
+ *rdlen = right_len;
+
+ return 0;
+}
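
    read_more() above rounds the amount of resident data up to whole write-buffer
    pages: one page if the wanted structure fits in the remainder of the page the
    node starts in, two otherwise. A small standalone sketch of that rounding
    follows; the names are hypothetical (the real code works on c->wbuf_pagesize
    and the bufstart/buf offsets).

    #include <stddef.h>

    /* Sketch: how many bytes must be resident in the buffer so that a
     * structure of 'need' bytes, starting 'start_in_page' bytes into a
     * write-buffer page of 'pagesize' bytes, is fully covered. */
    static size_t bytes_needed(size_t pagesize, size_t start_in_page, size_t need)
    {
        size_t right_len = pagesize - start_in_page;   /* rest of the first page */
        if (need > right_len)
            right_len += pagesize;                     /* spills into the next page */
        return right_len;
    }

    If the caller already has that many bytes (*rdlen == right_len), nothing is
    read; otherwise the second page (or, on non-writebuffered flash, just the
    missing tail) is fetched.
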
+
+/* Get tmp_dnode_info and full_dirent for all non-obsolete nodes associated
+ with this ino, returning the former in order of version */
+static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct rb_root *tnp, struct jffs2_full_dirent **fdp,
+ uint32_t *highest_version, uint32_t *latest_mctime,
+ uint32_t *mctime_ver)
{
- struct jffs2_tmp_dnode_info *tn_list, *tn;
- struct jffs2_full_dirent *fd_list;
- struct jffs2_inode_info *f;
- struct jffs2_full_dnode *fn = NULL;
- struct jffs2_sb_info *c;
- struct jffs2_raw_inode latest_node;
- __u32 latest_mctime, mctime_ver;
- __u32 mdata_ver = 0;
- int ret;
- ssize_t retlen;
+ struct jffs2_raw_node_ref *ref, *valid_ref;
+ struct rb_root ret_tn = RB_ROOT;
+ struct jffs2_full_dirent *ret_fd = NULL;
+ unsigned char *buf = NULL;
+ union jffs2_node_union *node;
+ size_t retlen;
+ int len, err;
+
+ *mctime_ver = 0;
+
+ dbg_readinode("ino #%u\n", f->inocache->ino);
+
+ if (jffs2_is_writebuffered(c)) {
+ /*
+ * If we have the write buffer, we assume the minimal I/O unit
+ * is c->wbuf_pagesize. We implement some optimizations for this
+ * case and need a temporary buffer of size =
+ * 2*c->wbuf_pagesize bytes (see comments in read_dnode()).
+ * Basically, we want to read not only the node header, but the
+ * whole wbuf (NAND page in case of NAND) or 2, if the node
+ * header overlaps the border between the 2 wbufs.
+ */
+ len = 2*c->wbuf_pagesize;
+ } else {
+ /*
+ * When there is no write buffer, the size of the temporary
+ * buffer is the size of the largest node header.
+ */
+ len = sizeof(union jffs2_node_union);
+ }
- D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino));
+ /* FIXME: in case of NOR and available ->point() this
+ * needs to be fixed. */
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- f = JFFS2_INODE_INFO(inode);
- c = JFFS2_SB_INFO(inode->i_sb);
+ spin_lock(&c->erase_completion_lock);
+ valid_ref = jffs2_first_valid_node(f->inocache->nodes);
+ if (!valid_ref && f->inocache->ino != 1)
+ JFFS2_WARNING("Eep. No valid nodes for ino #%u.\n", f->inocache->ino);
+ while (valid_ref) {
+ unsigned char *bufstart;
+
+ /* We can hold a pointer to a non-obsolete node without the spinlock,
+ but _obsolete_ nodes may disappear at any time, if the block
+ they're in gets erased. So if we mark 'ref' obsolete while we're
+ not holding the lock, it can go away immediately. For that reason,
+ we find the next valid node first, before processing 'ref'.
+ */
+ ref = valid_ref;
+ valid_ref = jffs2_first_valid_node(ref->next_in_ino);
+ spin_unlock(&c->erase_completion_lock);
+
+ cond_resched();
+
+ /*
+ * At this point we don't know the type of the node we're going
+ * to read, so we do not know the size of its header. In order
+ * to minimize the amount of flash IO we assume the node has
+ * size = JFFS2_MIN_NODE_HEADER.
+ */
+ if (jffs2_is_writebuffered(c)) {
+ /*
+ * We treat 'buf' as 2 adjacent wbufs. We want to
+ * adjust bufstart so that it points to the
+ * beginning of the node within this wbuf.
+ */
+ bufstart = buf + (ref_offset(ref) % c->wbuf_pagesize);
+ /* We will read either one wbuf or 2 wbufs. */
+ len = c->wbuf_pagesize - (bufstart - buf);
+ if (JFFS2_MIN_NODE_HEADER + (int)(bufstart - buf) > c->wbuf_pagesize) {
+ /* The header spans the border of the first wbuf */
+ len += c->wbuf_pagesize;
+ }
+ } else {
+ bufstart = buf;
+ len = JFFS2_MIN_NODE_HEADER;
+ }
- memset(f, 0, sizeof(*f));
- D2(printk(KERN_DEBUG "getting inocache\n"));
- init_MUTEX(&f->sem);
- f->inocache = jffs2_get_ino_cache(c, inode->i_ino);
- D2(printk(KERN_DEBUG "jffs2_read_inode(): Got inocache at %p\n", f->inocache));
+ dbg_readinode("read %d bytes at %#08x(%d).\n", len, ref_offset(ref), ref_flags(ref));
- if (!f->inocache && inode->i_ino == 1) {
- /* Special case - no root inode on medium */
- f->inocache = jffs2_alloc_inode_cache();
- if (!f->inocache) {
- printk(KERN_CRIT "jffs2_read_inode(): Cannot allocate inocache for root inode\n");
- make_bad_inode(inode);
- return;
+ /* FIXME: point() */
+ err = jffs2_flash_read(c, ref_offset(ref), len,
+ &retlen, bufstart);
+ if (err) {
+ JFFS2_ERROR("can not read %d bytes from 0x%08x, " "error code: %d.\n", len, ref_offset(ref), err);
+ goto free_out;
}
- D1(printk(KERN_DEBUG "jffs2_read_inode(): Creating inocache for root inode\n"));
- memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
- f->inocache->ino = f->inocache->nlink = 1;
- f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
- jffs2_add_ino_cache(c, f->inocache);
- }
- if (!f->inocache) {
- printk(KERN_WARNING "jffs2_read_inode() on nonexistent ino %lu\n", (unsigned long)inode->i_ino);
- make_bad_inode(inode);
- return;
+
+ if (retlen < len) {
+ JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len);
+ err = -EIO;
+ goto free_out;
+ }
+
+ node = (union jffs2_node_union *)bufstart;
+
+ switch (je16_to_cpu(node->u.nodetype)) {
+
+ case JFFS2_NODETYPE_DIRENT:
+
+ if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) {
+ err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf, bufstart);
+ if (unlikely(err))
+ goto free_out;
+ }
+
+ err = read_direntry(c, ref, &node->d, retlen, &ret_fd, latest_mctime, mctime_ver);
+ if (err == 1) {
+ jffs2_mark_node_obsolete(c, ref);
+ break;
+ } else if (unlikely(err))
+ goto free_out;
+
+ if (je32_to_cpu(node->d.version) > *highest_version)
+ *highest_version = je32_to_cpu(node->d.version);
+
+ break;
+
+ case JFFS2_NODETYPE_INODE:
+
+ if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) {
+ err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf, bufstart);
+ if (unlikely(err))
+ goto free_out;
+ }
+
+ err = read_dnode(c, ref, &node->i, &ret_tn, len, latest_mctime, mctime_ver);
+ if (err == 1) {
+ jffs2_mark_node_obsolete(c, ref);
+ break;
+ } else if (unlikely(err))
+ goto free_out;
+
+ if (je32_to_cpu(node->i.version) > *highest_version)
+ *highest_version = je32_to_cpu(node->i.version);
+
+ break;
+
+ default:
+ if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) {
+ err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf, bufstart);
+ if (unlikely(err))
+ goto free_out;
+ }
+
+ err = read_unknown(c, ref, &node->u);
+ if (err == 1) {
+ jffs2_mark_node_obsolete(c, ref);
+ break;
+ } else if (unlikely(err))
+ goto free_out;
+
+ }
+ spin_lock(&c->erase_completion_lock);
}
- D1(printk(KERN_DEBUG "jffs2_read_inode(): ino #%lu nlink is %d\n", (unsigned long)inode->i_ino, f->inocache->nlink));
- inode->i_nlink = f->inocache->nlink;
+
+ spin_unlock(&c->erase_completion_lock);
+ *tnp = ret_tn;
+ *fdp = ret_fd;
+ kfree(buf);
+
+ dbg_readinode("nodes of inode #%u were read, the highest version is %u, latest_mctime %u, mctime_ver %u.\n",
+ f->inocache->ino, *highest_version, *latest_mctime, *mctime_ver);
+ return 0;
+
+ free_out:
+ jffs2_free_tmp_dnode_info_list(&ret_tn);
+ jffs2_free_full_dirent_list(ret_fd);
+ kfree(buf);
+ return err;
+}
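
    jffs2_get_inode_nodes() above treats its temporary buffer as two adjacent
    wbuf pages: bufstart is placed at the node's offset within the first page,
    and either one page or two are read depending on whether the minimal node
    header would cross the page border. A minimal sketch of that placement logic,
    with hypothetical names:

    #include <stdint.h>
    #include <stddef.h>

    struct read_plan {
        size_t bufstart_off;   /* node's offset inside the two-page buffer */
        size_t len;            /* bytes to read: rest of one page, or plus one more */
    };

    /* Sketch: plan the initial flash read for a node at 'node_off'. */
    static struct read_plan plan_read(uint32_t node_off, size_t pagesize, size_t min_hdr)
    {
        struct read_plan p;
        p.bufstart_off = node_off % pagesize;      /* where the node sits in its page */
        p.len = pagesize - p.bufstart_off;         /* read to the end of that page */
        if (min_hdr > p.len)
            p.len += pagesize;                     /* header crosses into the next page */
        return p;
    }

    For example, with a 512-byte page, a node at flash offset 0x1FE, and a minimal
    header of, say, 12 bytes, the header does not fit in the remaining 2 bytes of
    the first page, so two pages are covered and len becomes 2 + 512 = 514.
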
+
+static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
+ struct jffs2_inode_info *f,
+ struct jffs2_raw_inode *latest_node)
+{
+ struct jffs2_tmp_dnode_info *tn;
+ struct rb_root tn_list;
+ struct rb_node *rb, *repl_rb;
+ struct jffs2_full_dirent *fd_list;
+ struct jffs2_full_dnode *fn, *first_fn = NULL;
+ uint32_t crc;
+ uint32_t latest_mctime, mctime_ver;
+ size_t retlen;
+ int ret;
+
+ dbg_readinode("ino #%u nlink is %d\n", f->inocache->ino, f->inocache->nlink);
/* Grab all nodes relevant to this ino */
- ret = jffs2_get_inode_nodes(c, inode->i_ino, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);
+ ret = jffs2_get_inode_nodes(c, f, &tn_list, &fd_list, &f->highest_version, &latest_mctime, &mctime_ver);
if (ret) {
- printk(KERN_CRIT "jffs2_get_inode_nodes() for ino %lu returned %d\n", inode->i_ino, ret);
- make_bad_inode(inode);
- return;
+ JFFS2_ERROR("cannot read nodes for ino %u, returned error is %d\n", f->inocache->ino, ret);
+ if (f->inocache->state == INO_STATE_READING)
+ jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
+ return ret;
}
f->dents = fd_list;
- while (tn_list) {
- tn = tn_list;
+ rb = rb_first(&tn_list);
+ while (rb) {
+ cond_resched();
+ tn = rb_entry(rb, struct jffs2_tmp_dnode_info, rb);
fn = tn->fn;
-
- if (f->metadata && tn->version > mdata_ver) {
- D1(printk(KERN_DEBUG "Obsoleting old metadata at 0x%08x\n", f->metadata->raw->flash_offset &~3));
- jffs2_mark_node_obsolete(c, f->metadata->raw);
- jffs2_free_full_dnode(f->metadata);
- f->metadata = NULL;
-
- mdata_ver = 0;
- }
+ ret = 1;
+ dbg_readinode("consider node ver %u, phys offset "
+ "%#08x(%d), range %u-%u.\n", tn->version,
+ ref_offset(fn->raw), ref_flags(fn->raw),
+ fn->ofs, fn->ofs + fn->size);
if (fn->size) {
- jffs2_add_full_dnode_to_inode(c, f, fn);
- } else {
- /* Zero-sized node at end of version list. Just a metadata update */
- D1(printk(KERN_DEBUG "metadata @%08x: ver %d\n", fn->raw->flash_offset &~3, tn->version));
+ ret = jffs2_add_older_frag_to_fragtree(c, f, tn);
+ /* TODO: the error code isn't checked, check it */
+ jffs2_dbg_fragtree_paranoia_check_nolock(f);
+ BUG_ON(ret < 0);
+ if (!first_fn && ret == 0)
+ first_fn = fn;
+ } else if (!first_fn) {
+ first_fn = fn;
f->metadata = fn;
- mdata_ver = tn->version;
- }
- tn_list = tn->next;
+ ret = 0; /* Prevent freeing the metadata update node */
+ } else
+ jffs2_mark_node_obsolete(c, fn->raw);
+
+ BUG_ON(rb->rb_left);
+ if (rb->rb_parent && rb->rb_parent->rb_left == rb) {
+ /* We were the left-hand child of our parent. We need
+ * to move our own right-hand child into our place. */
+ repl_rb = rb->rb_right;
+ if (repl_rb)
+ repl_rb->rb_parent = rb->rb_parent;
+ } else
+ repl_rb = NULL;
+
+ rb = rb_next(rb);
+
+ /* Remove the spent tn from the tree; don't bother rebalancing
+ * but put our right-hand child in our own place. */
+ if (tn->rb.rb_parent) {
+ if (tn->rb.rb_parent->rb_left == &tn->rb)
+ tn->rb.rb_parent->rb_left = repl_rb;
+ else if (tn->rb.rb_parent->rb_right == &tn->rb)
+ tn->rb.rb_parent->rb_right = repl_rb;
+ else BUG();
+ } else if (tn->rb.rb_right)
+ tn->rb.rb_right->rb_parent = NULL;
+
jffs2_free_tmp_dnode_info(tn);
+ if (ret) {
+ dbg_readinode("delete dnode %u-%u.\n",
+ fn->ofs, fn->ofs + fn->size);
+ jffs2_free_full_dnode(fn);
+ }
}
- if (!fn) {
+ jffs2_dbg_fragtree_paranoia_check_nolock(f);
+
+ BUG_ON(first_fn && ref_obsolete(first_fn->raw));
+
+ fn = first_fn;
+ if (unlikely(!first_fn)) {
/* No data nodes for this inode. */
- if (inode->i_ino != 1) {
- printk(KERN_WARNING "jffs2_read_inode(): No data nodes found for ino #%lu\n", inode->i_ino);
+ if (f->inocache->ino != 1) {
+ JFFS2_WARNING("no data nodes found for ino #%u\n", f->inocache->ino);
if (!fd_list) {
- make_bad_inode(inode);
- return;
+ if (f->inocache->state == INO_STATE_READING)
+ jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
+ return -EIO;
}
- printk(KERN_WARNING "jffs2_read_inode(): But it has children so we fake some modes for it\n");
- }
- inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
- latest_node.version = 0;
- inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME;
- inode->i_nlink = f->inocache->nlink;
- inode->i_size = 0;
- } else {
- __u32 crc;
-
- ret = c->mtd->read(c->mtd, fn->raw->flash_offset & ~3, sizeof(latest_node), &retlen, (void *)&latest_node);
- if (ret || retlen != sizeof(latest_node)) {
- printk(KERN_NOTICE "MTD read in jffs2_read_inode() failed: Returned %d, %ld of %d bytes read\n",
- ret, (long)retlen, sizeof(latest_node));
- jffs2_clear_inode(inode);
- make_bad_inode(inode);
- return;
+ JFFS2_NOTICE("but it has children so we fake some modes for it\n");
}
+ latest_node->mode = cpu_to_jemode(S_IFDIR|S_IRUGO|S_IWUSR|S_IXUGO);
+ latest_node->version = cpu_to_je32(0);
+ latest_node->atime = latest_node->ctime = latest_node->mtime = cpu_to_je32(0);
+ latest_node->isize = cpu_to_je32(0);
+ latest_node->gid = cpu_to_je16(0);
+ latest_node->uid = cpu_to_je16(0);
+ if (f->inocache->state == INO_STATE_READING)
+ jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
+ return 0;
+ }
- crc = crc32(0, &latest_node, sizeof(latest_node)-8);
- if (crc != latest_node.node_crc) {
- printk(KERN_NOTICE "CRC failed for read_inode of inode %ld at physical location 0x%x\n", inode->i_ino, fn->raw->flash_offset & ~3);
- jffs2_clear_inode(inode);
- make_bad_inode(inode);
- return;
- }
+ ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(*latest_node), &retlen, (void *)latest_node);
+ if (ret || retlen != sizeof(*latest_node)) {
+ JFFS2_ERROR("failed to read from flash: error %d, %zd of %zd bytes read\n",
+ ret, retlen, sizeof(*latest_node));
+ /* FIXME: If this fails, there seems to be a memory leak. Find it. */
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ return ret?ret:-EIO;
+ }
- inode->i_mode = latest_node.mode;
- inode->i_uid = latest_node.uid;
- inode->i_gid = latest_node.gid;
- inode->i_size = latest_node.isize;
- if (S_ISREG(inode->i_mode))
- jffs2_truncate_fraglist(c, &f->fraglist, latest_node.isize);
- inode->i_atime = latest_node.atime;
- inode->i_mtime = latest_node.mtime;
- inode->i_ctime = latest_node.ctime;
+ crc = crc32(0, latest_node, sizeof(*latest_node)-8);
+ if (crc != je32_to_cpu(latest_node->node_crc)) {
+ JFFS2_ERROR("CRC failed for read_inode of inode %u at physical location 0x%x\n",
+ f->inocache->ino, ref_offset(fn->raw));
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ return -EIO;
}
- /* OK, now the special cases. Certain inode types should
- have only one data node, and it's kept as the metadata
- node */
- if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode) ||
- S_ISLNK(inode->i_mode)) {
- if (f->metadata) {
- printk(KERN_WARNING "Argh. Special inode #%lu with mode 0%o had metadata node\n", inode->i_ino, inode->i_mode);
- jffs2_clear_inode(inode);
- make_bad_inode(inode);
- return;
- }
- if (!f->fraglist) {
- printk(KERN_WARNING "Argh. Special inode #%lu with mode 0%o has no fragments\n", inode->i_ino, inode->i_mode);
- jffs2_clear_inode(inode);
- make_bad_inode(inode);
- return;
- }
- /* ASSERT: f->fraglist != NULL */
- if (f->fraglist->next) {
- printk(KERN_WARNING "Argh. Special inode #%lu with mode 0%o had more than one node\n", inode->i_ino, inode->i_mode);
- /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
- jffs2_clear_inode(inode);
- make_bad_inode(inode);
- return;
+ switch(jemode_to_cpu(latest_node->mode) & S_IFMT) {
+ case S_IFDIR:
+ if (mctime_ver > je32_to_cpu(latest_node->version)) {
+ /* The times in the latest_node are actually older than
+ mctime in the latest dirent. Cheat. */
+ latest_node->ctime = latest_node->mtime = cpu_to_je32(latest_mctime);
}
- /* OK. We're happy */
- f->metadata = f->fraglist->node;
- jffs2_free_node_frag(f->fraglist);
- f->fraglist = NULL;
- }
-
- inode->i_blksize = PAGE_SIZE;
- inode->i_blocks = (inode->i_size + 511) >> 9;
-
- switch (inode->i_mode & S_IFMT) {
- unsigned short rdev;
+ break;
+
+
+ case S_IFREG:
+ /* If it was a regular file, truncate it to the latest node's isize */
+ jffs2_truncate_fragtree(c, &f->fragtree, je32_to_cpu(latest_node->isize));
+ break;
case S_IFLNK:
- inode->i_op = &jffs2_symlink_inode_operations;
/* Hack to work around broken isize in old symlink code.
Remove this when dwmw2 comes to his senses and stops
symlinks from being an entirely gratuitous special
case. */
- if (!inode->i_size)
- inode->i_size = latest_node.dsize;
- break;
-
- case S_IFDIR:
- if (mctime_ver > latest_node.version) {
- /* The times in the latest_node are actually older than
- mctime in the latest dirent. Cheat. */
- inode->i_mtime = inode->i_ctime = inode->i_atime =
- latest_mctime;
+ if (!je32_to_cpu(latest_node->isize))
+ latest_node->isize = latest_node->dsize;
+
+ if (f->inocache->state != INO_STATE_CHECKING) {
+ /* Symlink's inode data is the target path. Read it and
+ * keep in RAM to facilitate quick follow symlink
+ * operation. */
+ f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL);
+ if (!f->target) {
+ JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize));
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ return -ENOMEM;
+ }
+
+ ret = jffs2_flash_read(c, ref_offset(fn->raw) + sizeof(*latest_node),
+ je32_to_cpu(latest_node->csize), &retlen, (char *)f->target);
+
+ if (!ret && retlen != je32_to_cpu(latest_node->csize))
+ ret = -EIO;
+ if (ret) {
+ kfree(f->target);
+ f->target = NULL;
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ return ret;
+ }
+
+ f->target[je32_to_cpu(latest_node->csize)] = '\0';
+ dbg_readinode("symlink's target '%s' cached\n", f->target);
}
- inode->i_op = &jffs2_dir_inode_operations;
- inode->i_fop = &jffs2_dir_operations;
- break;
- case S_IFREG:
- inode->i_op = &jffs2_file_inode_operations;
- inode->i_fop = &jffs2_file_operations;
- inode->i_mapping->a_ops = &jffs2_file_address_operations;
- inode->i_mapping->nrpages = 0;
- break;
+ /* fall through... */
case S_IFBLK:
case S_IFCHR:
- /* Read the device numbers from the media */
- D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
- if (jffs2_read_dnode(c, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) {
- /* Eep */
- printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
- jffs2_clear_inode(inode);
- make_bad_inode(inode);
- return;
- }
-
- case S_IFSOCK:
- case S_IFIFO:
- inode->i_op = &jffs2_file_inode_operations;
- init_special_inode(inode, inode->i_mode, kdev_t_to_nr(MKDEV(rdev>>8, rdev&0xff)));
+ /* Certain inode types should have only one data node, and it's
+ kept as the metadata node */
+ if (f->metadata) {
+ JFFS2_ERROR("Argh. Special inode #%u with mode 0%o had metadata node\n",
+ f->inocache->ino, jemode_to_cpu(latest_node->mode));
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ return -EIO;
+ }
+ if (!frag_first(&f->fragtree)) {
+ JFFS2_ERROR("Argh. Special inode #%u with mode 0%o has no fragments\n",
+ f->inocache->ino, jemode_to_cpu(latest_node->mode));
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ return -EIO;
+ }
+ /* ASSERT: frag_first(&f->fragtree) != NULL */
+ if (frag_next(frag_first(&f->fragtree))) {
+ JFFS2_ERROR("Argh. Special inode #%u with mode 0x%x had more than one node\n",
+ f->inocache->ino, jemode_to_cpu(latest_node->mode));
+ /* FIXME: Deal with it - check crc32, check for duplicate node, check times and discard the older one */
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ return -EIO;
+ }
+ /* OK. We're happy */
+ f->metadata = frag_first(&f->fragtree)->node;
+ jffs2_free_node_frag(frag_first(&f->fragtree));
+ f->fragtree = RB_ROOT;
break;
+ }
+ if (f->inocache->state == INO_STATE_READING)
+ jffs2_set_inocache_state(c, f->inocache, INO_STATE_PRESENT);
- default:
- printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu", inode->i_mode, (unsigned long)inode->i_ino);
+ return 0;
+}
+
+/* Scan the list of all nodes present for this ino, build map of versions, etc. */
+int jffs2_do_read_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ uint32_t ino, struct jffs2_raw_inode *latest_node)
+{
+ dbg_readinode("read inode #%u\n", ino);
+
+ retry_inocache:
+ spin_lock(&c->inocache_lock);
+ f->inocache = jffs2_get_ino_cache(c, ino);
+
+ if (f->inocache) {
+ /* Check its state. We may need to wait before we can use it */
+ switch(f->inocache->state) {
+ case INO_STATE_UNCHECKED:
+ case INO_STATE_CHECKEDABSENT:
+ f->inocache->state = INO_STATE_READING;
+ break;
+
+ case INO_STATE_CHECKING:
+ case INO_STATE_GC:
+ /* If it's in either of these states, we need
+ to wait for whoever's got it to finish and
+ put it back. */
+ dbg_readinode("waiting for ino #%u in state %d\n", ino, f->inocache->state);
+ sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
+ goto retry_inocache;
+
+ case INO_STATE_READING:
+ case INO_STATE_PRESENT:
+ /* Eep. This should never happen. It can
+ happen if Linux calls read_inode() again
+ before clear_inode() has finished though. */
+ JFFS2_ERROR("Eep. Trying to read_inode #%u when it's already in state %d!\n", ino, f->inocache->state);
+ /* Fail. That's probably better than allowing it to succeed */
+ f->inocache = NULL;
+ break;
+
+ default:
+ BUG();
+ }
}
- D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
+ spin_unlock(&c->inocache_lock);
+
+ if (!f->inocache && ino == 1) {
+ /* Special case - no root inode on medium */
+ f->inocache = jffs2_alloc_inode_cache();
+ if (!f->inocache) {
+ JFFS2_ERROR("cannot allocate inocache for root inode\n");
+ return -ENOMEM;
+ }
+ dbg_readinode("creating inocache for root inode\n");
+ memset(f->inocache, 0, sizeof(struct jffs2_inode_cache));
+ f->inocache->ino = f->inocache->nlink = 1;
+ f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
+ f->inocache->state = INO_STATE_READING;
+ jffs2_add_ino_cache(c, f->inocache);
+ }
+ if (!f->inocache) {
+ JFFS2_ERROR("requestied to read an nonexistent ino %u\n", ino);
+ return -ENOENT;
+ }
+
+ return jffs2_do_read_inode_internal(c, f, latest_node);
}
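
    The retry_inocache loop above is a wait-and-retry pattern: claim the cache
    entry if it is idle, sleep and retry if someone else is checking or
    garbage-collecting it, and fail if a concurrent read is already in flight.
    A reduced userspace sketch of the same pattern using pthreads follows; names
    are hypothetical, and the real code also re-runs the lookup after waking,
    which is omitted here.

    #include <pthread.h>

    /* Hypothetical, reduced version of the inocache state machine above. */
    enum ino_state { ST_UNCHECKED, ST_CHECKEDABSENT, ST_CHECKING,
                     ST_GC, ST_READING, ST_PRESENT };

    struct ino_cache { enum ino_state state; };

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cache_wq   = PTHREAD_COND_INITIALIZER;

    /* Sketch: claim an entry for reading, waiting while another thread is
     * checking or garbage-collecting it.  Returns 0 on success, -1 if a
     * concurrent read/present state is found (the "should never happen"
     * branch in the real code). */
    static int claim_for_reading(struct ino_cache *ic)
    {
        pthread_mutex_lock(&cache_lock);
        for (;;) {
            switch (ic->state) {
            case ST_UNCHECKED:
            case ST_CHECKEDABSENT:
                ic->state = ST_READING;            /* it is ours now */
                pthread_mutex_unlock(&cache_lock);
                return 0;
            case ST_CHECKING:
            case ST_GC:
                /* Busy: wait for the owner to finish, then look again. */
                pthread_cond_wait(&cache_wq, &cache_lock);
                break;
            default:                               /* ST_READING / ST_PRESENT */
                pthread_mutex_unlock(&cache_lock);
                return -1;
            }
        }
    }
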
-void jffs2_clear_inode (struct inode *inode)
+int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
+{
+ struct jffs2_raw_inode n;
+ struct jffs2_inode_info *f = kmalloc(sizeof(*f), GFP_KERNEL);
+ int ret;
+
+ if (!f)
+ return -ENOMEM;
+
+ memset(f, 0, sizeof(*f));
+ init_MUTEX_LOCKED(&f->sem);
+ f->inocache = ic;
+
+ ret = jffs2_do_read_inode_internal(c, f, &n);
+ if (!ret) {
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ }
+ kfree (f);
+ return ret;
+}
+
+void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
{
- /* We can forget about this inode for now - drop all
- * the nodelists associated with it, etc.
- */
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- struct jffs2_node_frag *frag, *frags;
struct jffs2_full_dirent *fd, *fds;
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
- /* I don't think we care about the potential race due to reading this
- without f->sem. It can never get undeleted. */
- int deleted = f->inocache && !f->inocache->nlink;
-
- D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
-
- /* If it's a deleted inode, grab the alloc_sem. This prevents
- jffs2_garbage_collect_pass() from deciding that it wants to
- garbage collect one of the nodes we're just about to mark
- obsolete -- by the time we drop alloc_sem and return, all
- the nodes are marked obsolete, and jffs2_g_c_pass() won't
- call iget() for the inode in question.
- */
- if (deleted)
- down(&c->alloc_sem);
+ int deleted;
down(&f->sem);
+ deleted = f->inocache && !f->inocache->nlink;
+
+ if (f->inocache && f->inocache->state != INO_STATE_CHECKING)
+ jffs2_set_inocache_state(c, f->inocache, INO_STATE_CLEARING);
- frags = f->fraglist;
- fds = f->dents;
if (f->metadata) {
if (deleted)
jffs2_mark_node_obsolete(c, f->metadata->raw);
jffs2_free_full_dnode(f->metadata);
}
- while (frags) {
- frag = frags;
- frags = frag->next;
- D2(printk(KERN_DEBUG "jffs2_clear_inode: frag at 0x%x-0x%x: node %p, frags %d--\n", frag->ofs, frag->ofs+frag->size, frag->node, frag->node?frag->node->frags:0));
-
- if (frag->node && !(--frag->node->frags)) {
- /* Not a hole, and it's the final remaining frag of this node. Free the node */
- if (deleted)
- jffs2_mark_node_obsolete(c, frag->node->raw);
+ jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
- jffs2_free_full_dnode(frag->node);
- }
- jffs2_free_node_frag(frag);
+ if (f->target) {
+ kfree(f->target);
+ f->target = NULL;
}
+
+ fds = f->dents;
while(fds) {
fd = fds;
fds = fd->next;
jffs2_free_full_dirent(fd);
}
- up(&f->sem);
-
- if(deleted)
- up(&c->alloc_sem);
-};
+ if (f->inocache && f->inocache->state != INO_STATE_CHECKING) {
+ jffs2_set_inocache_state(c, f->inocache, INO_STATE_CHECKEDABSENT);
+ if (f->inocache->nodes == (void *)f->inocache)
+ jffs2_del_ino_cache(c, f->inocache);
+ }
+ up(&f->sem);
+}
diff --git a/linux-2.4.x/fs/jffs2/scan.c b/linux-2.4.x/fs/jffs2/scan.c
index 69932e1..ebc05ec 100644
--- a/linux-2.4.x/fs/jffs2/scan.c
+++ b/linux-2.4.x/fs/jffs2/scan.c
@@ -1,56 +1,27 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: scan.c,v 1.51.2.3 2002/07/25 20:49:06 dwmw2 Exp $
+ * $Id: scan.c,v 1.136 2006/04/07 10:00:31 havasi Exp $
*
*/
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/jffs2.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
+#include <linux/crc32.h>
+#include <linux/compiler.h>
#include "nodelist.h"
-#include "crc32.h"
+#include "summary.h"
+#include "debug.h"
-
-#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
- c->free_size -= _x; c->dirty_size += _x; \
- jeb->free_size -= _x ; jeb->dirty_size += _x; \
- }while(0)
-#define USED_SPACE(x) do { typeof(x) _x = (x); \
- c->free_size -= _x; c->used_size += _x; \
- jeb->free_size -= _x ; jeb->used_size += _x; \
- }while(0)
+#define DEFAULT_EMPTY_SCAN_SIZE 1024
#define noisy_printk(noise, args...) do { \
if (*(noise)) { \
@@ -63,54 +34,123 @@
} while(0)
static uint32_t pseudo_random;
-static void jffs2_rotate_lists(struct jffs2_sb_info *c);
-static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
+static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s);
-/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
+/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
* Returning an error will abort the mount - bad checksums etc. should just mark the space
* as dirty.
*/
-static int jffs2_scan_empty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, __u32 *ofs, int *noise);
-static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, __u32 *ofs);
-static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, __u32 *ofs);
+static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
+static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);
+static int jffs2_scan_eraseblock_header(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_ebh *eh, uint32_t ofs, struct jffs2_summary *s);
+
+static inline int min_free(struct jffs2_sb_info *c)
+{
+ uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
+ return c->wbuf_pagesize;
+#endif
+ return min;
+
+}
+static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
+ if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
+ return sector_size;
+ else
+ return DEFAULT_EMPTY_SCAN_SIZE;
+}
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
int i, ret;
- __u32 empty_blocks = 0;
+ uint32_t empty_blocks = 0, bad_blocks = 0;
+ unsigned char *flashbuf = NULL;
+ uint32_t buf_size = 0;
+ struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
+#ifndef __ECOS
+ size_t pointlen;
+
+ if (c->mtd->point) {
+ ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
+ if (!ret && pointlen < c->mtd->size) {
+ /* Don't muck about if it won't let us point to the whole flash */
+ D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
+ c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
+ flashbuf = NULL;
+ }
+ if (ret)
+ D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
+ }
+#endif
+ if (!flashbuf) {
+ /* For NAND it's quicker to read a whole eraseblock at a time,
+ apparently */
+ if (jffs2_ebh_oob(c))
+ buf_size = c->sector_size;
+ else
+ buf_size = PAGE_SIZE;
+
+ /* Respect kmalloc limitations */
+ if (buf_size > 128*1024)
+ buf_size = 128*1024;
+
+ D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
+ flashbuf = kmalloc(buf_size, GFP_KERNEL);
+ if (!flashbuf)
+ return -ENOMEM;
+ }
- if (!c->blocks) {
- printk(KERN_WARNING "EEEK! c->blocks is NULL!\n");
- return -EINVAL;
+ if (jffs2_sum_active()) {
+ s = kmalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
+ if (!s) {
+ JFFS2_WARNING("Can't allocate memory for summary\n");
+ return -ENOMEM;
+ }
+ memset(s, 0, sizeof(struct jffs2_summary));
}
+
for (i=0; i<c->nr_blocks; i++) {
- struct jffs2_eraseblock *jeb = &c->blocks[i];
+ struct jffs2_eraseblock *jeb = c->blocks[i];
+
+ /* reset summary info for next eraseblock scan */
+ jffs2_sum_reset_collected(s);
+
+ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+ buf_size, s);
- ret = jffs2_scan_eraseblock(c, jeb);
if (ret < 0)
- return ret;
+ goto out;
- ACCT_PARANOIA_CHECK(jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
/* Now decide which list to put it on */
- if (ret == 1) {
- /*
- * Empty block. Since we can't be sure it
+ switch(ret) {
+ case BLK_STATE_ALLFF:
+ /*
+ * Empty block. Since we can't be sure it
* was entirely erased, we just queue it for erase
* again. It will be marked as such when the erase
* is complete. Meanwhile we still count it as empty
* for later checks.
*/
- list_add(&jeb->list, &c->erase_pending_list);
empty_blocks++;
+ list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- } else if (jeb->used_size == PAD(sizeof(struct jffs2_unknown_node)) && !jeb->first_node->next_in_ino) {
+ break;
+
+ case BLK_STATE_CLEANMARKER:
/* Only a CLEANMARKER node is valid */
if (!jeb->dirty_size) {
/* It's actually free */
list_add(&jeb->list, &c->free_list);
+ jffs2_add_to_hash_table(c, jeb, 2);
c->nr_free_blocks++;
} else {
/* Dirt */
@@ -118,74 +158,270 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
}
- } else if (jeb->used_size > c->sector_size - (2*sizeof(struct jffs2_raw_inode))) {
- /* Full (or almost full) of clean data. Clean list */
- list_add(&jeb->list, &c->clean_list);
- } else if (jeb->used_size) {
- /* Some data, but not full. Dirty list. */
- /* Except that we want to remember the block with most free space,
- and stick it in the 'nextblock' position to start writing to it.
- Later when we do snapshots, this must be the most recent block,
- not the one with most free space.
- */
- if (jeb->free_size > 2*sizeof(struct jffs2_raw_inode) &&
- (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
- /* Better candidate for the next writes to go to */
- if (c->nextblock)
- list_add(&c->nextblock->list, &c->dirty_list);
- c->nextblock = jeb;
- } else {
- list_add(&jeb->list, &c->dirty_list);
- }
- } else {
+ break;
+
+ case BLK_STATE_CLEAN:
+ /* Full (or almost full) of clean data. Clean list */
+ list_add(&jeb->list, &c->clean_list);
+ jffs2_add_to_hash_table(c, jeb, 1);
+ break;
+
+ case BLK_STATE_PARTDIRTY:
+ /* Some data, but not full. Dirty list. */
+ /* We want to remember the block with most free space
+ and stick it in the 'nextblock' position to start writing to it. */
+ if (jeb->free_size > min_free(c) &&
+ (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
+ /* Better candidate for the next writes to go to */
+ if (c->nextblock) {
+ c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
+ c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
+ c->free_size -= c->nextblock->free_size;
+ c->wasted_size -= c->nextblock->wasted_size;
+ c->nextblock->free_size = c->nextblock->wasted_size = 0;
+ if (VERYDIRTY(c, c->nextblock->dirty_size)) {
+ list_add(&c->nextblock->list, &c->very_dirty_list);
+ } else {
+ list_add(&c->nextblock->list, &c->dirty_list);
+ }
+ jffs2_add_to_hash_table(c, c->nextblock, 1);
+ /* deleting summary information of the old nextblock */
+ jffs2_sum_reset_collected(c->summary);
+ }
+ /* update collected summary information for the current nextblock */
+ jffs2_sum_move_collected(c, s);
+ D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
+ c->nextblock = jeb;
+ } else {
+ jeb->dirty_size += jeb->free_size + jeb->wasted_size;
+ c->dirty_size += jeb->free_size + jeb->wasted_size;
+ c->free_size -= jeb->free_size;
+ c->wasted_size -= jeb->wasted_size;
+ jeb->free_size = jeb->wasted_size = 0;
+ if (VERYDIRTY(c, jeb->dirty_size)) {
+ list_add(&jeb->list, &c->very_dirty_list);
+ } else {
+ list_add(&jeb->list, &c->dirty_list);
+ }
+ jffs2_add_to_hash_table(c, jeb, 1);
+ }
+ break;
+
+ case BLK_STATE_ALLDIRTY:
/* Nothing valid - not even a clean marker. Needs erasing. */
- /* For now we just put it on the erasing list. We'll start the erases later */
- printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset);
- list_add(&jeb->list, &c->erase_pending_list);
+ /* For now we just put it on the erasing list. We'll start the erases later */
+ D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
+ list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
+ break;
+
+ case BLK_STATE_BADBLOCK:
+ D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
+ list_add(&jeb->list, &c->bad_list);
+ c->bad_size += c->sector_size;
+ c->free_size -= c->sector_size;
+ bad_blocks++;
+ break;
+ default:
+ printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
+ BUG();
}
}
- /* Rotate the lists by some number to ensure wear levelling */
- jffs2_rotate_lists(c);
+ if (jffs2_sum_active() && s)
+ kfree(s);
+
+ /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
+ if (c->nextblock && (c->nextblock->dirty_size)) {
+ c->nextblock->wasted_size += c->nextblock->dirty_size;
+ c->wasted_size += c->nextblock->dirty_size;
+ c->dirty_size -= c->nextblock->dirty_size;
+ c->nextblock->dirty_size = 0;
+ }
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
+ /* If we're going to start writing into a block which already
+ contains data, and the end of the data isn't page-aligned,
+ skip a little and align it. */
+
+ uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;
+
+ D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
+ skip));
+ c->nextblock->wasted_size += skip;
+ c->wasted_size += skip;
+
+ c->nextblock->free_size -= skip;
+ c->free_size -= skip;
+ }
+#endif
if (c->nr_erasing_blocks) {
- if (!c->used_size && empty_blocks != c->nr_blocks) {
+ if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) {
printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
- return -EIO;
+ printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks);
+ ret = -EIO;
+ goto out;
}
jffs2_erase_pending_trigger(c);
}
- return 0;
+ ret = 0;
+ out:
+ if (buf_size)
+ kfree(flashbuf);
+#ifndef __ECOS
+ else
+ c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
+#endif
+ return ret;
+}
+
+int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+ if (EBFLAGS_HAS_EBH(jeb) && c->ebh_size) {
+ if (!jeb->first_node->next_phys && !jeb->dirty_size)
+ return BLK_STATE_CLEANMARKER;
+ }
+
+ if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
+ && (!jeb->first_node || !jeb->first_node->next_phys) )
+ return BLK_STATE_CLEANMARKER;
+
+ /* move blocks with max 4 byte dirty space to cleanlist */
+ else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
+ c->dirty_size -= jeb->dirty_size;
+ c->wasted_size += jeb->dirty_size;
+ jeb->wasted_size += jeb->dirty_size;
+ jeb->dirty_size = 0;
+ return BLK_STATE_CLEAN;
+ } else if (jeb->used_size || jeb->unchecked_size)
+ return BLK_STATE_PARTDIRTY;
+ else
+ return BLK_STATE_ALLDIRTY;
}
-static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) {
- struct jffs2_unknown_node node;
- __u32 ofs, prevofs;
- __u32 hdr_crc, nodetype;
+static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
+ struct jffs2_unknown_node *node;
+ struct jffs2_unknown_node crcnode;
+ struct jffs2_sum_marker *sm;
+ uint32_t ofs, prevofs;
+ uint32_t hdr_crc, buf_ofs, buf_len;
int err;
int noise = 0;
+
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ uint32_t data_len = 0;
+#endif
+
ofs = jeb->offset;
prevofs = jeb->offset - 1;
D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));
- err = jffs2_scan_empty(c, jeb, &ofs, &noise);
- if (err) return err;
- if (ofs == jeb->offset + c->sector_size) {
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ if (jffs2_ebh_oob(c)) {
+ int ret = jffs2_check_nand_cleanmarker_ebh(c, jeb, &data_len);
+ D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret));
+ /* Even if it's not found, we still scan to see
+ if the block is empty. We use this information
+ to decide whether to erase it or not. */
+ switch (ret) {
+ case 0: break;
+ case 1: break;
+ case 2: return BLK_STATE_BADBLOCK;
+ case 3: return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
+ default: return ret;
+ }
+ }
+#endif
+
+ if (jffs2_sum_active()) {
+ sm = kmalloc(sizeof(struct jffs2_sum_marker), GFP_KERNEL);
+ if (!sm) {
+ return -ENOMEM;
+ }
+
+ err = jffs2_flash_read_safe(c, jeb->offset + c->sector_size -
+ sizeof(struct jffs2_sum_marker),
+ sizeof(struct jffs2_sum_marker),
+ (unsigned char *) sm);
+ if (err) {
+ kfree(sm);
+ return err;
+ }
+
+ if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC ) {
+ err = jffs2_sum_scan_sumnode(c, jeb, je32_to_cpu(sm->offset), &pseudo_random);
+ if (err) {
+ kfree(sm);
+ return err;
+ }
+ }
+
+ kfree(sm);
+ }
+
+ buf_ofs = jeb->offset;
+
+ if (!buf_size) {
+ buf_len = c->sector_size;
+ } else {
+ buf_len = EMPTY_SCAN_SIZE(c->sector_size);
+ err = jffs2_flash_read_safe(c, buf_ofs, buf_len, buf);
+ if (err)
+ return err;
+ }
+
+ /* We temporarily use 'ofs' as a pointer into the buffer/jeb */
+ ofs = 0;
+
+ /* Scan only the first EMPTY_SCAN_SIZE bytes of 0xFF before declaring the block empty */
+ while(ofs < EMPTY_SCAN_SIZE(c->sector_size) && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
+ ofs += 4;
+
+ if (ofs == EMPTY_SCAN_SIZE(c->sector_size)) {
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ if (jffs2_ebh_oob(c)) {
+ /* scan oob, take care of cleanmarker */
+ int ret = jffs2_check_oob_empty(c, jeb, data_len);
+ D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret));
+ switch (ret) {
+ case 0: return data_len ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
+ case 1: return BLK_STATE_ALLDIRTY;
+ default: return ret;
+ }
+ }
+#endif
D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
- return 1; /* special return code */
+ if (c->cleanmarker_size == 0)
+ return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */
+ else
+ return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */
}
-
+ if (ofs) {
+ D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
+ jeb->offset + ofs));
+ DIRTY_SPACE(ofs);
+ }
+
+ /* Now ofs is a complete physical flash offset as it always was... */
+ ofs += jeb->offset;
+
noise = 10;
+ dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);
+
+scan_more:
while(ofs < jeb->offset + c->sector_size) {
- ssize_t retlen;
- ACCT_PARANOIA_CHECK(jeb);
-
+
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+
+ cond_resched();
+
if (ofs & 3) {
printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
- ofs = (ofs+3)&~3;
+ ofs = PAD(ofs);
continue;
}
if (ofs == prevofs) {
@@ -195,103 +431,189 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
continue;
}
prevofs = ofs;
-
- if (jeb->offset + c->sector_size < ofs + sizeof(node)) {
- D1(printk(KERN_DEBUG "Fewer than %d bytes left to end of block. Not reading\n", sizeof(struct jffs2_unknown_node)));
+
+ if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
+ D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
+ jeb->offset, c->sector_size, ofs, sizeof(*node)));
DIRTY_SPACE((jeb->offset + c->sector_size)-ofs);
break;
}
- err = c->mtd->read(c->mtd, ofs, sizeof(node), &retlen, (char *)&node);
-
- if (err) {
- D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", sizeof(node), ofs, err));
- return err;
- }
- if (retlen < sizeof(node)) {
- D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%x bytes\n", ofs, retlen));
- DIRTY_SPACE(retlen);
- ofs += retlen;
- continue;
+ if (buf_ofs + buf_len < ofs + sizeof(*node)) {
+ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+ D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
+ sizeof(struct jffs2_unknown_node), buf_len, ofs));
+ err = jffs2_flash_read_safe(c, ofs, buf_len, buf);
+ if (err)
+ return err;
+ buf_ofs = ofs;
}
- if (node.magic == JFFS2_EMPTY_BITMASK && node.nodetype == JFFS2_EMPTY_BITMASK) {
- D1(printk(KERN_DEBUG "Found empty flash at 0x%x\n", ofs));
- err = jffs2_scan_empty(c, jeb, &ofs, &noise);
- if (err) return err;
- continue;
+ node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];
+
+ if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
+ uint32_t inbuf_ofs;
+ uint32_t empty_start;
+
+ empty_start = ofs;
+ ofs += 4;
+
+ D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
+ more_empty:
+ inbuf_ofs = ofs - buf_ofs;
+ while (inbuf_ofs < buf_len) {
+ if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
+ printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
+ empty_start, ofs);
+ DIRTY_SPACE(ofs-empty_start);
+ goto scan_more;
+ }
+
+ inbuf_ofs+=4;
+ ofs += 4;
+ }
+ /* Ran off end. */
+ D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs));
+
+ /* If we're only checking the beginning of a block with a cleanmarker,
+ bail now */
+ if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
+ c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) {
+ D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
+ return BLK_STATE_CLEANMARKER;
+ }
+
+ if (EBFLAGS_HAS_EBH(jeb) && c->ebh_size) {
+ if (!jeb->first_node->next_phys && !jeb->dirty_size) {
+ D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
+ return BLK_STATE_CLEANMARKER;
+ }
+ }
+
+ /* See how much more there is to read in this eraseblock... */
+ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+ if (!buf_len) {
+ /* No more to read. Break out of main loop without marking
+ this range of empty space as dirty (because it's not) */
+ D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n",
+ empty_start));
+ break;
+ }
+ D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs));
+ err = jffs2_flash_read_safe(c, ofs, buf_len, buf);
+ if (err)
+ return err;
+ buf_ofs = ofs;
+ goto more_empty;
}
- if (ofs == jeb->offset && node.magic == KSAMTIB_CIGAM_2SFFJ) {
+ if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
DIRTY_SPACE(4);
ofs += 4;
continue;
}
- if (node.magic == JFFS2_DIRTY_BITMASK) {
- D1(printk(KERN_DEBUG "Empty bitmask at 0x%08x\n", ofs));
+ if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
+ D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
DIRTY_SPACE(4);
ofs += 4;
continue;
}
- if (node.magic == JFFS2_OLD_MAGIC_BITMASK) {
+ if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
DIRTY_SPACE(4);
ofs += 4;
continue;
}
- if (node.magic != JFFS2_MAGIC_BITMASK) {
+ if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
/* OK. We're out of possibilities. Whinge and move on */
- noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", JFFS2_MAGIC_BITMASK, ofs, node.magic);
+ noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
+ JFFS2_MAGIC_BITMASK, ofs,
+ je16_to_cpu(node->magic));
DIRTY_SPACE(4);
ofs += 4;
continue;
}
/* We seem to have a node of sorts. Check the CRC */
- nodetype = node.nodetype;
- node.nodetype |= JFFS2_NODE_ACCURATE;
- hdr_crc = crc32(0, &node, sizeof(node)-4);
- node.nodetype = nodetype;
- if (hdr_crc != node.hdr_crc) {
+ crcnode.magic = node->magic;
+ crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
+ crcnode.totlen = node->totlen;
+ hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);
+
+ if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
- ofs, node.magic, node.nodetype, node.totlen, node.hdr_crc, hdr_crc);
+ ofs, je16_to_cpu(node->magic),
+ je16_to_cpu(node->nodetype),
+ je32_to_cpu(node->totlen),
+ je32_to_cpu(node->hdr_crc),
+ hdr_crc);
DIRTY_SPACE(4);
ofs += 4;
continue;
}
- if (ofs + node.totlen > jeb->offset + c->sector_size) {
+ if (ofs + je32_to_cpu(node->totlen) >
+ jeb->offset + c->sector_size) {
/* Eep. Node goes over the end of the erase block. */
printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
- ofs, node.totlen);
- printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
- DIRTY_SPACE(4);
- ofs += 4;
+ ofs, je32_to_cpu(node->totlen));
+ printk(KERN_NOTICE "Perhaps the file system was created with the wrong erase size? Reject to mount.\n");
+ return -EINVAL;
+ }
+
+ if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
+ /* Wheee. This is an obsoleted node */
+ D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
+ DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+ ofs += PAD(je32_to_cpu(node->totlen));
continue;
}
- switch(node.nodetype | JFFS2_NODE_ACCURATE) {
+ switch(je16_to_cpu(node->nodetype)) {
case JFFS2_NODETYPE_INODE:
- err = jffs2_scan_inode_node(c, jeb, &ofs);
+ if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
+ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+ D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
+ sizeof(struct jffs2_raw_inode), buf_len, ofs));
+ err = jffs2_flash_read_safe(c, ofs, buf_len, buf);
+ if (err)
+ return err;
+ buf_ofs = ofs;
+ node = (void *)buf;
+ }
+ err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
if (err) return err;
+ ofs += PAD(je32_to_cpu(node->totlen));
break;
-
+
case JFFS2_NODETYPE_DIRENT:
- err = jffs2_scan_dirent_node(c, jeb, &ofs);
+ if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
+ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+ D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
+ je32_to_cpu(node->totlen), buf_len, ofs));
+ err = jffs2_flash_read_safe(c, ofs, buf_len, buf);
+ if (err)
+ return err;
+ buf_ofs = ofs;
+ node = (void *)buf;
+ }
+ err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
if (err) return err;
+ ofs += PAD(je32_to_cpu(node->totlen));
break;
case JFFS2_NODETYPE_CLEANMARKER:
- if (node.totlen != sizeof(struct jffs2_unknown_node)) {
- printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
- ofs, node.totlen, sizeof(struct jffs2_unknown_node));
+ D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
+ if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
+ printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
+ ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
+ ofs += PAD(sizeof(struct jffs2_unknown_node));
} else if (jeb->first_node) {
printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
ofs += PAD(sizeof(struct jffs2_unknown_node));
- continue;
} else {
struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
if (!marker_ref) {
@@ -300,98 +622,95 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
}
marker_ref->next_in_ino = NULL;
marker_ref->next_phys = NULL;
- marker_ref->flash_offset = ofs;
- marker_ref->totlen = sizeof(struct jffs2_unknown_node);
+ marker_ref->flash_offset = ofs | REF_NORMAL;
+ marker_ref->__totlen = c->cleanmarker_size;
jeb->first_node = jeb->last_node = marker_ref;
-
- USED_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
+
+ USED_SPACE(PAD(c->cleanmarker_size));
+ ofs += PAD(c->cleanmarker_size);
}
- ofs += PAD(sizeof(struct jffs2_unknown_node));
+ break;
+
+ case JFFS2_NODETYPE_ERASEBLOCK_HEADER:
+ if (ofs != jeb->offset) {
+ printk(KERN_NOTICE "Eraseblock header found at 0x%08x is not at the beginning of block (0x%08x)\n", ofs, jeb->offset);
+ DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+ ofs += PAD(je32_to_cpu(node->totlen));
+ } else {
+ if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
+ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+ err = jffs2_flash_read_safe(c, ofs, buf_len, buf);
+ if (err)
+ return err;
+ buf_ofs = ofs;
+ node = (void *)buf;
+ }
+ err = jffs2_scan_eraseblock_header(c, jeb, (void *)node, ofs, s);
+ if (err) return err;
+ ofs += PAD(je32_to_cpu(node->totlen));
+ }
+ break;
+
+ case JFFS2_NODETYPE_PADDING:
+ if (jffs2_sum_active())
+ jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
+ DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+ ofs += PAD(je32_to_cpu(node->totlen));
break;
default:
- switch (node.nodetype & JFFS2_COMPAT_MASK) {
+ switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
case JFFS2_FEATURE_ROCOMPAT:
- printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", node.nodetype, ofs);
+ printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
c->flags |= JFFS2_SB_FLAG_RO;
- if (!(OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY))
+ if (!(jffs2_is_readonly(c)))
return -EROFS;
- DIRTY_SPACE(PAD(node.totlen));
- ofs += PAD(node.totlen);
- continue;
+ DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+ ofs += PAD(je32_to_cpu(node->totlen));
+ break;
case JFFS2_FEATURE_INCOMPAT:
- printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", node.nodetype, ofs);
+ printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
return -EINVAL;
case JFFS2_FEATURE_RWCOMPAT_DELETE:
- printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", node.nodetype, ofs);
- DIRTY_SPACE(PAD(node.totlen));
- ofs += PAD(node.totlen);
+ D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
+ DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
+ ofs += PAD(je32_to_cpu(node->totlen));
break;
case JFFS2_FEATURE_RWCOMPAT_COPY:
- printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", node.nodetype, ofs);
- USED_SPACE(PAD(node.totlen));
- ofs += PAD(node.totlen);
+ D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
+ USED_SPACE(PAD(je32_to_cpu(node->totlen)));
+ ofs += PAD(je32_to_cpu(node->totlen));
break;
}
}
}
- D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, used 0x%08x\n", jeb->offset,
- jeb->free_size, jeb->dirty_size, jeb->used_size));
- return 0;
-}
-/* We're pointing at the first empty word on the flash. Scan and account for the whole dirty region */
-static int jffs2_scan_empty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, __u32 *startofs, int *noise)
-{
- __u32 *buf;
- __u32 scanlen = (jeb->offset + c->sector_size) - *startofs;
- __u32 curofs = *startofs;
-
- buf = kmalloc(min((__u32)PAGE_SIZE, scanlen), GFP_KERNEL);
- if (!buf) {
- printk(KERN_WARNING "Scan buffer allocation failed\n");
- return -ENOMEM;
- }
- while(scanlen) {
- ssize_t retlen;
- int ret, i;
-
- ret = c->mtd->read(c->mtd, curofs, min((__u32)PAGE_SIZE, scanlen), &retlen, (char *)buf);
- if(ret) {
- D1(printk(KERN_WARNING "jffs2_scan_empty(): Read 0x%x bytes at 0x%08x returned %d\n", min((__u32)PAGE_SIZE, scanlen), curofs, ret));
- kfree(buf);
- return ret;
- }
- if (retlen < 4) {
- D1(printk(KERN_WARNING "Eep. too few bytes read in scan_empty()\n"));
- kfree(buf);
- return -EIO;
+ if (jffs2_sum_active()) {
+ if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
+ dbg_summary("There is not enough space for "
+ "summary information, disabling for this jeb!\n");
+ jffs2_sum_disable_collecting(s);
}
- for (i=0; i<(retlen / 4); i++) {
- if (buf[i] != 0xffffffff) {
- curofs += i*4;
-
- noisy_printk(noise, "jffs2_scan_empty(): Empty block at 0x%08x ends at 0x%08x (with 0x%08x)! Marking dirty\n", *startofs, curofs, buf[i]);
- DIRTY_SPACE(curofs - (*startofs));
- *startofs = curofs;
- kfree(buf);
- return 0;
- }
- }
- scanlen -= retlen&~3;
- curofs += retlen&~3;
}
- D1(printk(KERN_DEBUG "Empty flash detected from 0x%08x to 0x%08x\n", *startofs, curofs));
- kfree(buf);
- *startofs = curofs;
- return 0;
+ D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset,
+ jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));
+
+ /* mark_node_obsolete can add to wasted !! */
+ if (jeb->wasted_size) {
+ jeb->dirty_size += jeb->wasted_size;
+ c->dirty_size += jeb->wasted_size;
+ c->wasted_size -= jeb->wasted_size;
+ jeb->wasted_size = 0;
+ }
+
+ return jffs2_scan_classify_jeb(c, jeb);
}
-static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, __u32 ino)
+struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
struct jffs2_inode_cache *ic;
@@ -399,137 +718,77 @@ static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info
if (ic)
return ic;
+ if (ino > c->highest_ino)
+ c->highest_ino = ino;
+
ic = jffs2_alloc_inode_cache();
if (!ic) {
printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n");
return NULL;
}
memset(ic, 0, sizeof(*ic));
- ic->scan = kmalloc(sizeof(struct jffs2_scan_info), GFP_KERNEL);
- if (!ic->scan) {
- printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of scan info for inode cache failed\n");
- jffs2_free_inode_cache(ic);
- return NULL;
- }
- memset(ic->scan, 0, sizeof(*ic->scan));
+
ic->ino = ino;
ic->nodes = (void *)ic;
jffs2_add_ino_cache(c, ic);
if (ino == 1)
- ic->nlink=1;
+ ic->nlink = 1;
return ic;
}
-static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, __u32 *ofs)
+static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
{
struct jffs2_raw_node_ref *raw;
- struct jffs2_full_dnode *fn;
- struct jffs2_tmp_dnode_info *tn, **tn_list;
struct jffs2_inode_cache *ic;
- struct jffs2_raw_inode ri;
- __u32 crc;
- __u16 oldnodetype;
- int ret;
- ssize_t retlen;
-
- D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", *ofs));
-
- ret = c->mtd->read(c->mtd, *ofs, sizeof(ri), &retlen, (char *)&ri);
- if (ret) {
- printk(KERN_NOTICE "jffs2_scan_inode_node(): Read error at 0x%08x: %d\n", *ofs, ret);
- return ret;
- }
- if (retlen != sizeof(ri)) {
- printk(KERN_NOTICE "Short read: 0x%x bytes at 0x%08x instead of requested %x\n",
- retlen, *ofs, sizeof(ri));
- return -EIO;
- }
-
- /* We sort of assume that the node was accurate when it was
- first written to the medium :) */
- oldnodetype = ri.nodetype;
- ri.nodetype |= JFFS2_NODE_ACCURATE;
- crc = crc32(0, &ri, sizeof(ri)-8);
- ri.nodetype = oldnodetype;
-
- if(crc != ri.node_crc) {
- printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- *ofs, ri.node_crc, crc);
- /* FIXME: Why do we believe totlen? */
- DIRTY_SPACE(4);
- *ofs += 4;
- return 0;
- }
- /* There was a bug where we wrote hole nodes out with csize/dsize
- swapped. Deal with it */
- if (ri.compr == JFFS2_COMPR_ZERO && !ri.dsize && ri.csize) {
- ri.dsize = ri.csize;
- ri.csize = 0;
- }
+ uint32_t ino = je32_to_cpu(ri->ino);
- if (ri.csize) {
- /* Check data CRC too */
- unsigned char *dbuf;
- __u32 crc;
+ D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));
- dbuf = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
- if (!dbuf) {
- printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of temporary data buffer for CRC check failed\n");
- return -ENOMEM;
- }
- ret = c->mtd->read(c->mtd, *ofs+sizeof(ri), ri.csize, &retlen, dbuf);
- if (ret) {
- printk(KERN_NOTICE "jffs2_scan_inode_node(): Read error at 0x%08x: %d\n", *ofs+sizeof(ri), ret);
- kfree(dbuf);
- return ret;
- }
- if (retlen != ri.csize) {
- printk(KERN_NOTICE "Short read: 0x%x bytes at 0x%08x instead of requested %x\n",
- retlen, *ofs+ sizeof(ri), ri.csize);
- kfree(dbuf);
- return -EIO;
- }
- crc = crc32(0, dbuf, ri.csize);
- kfree(dbuf);
- if (crc != ri.data_crc) {
- printk(KERN_NOTICE "jffs2_scan_inode_node(): Data CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- *ofs, ri.data_crc, crc);
- DIRTY_SPACE(PAD(ri.totlen));
- *ofs += PAD(ri.totlen);
- return 0;
- }
- }
+ /* We do very little here now. Just check the ino# to which we should attribute
+ this node; we can do all the CRC checking etc. later. There's a tradeoff here --
+ we used to scan the flash once only, reading everything we want from it into
+ memory, then building all our in-core data structures and freeing the extra
+ information. Now we allow the first part of the mount to complete a lot quicker,
+ but we have to go _back_ to the flash in order to finish the CRC checking, etc.
+ Which means that the _full_ amount of time to get to proper write mode with GC
+ operational may actually be _longer_ than before. Sucks to be me. */
- /* Wheee. It worked */
raw = jffs2_alloc_raw_node_ref();
if (!raw) {
printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n");
return -ENOMEM;
}
- tn = jffs2_alloc_tmp_dnode_info();
- if (!tn) {
- jffs2_free_raw_node_ref(raw);
- return -ENOMEM;
- }
- fn = jffs2_alloc_full_dnode();
- if (!fn) {
- jffs2_free_tmp_dnode_info(tn);
- jffs2_free_raw_node_ref(raw);
- return -ENOMEM;
- }
- ic = jffs2_scan_make_ino_cache(c, ri.ino);
+
+ ic = jffs2_get_ino_cache(c, ino);
if (!ic) {
- jffs2_free_full_dnode(fn);
- jffs2_free_tmp_dnode_info(tn);
- jffs2_free_raw_node_ref(raw);
- return -ENOMEM;
+ /* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
+ first node we found for this inode. Do a CRC check to protect against the former
+ case */
+ uint32_t crc = crc32(0, ri, sizeof(*ri)-8);
+
+ if (crc != je32_to_cpu(ri->node_crc)) {
+ printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ofs, je32_to_cpu(ri->node_crc), crc);
+ /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
+ DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen)));
+ jffs2_free_raw_node_ref(raw);
+ return 0;
+ }
+ ic = jffs2_scan_make_ino_cache(c, ino);
+ if (!ic) {
+ jffs2_free_raw_node_ref(raw);
+ return -ENOMEM;
+ }
}
- /* Build the data structures and file them for later */
- raw->flash_offset = *ofs;
- raw->totlen = PAD(ri.totlen);
+ /* Wheee. It worked */
+
+ raw->flash_offset = ofs | REF_UNCHECKED;
+ raw->__totlen = PAD(je32_to_cpu(ri->totlen));
raw->next_phys = NULL;
raw->next_in_ino = ic->nodes;
+
ic->nodes = raw;
if (!jeb->first_node)
jeb->first_node = raw;
@@ -537,135 +796,62 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
jeb->last_node->next_phys = raw;
jeb->last_node = raw;
- D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
- ri.ino, ri.version, ri.offset, ri.offset+ri.dsize));
+ D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
+ je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
+ je32_to_cpu(ri->offset),
+ je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));
- pseudo_random += ri.version;
+ pseudo_random += je32_to_cpu(ri->version);
- for (tn_list = &ic->scan->tmpnodes; *tn_list; tn_list = &((*tn_list)->next)) {
- if ((*tn_list)->version < ri.version)
- continue;
- if ((*tn_list)->version > ri.version)
- break;
- /* Wheee. We've found another instance of the same version number.
- We should obsolete one of them.
- */
- D1(printk(KERN_DEBUG "Duplicate version %d found in ino #%u. Previous one is at 0x%08x\n", ri.version, ic->ino, (*tn_list)->fn->raw->flash_offset &~3));
- if (!jeb->used_size) {
- D1(printk(KERN_DEBUG "No valid nodes yet found in this eraseblock 0x%08x, so obsoleting the new instance at 0x%08x\n",
- jeb->offset, raw->flash_offset & ~3));
- ri.nodetype &= ~JFFS2_NODE_ACCURATE;
- /* Perhaps we could also mark it as such on the medium. Maybe later */
- }
- break;
- }
-
- if (ri.nodetype & JFFS2_NODE_ACCURATE) {
- memset(fn,0,sizeof(*fn));
-
- fn->ofs = ri.offset;
- fn->size = ri.dsize;
- fn->frags = 0;
- fn->raw = raw;
-
- tn->next = NULL;
- tn->fn = fn;
- tn->version = ri.version;
+ UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));
- USED_SPACE(PAD(ri.totlen));
- jffs2_add_tn_to_list(tn, &ic->scan->tmpnodes);
- /* Make sure the one we just added is the _last_ in the list
- with this version number, so the older ones get obsoleted */
- while (tn->next && tn->next->version == tn->version) {
-
- D1(printk(KERN_DEBUG "Shifting new node at 0x%08x after other node at 0x%08x for version %d in list\n",
- fn->raw->flash_offset&~3, tn->next->fn->raw->flash_offset &~3, ri.version));
+ if (jffs2_sum_active()) {
+ jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
+ }
- if(tn->fn != fn)
- BUG();
- tn->fn = tn->next->fn;
- tn->next->fn = fn;
- tn = tn->next;
- }
- } else {
- jffs2_free_full_dnode(fn);
- jffs2_free_tmp_dnode_info(tn);
- raw->flash_offset |= 1;
- DIRTY_SPACE(PAD(ri.totlen));
- }
- *ofs += PAD(ri.totlen);
return 0;
}
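
As the comment in jffs2_scan_inode_node() above notes, data CRCs are no longer validated up front: the node is filed as REF_UNCHECKED and accounted with UNCHECKED_SPACE(), and the check is deferred. The reference state rides in the low bits of raw->flash_offset, which are free because node offsets are PAD()ed to a 4-byte boundary. A minimal userspace sketch of that packing; the flag values below are illustrative stand-ins for the real definitions in nodelist.h:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values only; the real definitions live in
 * fs/jffs2/nodelist.h. Offsets are 4-byte aligned, so the two low
 * bits of flash_offset are available for the reference state. */
#define REF_UNCHECKED 0x0	/* CRC not verified yet */
#define REF_OBSOLETE  0x1
#define REF_PRISTINE  0x2
#define REF_NORMAL    0x3

static uint32_t pack(uint32_t ofs, uint32_t state)
{
	assert((ofs & 3) == 0);	/* PAD()ed offsets are 4-byte aligned */
	return ofs | state;
}

int main(void)
{
	uint32_t ref = pack(0x00041000, REF_UNCHECKED);

	printf("offset 0x%08x, state %u\n", ref & ~3u, ref & 3u);
	/* Once the node's CRC has been verified, the state is upgraded
	 * in place without disturbing the offset bits. */
	ref = (ref & ~3u) | REF_NORMAL;
	printf("offset 0x%08x, state %u\n", ref & ~3u, ref & 3u);
	return 0;
}
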
-static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, __u32 *ofs)
+static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
{
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *fd;
struct jffs2_inode_cache *ic;
- struct jffs2_raw_dirent rd;
- __u16 oldnodetype;
- int ret;
- __u32 crc;
- ssize_t retlen;
-
- D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", *ofs));
+ uint32_t crc;
- ret = c->mtd->read(c->mtd, *ofs, sizeof(rd), &retlen, (char *)&rd);
- if (ret) {
- printk(KERN_NOTICE "jffs2_scan_dirent_node(): Read error at 0x%08x: %d\n", *ofs, ret);
- return ret;
- }
- if (retlen != sizeof(rd)) {
- printk(KERN_NOTICE "Short read: 0x%x bytes at 0x%08x instead of requested %x\n",
- retlen, *ofs, sizeof(rd));
- return -EIO;
- }
+ D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));
- /* We sort of assume that the node was accurate when it was
- first written to the medium :) */
- oldnodetype = rd.nodetype;
- rd.nodetype |= JFFS2_NODE_ACCURATE;
- crc = crc32(0, &rd, sizeof(rd)-8);
- rd.nodetype = oldnodetype;
+ /* We don't get here unless the node is still valid, so we don't have to
+ mask in the ACCURATE bit any more. */
+ crc = crc32(0, rd, sizeof(*rd)-8);
- if (crc != rd.node_crc) {
+ if (crc != je32_to_cpu(rd->node_crc)) {
printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- *ofs, rd.node_crc, crc);
- /* FIXME: Why do we believe totlen? */
- DIRTY_SPACE(4);
- *ofs += 4;
+ ofs, je32_to_cpu(rd->node_crc), crc);
+ /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
+ DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
return 0;
}
- pseudo_random += rd.version;
+ pseudo_random += je32_to_cpu(rd->version);
- fd = jffs2_alloc_full_dirent(rd.nsize+1);
+ fd = jffs2_alloc_full_dirent(rd->nsize+1);
if (!fd) {
return -ENOMEM;
-}
- ret = c->mtd->read(c->mtd, *ofs + sizeof(rd), rd.nsize, &retlen, &fd->name[0]);
- if (ret) {
- jffs2_free_full_dirent(fd);
- printk(KERN_NOTICE "jffs2_scan_dirent_node(): Read error at 0x%08x: %d\n",
- *ofs + sizeof(rd), ret);
- return ret;
}
- if (retlen != rd.nsize) {
- jffs2_free_full_dirent(fd);
- printk(KERN_NOTICE "Short read: 0x%x bytes at 0x%08x instead of requested %x\n",
- retlen, *ofs + sizeof(rd), rd.nsize);
- return -EIO;
- }
- crc = crc32(0, fd->name, rd.nsize);
- if (crc != rd.name_crc) {
+ memcpy(&fd->name, rd->name, rd->nsize);
+ fd->name[rd->nsize] = 0;
+
+ crc = crc32(0, fd->name, rd->nsize);
+ if (crc != je32_to_cpu(rd->name_crc)) {
printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
- *ofs, rd.name_crc, crc);
- fd->name[rd.nsize]=0;
- D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, rd.ino));
+ ofs, je32_to_cpu(rd->name_crc), crc);
+ D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
jffs2_free_full_dirent(fd);
/* FIXME: Why do we believe totlen? */
- DIRTY_SPACE(PAD(rd.totlen));
- *ofs += PAD(rd.totlen);
+ /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
+ DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
return 0;
}
raw = jffs2_alloc_raw_node_ref();
@@ -674,15 +860,15 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n");
return -ENOMEM;
}
- ic = jffs2_scan_make_ino_cache(c, rd.pino);
+ ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
if (!ic) {
jffs2_free_full_dirent(fd);
jffs2_free_raw_node_ref(raw);
return -ENOMEM;
}
-
- raw->totlen = PAD(rd.totlen);
- raw->flash_offset = *ofs;
+
+ raw->__totlen = PAD(je32_to_cpu(rd->totlen));
+ raw->flash_offset = ofs | REF_PRISTINE;
raw->next_phys = NULL;
raw->next_in_ino = ic->nodes;
ic->nodes = raw;
@@ -692,24 +878,72 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
jeb->last_node->next_phys = raw;
jeb->last_node = raw;
- if (rd.nodetype & JFFS2_NODE_ACCURATE) {
- fd->raw = raw;
- fd->next = NULL;
- fd->version = rd.version;
- fd->ino = rd.ino;
- fd->name[rd.nsize]=0;
- fd->nhash = full_name_hash(fd->name, rd.nsize);
- fd->type = rd.type;
-
- USED_SPACE(PAD(rd.totlen));
- jffs2_add_fd_to_list(c, fd, &ic->scan->dents);
- } else {
- raw->flash_offset |= 1;
- jffs2_free_full_dirent(fd);
+ fd->raw = raw;
+ fd->next = NULL;
+ fd->version = je32_to_cpu(rd->version);
+ fd->ino = je32_to_cpu(rd->ino);
+ fd->nhash = full_name_hash(fd->name, rd->nsize);
+ fd->type = rd->type;
+ USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
+ jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
+
+ if (jffs2_sum_active()) {
+ jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
+ }
+
+ return 0;
+}
+
+static int jffs2_scan_eraseblock_header(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_ebh *eh, uint32_t ofs, struct jffs2_summary *s)
+{
+ uint32_t crc, node_crc;
+ struct jffs2_raw_node_ref *raw;
+
+ D1(printk(KERN_DEBUG "jffs2_scan_eraseblock_header(): Node at 0x%08x\n", ofs));
+ crc = crc32(0, (unsigned char *)eh + sizeof(struct jffs2_unknown_node) + 4,
+ sizeof(struct jffs2_raw_ebh) - sizeof(struct jffs2_unknown_node) - 4);
+ node_crc = je32_to_cpu(eh->node_crc);
+
+ if (crc != node_crc) {
+ printk(KERN_NOTICE "jffs2_scan_eraseblock_header(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
+ ofs, node_crc, crc);
+ DIRTY_SPACE(PAD(je32_to_cpu(eh->totlen)));
+ return 0;
+ }
+
+ if ((JFFS2_EBH_INCOMPAT_FSET | eh->incompat_fset) != JFFS2_EBH_INCOMPAT_FSET) {
+		printk(KERN_NOTICE "The incompat_fset of fs image EBH %d exceeds the incompat_fset of the JFFS2 module %d. Refusing to mount.\n",
+		       eh->incompat_fset, JFFS2_EBH_INCOMPAT_FSET);
+ return -EINVAL;
+ }
+ if ((JFFS2_EBH_ROCOMPAT_FSET | eh->rocompat_fset) != JFFS2_EBH_ROCOMPAT_FSET) {
+		printk(KERN_NOTICE "Read-only compatible EBH feature found at offset 0x%08x\n", jeb->offset);
+ if (!(jffs2_is_readonly(c)))
+ return -EROFS;
+ }
+
+ raw = jffs2_alloc_raw_node_ref();
+ if (!raw) {
+ printk(KERN_NOTICE "jffs2_scan_eraseblock_header(): allocation of node reference failed.\n");
+ return -ENOMEM;
+ }
+
+ EBFLAGS_SET_EBH(jeb);
+ jeb->erase_count = je32_to_cpu(eh->erase_count);
+ record_erase_count(c, jeb);
+
+ raw->next_in_ino = NULL;
+ raw->next_phys = NULL;
+ raw->flash_offset = ofs | REF_NORMAL;
+ raw->__totlen = PAD(je32_to_cpu(eh->totlen));
+ jeb->first_node = jeb->last_node = raw;
+
+ USED_SPACE(PAD(je32_to_cpu(eh->totlen)));
+ if (jffs2_sum_active()) {
+ jffs2_sum_add_ebh_mem(s, eh, ofs - jeb->offset);
+ }
- DIRTY_SPACE(PAD(rd.totlen));
- }
- *ofs += PAD(rd.totlen);
return 0;
}
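
The feature-set tests in jffs2_scan_eraseblock_header() use the pattern (KNOWN | image_bits) != KNOWN, which is a subset check: ORing in bits we already support cannot change the mask, so the expression only trips when the image carries a feature bit this module does not know about. A small sketch of the same logic with made-up masks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up feature mask, purely for illustration. */
#define MODULE_INCOMPAT_FSET 0x03	/* incompat features this driver implements */

/* True when every bit set in image_fset is also known to the module. */
static bool incompat_features_supported(uint8_t image_fset)
{
	return (MODULE_INCOMPAT_FSET | image_fset) == MODULE_INCOMPAT_FSET;
}

int main(void)
{
	printf("%d\n", incompat_features_supported(0x01));	/* 1: subset, mountable */
	printf("%d\n", incompat_features_supported(0x05));	/* 0: unknown bit 0x04, refuse */
	return 0;
}
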
@@ -731,26 +965,48 @@ static void rotate_list(struct list_head *head, uint32_t count)
struct list_head *n = head->next;
list_del(head);
- while(count--)
+ while(count--) {
n = n->next;
+ }
list_add(head, n);
}
-static void jffs2_rotate_lists(struct jffs2_sb_info *c)
+void jffs2_rotate_lists(struct jffs2_sb_info *c)
{
uint32_t x;
+ uint32_t rotateby;
x = count_list(&c->clean_list);
- if (x)
- rotate_list((&c->clean_list), pseudo_random % x);
+ if (x) {
+ rotateby = pseudo_random % x;
+ rotate_list((&c->clean_list), rotateby);
+ }
+
+ x = count_list(&c->very_dirty_list);
+ if (x) {
+ rotateby = pseudo_random % x;
+ rotate_list((&c->very_dirty_list), rotateby);
+ }
x = count_list(&c->dirty_list);
- if (x)
- rotate_list((&c->dirty_list), pseudo_random % x);
+ if (x) {
+ rotateby = pseudo_random % x;
+ rotate_list((&c->dirty_list), rotateby);
+ }
+
+ x = count_list(&c->erasable_list);
+ if (x) {
+ rotateby = pseudo_random % x;
+ rotate_list((&c->erasable_list), rotateby);
+ }
- if (c->nr_erasing_blocks)
- rotate_list((&c->erase_pending_list), pseudo_random % c->nr_erasing_blocks);
+ if (c->nr_erasing_blocks) {
+ rotateby = pseudo_random % c->nr_erasing_blocks;
+ rotate_list((&c->erase_pending_list), rotateby);
+ }
- if (c->nr_free_blocks) /* Not that it should ever be zero */
- rotate_list((&c->free_list), pseudo_random % c->nr_free_blocks);
+ if (c->nr_free_blocks) {
+ rotateby = pseudo_random % c->nr_free_blocks;
+ rotate_list((&c->free_list), rotateby);
+ }
}
diff --git a/linux-2.4.x/fs/jffs2/summary.c b/linux-2.4.x/fs/jffs2/summary.c
new file mode 100644
index 0000000..6f85469
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/summary.c
@@ -0,0 +1,865 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ * Zoltan Sogor <weth@inf.u-szeged.hu>,
+ * Patrik Kluba <pajko@halom.u-szeged.hu>,
+ * University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: summary.c,v 1.10 2005/11/23 11:09:29 havasi Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/pagemap.h>
+#include <linux/crc32.h>
+#include <linux/compiler.h>
+#include <linux/vmalloc.h>
+#include "nodelist.h"
+#include "debug.h"
+
+int jffs2_sum_init(struct jffs2_sb_info *c)
+{
+ c->summary = kmalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
+
+ if (!c->summary) {
+ JFFS2_WARNING("Can't allocate memory for summary information!\n");
+ return -ENOMEM;
+ }
+
+ memset(c->summary, 0, sizeof(struct jffs2_summary));
+
+ c->summary->sum_buf = vmalloc(c->sector_size);
+
+ if (!c->summary->sum_buf) {
+ JFFS2_WARNING("Can't allocate buffer for writing out summary information!\n");
+ kfree(c->summary);
+ return -ENOMEM;
+ }
+
+	dbg_summary("returned successfully\n");
+
+ return 0;
+}
+
+void jffs2_sum_exit(struct jffs2_sb_info *c)
+{
+ dbg_summary("called\n");
+
+ jffs2_sum_disable_collecting(c->summary);
+
+ vfree(c->summary->sum_buf);
+ c->summary->sum_buf = NULL;
+
+ kfree(c->summary);
+ c->summary = NULL;
+}
+
+static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item)
+{
+ if (!s->sum_list_head)
+ s->sum_list_head = (union jffs2_sum_mem *) item;
+ if (s->sum_list_tail)
+ s->sum_list_tail->u.next = (union jffs2_sum_mem *) item;
+ s->sum_list_tail = (union jffs2_sum_mem *) item;
+
+ switch (je16_to_cpu(item->u.nodetype)) {
+ case JFFS2_NODETYPE_INODE:
+ s->sum_size += JFFS2_SUMMARY_INODE_SIZE;
+ s->sum_num++;
+ dbg_summary("inode (%u) added to summary\n",
+ je32_to_cpu(item->i.inode));
+ break;
+ case JFFS2_NODETYPE_DIRENT:
+ s->sum_size += JFFS2_SUMMARY_DIRENT_SIZE(item->d.nsize);
+ s->sum_num++;
+ dbg_summary("dirent (%u) added to summary\n",
+ je32_to_cpu(item->d.ino));
+ break;
+ case JFFS2_NODETYPE_ERASEBLOCK_HEADER:
+ s->sum_size += sizeof(struct jffs2_sum_ebh_flash) + je32_to_cpu(item->eh.totlen)
+ - sizeof(struct jffs2_raw_ebh);
+ s->sum_num++;
+ dbg_summary("eraseblock header (%x) added to summary\n",
+ je32_to_cpu(item->eh.offset));
+ break;
+ default:
+ JFFS2_WARNING("UNKNOWN node type %u\n",
+ je16_to_cpu(item->u.nodetype));
+ return 1;
+ }
+ return 0;
+}
+
+
+/* The following three functions are called from scan.c to collect summary information for a jeb that is not yet closed */
+
+int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size)
+{
+ dbg_summary("called with %u\n", size);
+ s->sum_padded += size;
+ return 0;
+}
+
+int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri,
+ uint32_t ofs)
+{
+ struct jffs2_sum_inode_mem *temp = kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL);
+
+ if (!temp)
+ return -ENOMEM;
+
+ temp->nodetype = ri->nodetype;
+ temp->inode = ri->ino;
+ temp->version = ri->version;
+	temp->offset = cpu_to_je32(ofs);	/* relative offset from the beginning of the jeb */
+ temp->totlen = ri->totlen;
+ temp->next = NULL;
+
+ return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
+}
+
+int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd,
+ uint32_t ofs)
+{
+ struct jffs2_sum_dirent_mem *temp =
+ kmalloc(sizeof(struct jffs2_sum_dirent_mem) + rd->nsize, GFP_KERNEL);
+
+ if (!temp)
+ return -ENOMEM;
+
+ temp->nodetype = rd->nodetype;
+ temp->totlen = rd->totlen;
+	temp->offset = cpu_to_je32(ofs);	/* relative offset from the beginning of the jeb */
+ temp->pino = rd->pino;
+ temp->version = rd->version;
+ temp->ino = rd->ino;
+ temp->nsize = rd->nsize;
+ temp->type = rd->type;
+ temp->next = NULL;
+
+ memcpy(temp->name, rd->name, rd->nsize);
+
+ return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
+}
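
jffs2_sum_add_dirent_mem() above allocates the fixed part of the record and the variable-length name in a single kmalloc(), relying on the trailing name[0] member of struct jffs2_sum_dirent_mem. A userspace sketch of the same pattern with a simplified, hypothetical record type (using a C99 flexible array member instead of the kernel's zero-length array):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for jffs2_sum_dirent_mem: a fixed header followed
 * by a name whose length is only known at run time. */
struct dirent_rec {
	uint32_t ino;
	uint8_t  nsize;
	char     name[];	/* flexible array member */
};

static struct dirent_rec *dirent_rec_new(uint32_t ino, const char *name)
{
	uint8_t nsize = (uint8_t)strlen(name);
	struct dirent_rec *rec = malloc(sizeof(*rec) + nsize + 1);

	if (!rec)
		return NULL;
	rec->ino = ino;
	rec->nsize = nsize;
	memcpy(rec->name, name, nsize);
	rec->name[nsize] = '\0';
	return rec;
}

int main(void)
{
	struct dirent_rec *rec = dirent_rec_new(42, "config.txt");

	if (rec) {
		printf("ino %u name %s (%u name bytes)\n",
		       (unsigned)rec->ino, rec->name, (unsigned)rec->nsize);
		free(rec);
	}
	return 0;
}
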
+
+int jffs2_sum_add_ebh_mem(struct jffs2_summary *s, struct jffs2_raw_ebh *eh, uint32_t ofs)
+{
+ struct jffs2_sum_ebh_mem *temp = kmalloc(sizeof(struct jffs2_sum_ebh_mem) + je32_to_cpu(eh->totlen)
+ - sizeof(struct jffs2_raw_ebh), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ temp->nodetype = eh->nodetype;
+ temp->totlen = eh->totlen;
+ temp->offset = cpu_to_je32(ofs);
+ temp->reserved = eh->reserved;
+ temp->compat_fset = eh->compat_fset;
+ temp->incompat_fset = eh->incompat_fset;
+ temp->rocompat_fset = eh->rocompat_fset;
+ temp->erase_count = eh->erase_count;
+ temp->next = NULL;
+
+ memcpy(temp->data, eh->data, je32_to_cpu(eh->totlen) - sizeof(struct jffs2_raw_ebh));
+
+ return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
+}
+
+/* Clean up all collected summary information */
+
+static void jffs2_sum_clean_collected(struct jffs2_summary *s)
+{
+ union jffs2_sum_mem *temp;
+
+ if (!s->sum_list_head) {
+ dbg_summary("already empty\n");
+ }
+ while (s->sum_list_head) {
+ temp = s->sum_list_head;
+ s->sum_list_head = s->sum_list_head->u.next;
+ kfree(temp);
+ }
+ s->sum_list_tail = NULL;
+ s->sum_padded = 0;
+ s->sum_num = 0;
+}
+
+void jffs2_sum_reset_collected(struct jffs2_summary *s)
+{
+ dbg_summary("called\n");
+ jffs2_sum_clean_collected(s);
+ s->sum_size = 0;
+}
+
+void jffs2_sum_disable_collecting(struct jffs2_summary *s)
+{
+ dbg_summary("called\n");
+ jffs2_sum_clean_collected(s);
+ s->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
+}
+
+int jffs2_sum_is_disabled(struct jffs2_summary *s)
+{
+ return (s->sum_size == JFFS2_SUMMARY_NOSUM_SIZE);
+}
+
+/* Move the collected summary information into sb (called from scan.c) */
+
+void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s)
+{
+ dbg_summary("oldsize=0x%x oldnum=%u => newsize=0x%x newnum=%u\n",
+ c->summary->sum_size, c->summary->sum_num,
+ s->sum_size, s->sum_num);
+
+ c->summary->sum_size = s->sum_size;
+ c->summary->sum_num = s->sum_num;
+ c->summary->sum_padded = s->sum_padded;
+ c->summary->sum_list_head = s->sum_list_head;
+ c->summary->sum_list_tail = s->sum_list_tail;
+
+ s->sum_list_head = s->sum_list_tail = NULL;
+}
+
+/* Called from wbuf.c to collect written node info */
+
+int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
+ unsigned long count, uint32_t ofs)
+{
+ union jffs2_node_union *node;
+ struct jffs2_eraseblock *jeb;
+
+ node = invecs[0].iov_base;
+ jeb = c->blocks[ofs / c->sector_size];
+ ofs -= jeb->offset;
+
+ switch (je16_to_cpu(node->u.nodetype)) {
+ case JFFS2_NODETYPE_INODE: {
+ struct jffs2_sum_inode_mem *temp =
+ kmalloc(sizeof(struct jffs2_sum_inode_mem), GFP_KERNEL);
+
+ if (!temp)
+ goto no_mem;
+
+ temp->nodetype = node->i.nodetype;
+ temp->inode = node->i.ino;
+ temp->version = node->i.version;
+ temp->offset = cpu_to_je32(ofs);
+ temp->totlen = node->i.totlen;
+ temp->next = NULL;
+
+ return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
+ }
+
+ case JFFS2_NODETYPE_DIRENT: {
+ struct jffs2_sum_dirent_mem *temp =
+ kmalloc(sizeof(struct jffs2_sum_dirent_mem) + node->d.nsize, GFP_KERNEL);
+
+ if (!temp)
+ goto no_mem;
+
+ temp->nodetype = node->d.nodetype;
+ temp->totlen = node->d.totlen;
+ temp->offset = cpu_to_je32(ofs);
+ temp->pino = node->d.pino;
+ temp->version = node->d.version;
+ temp->ino = node->d.ino;
+ temp->nsize = node->d.nsize;
+ temp->type = node->d.type;
+ temp->next = NULL;
+
+ switch (count) {
+ case 1:
+ memcpy(temp->name,node->d.name,node->d.nsize);
+ break;
+
+ case 2:
+ memcpy(temp->name,invecs[1].iov_base,node->d.nsize);
+ break;
+
+ default:
+ BUG(); /* impossible count value */
+ break;
+ }
+
+ return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
+ }
+
+ case JFFS2_NODETYPE_ERASEBLOCK_HEADER: {
+ struct jffs2_sum_ebh_mem *temp = kmalloc(sizeof(struct jffs2_sum_ebh_mem) + je32_to_cpu(node->eh.totlen)
+ - sizeof(struct jffs2_raw_ebh), GFP_KERNEL);
+ if (!temp)
+ goto no_mem;
+
+ temp->nodetype = node->eh.nodetype;
+ temp->totlen = node->eh.totlen;
+ temp->offset = cpu_to_je32(ofs);
+ temp->reserved = node->eh.reserved;
+ temp->compat_fset = node->eh.compat_fset;
+ temp->incompat_fset = node->eh.incompat_fset;
+ temp->rocompat_fset = node->eh.rocompat_fset;
+ temp->erase_count = node->eh.erase_count;
+ temp->next = NULL;
+ switch (count) {
+ case 1:
+ memcpy(temp->data, node->eh.data, je32_to_cpu(node->eh.totlen) - sizeof(struct jffs2_raw_ebh));
+ break;
+ case 2:
+ memcpy(temp->data, invecs[1].iov_base, je32_to_cpu(node->eh.totlen) - sizeof(struct jffs2_raw_ebh));
+ break;
+ default:
+ BUG();
+ break;
+ }
+ return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
+ }
+
+ case JFFS2_NODETYPE_PADDING:
+ dbg_summary("node PADDING\n");
+ c->summary->sum_padded += je32_to_cpu(node->u.totlen);
+ break;
+
+ case JFFS2_NODETYPE_CLEANMARKER:
+ dbg_summary("node CLEANMARKER\n");
+ break;
+
+ case JFFS2_NODETYPE_SUMMARY:
+ dbg_summary("node SUMMARY\n");
+ break;
+
+ default:
+ /* If you implement a new node type you should also implement
+ summary support for it or disable summary.
+ */
+ BUG();
+ break;
+ }
+
+ return 0;
+
+no_mem:
+ JFFS2_WARNING("MEMORY ALLOCATION ERROR!");
+ return -ENOMEM;
+}
+
+
+/* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */
+
+static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ struct jffs2_raw_summary *summary, uint32_t *pseudo_random)
+{
+ struct jffs2_raw_node_ref *raw;
+ struct jffs2_inode_cache *ic;
+ struct jffs2_full_dirent *fd;
+ void *sp;
+ int i, ino;
+
+ sp = summary->sum;
+
+ for (i=0; i<je32_to_cpu(summary->sum_num); i++) {
+ dbg_summary("processing summary index %d\n", i);
+
+ switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) {
+ case JFFS2_NODETYPE_INODE: {
+ struct jffs2_sum_inode_flash *spi;
+ spi = sp;
+
+ ino = je32_to_cpu(spi->inode);
+
+ dbg_summary("Inode at 0x%08x\n",
+ jeb->offset + je32_to_cpu(spi->offset));
+
+ raw = jffs2_alloc_raw_node_ref();
+ if (!raw) {
+ JFFS2_NOTICE("allocation of node reference failed\n");
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ ic = jffs2_scan_make_ino_cache(c, ino);
+ if (!ic) {
+ JFFS2_NOTICE("scan_make_ino_cache failed\n");
+ jffs2_free_raw_node_ref(raw);
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ raw->flash_offset = (jeb->offset + je32_to_cpu(spi->offset)) | REF_UNCHECKED;
+ raw->__totlen = PAD(je32_to_cpu(spi->totlen));
+ raw->next_phys = NULL;
+ raw->next_in_ino = ic->nodes;
+
+ ic->nodes = raw;
+ if (!jeb->first_node)
+ jeb->first_node = raw;
+ if (jeb->last_node)
+ jeb->last_node->next_phys = raw;
+ jeb->last_node = raw;
+ *pseudo_random += je32_to_cpu(spi->version);
+
+ UNCHECKED_SPACE(PAD(je32_to_cpu(spi->totlen)));
+
+ sp += JFFS2_SUMMARY_INODE_SIZE;
+
+ break;
+ }
+
+ case JFFS2_NODETYPE_DIRENT: {
+ struct jffs2_sum_dirent_flash *spd;
+ spd = sp;
+
+ dbg_summary("Dirent at 0x%08x\n",
+ jeb->offset + je32_to_cpu(spd->offset));
+
+ fd = jffs2_alloc_full_dirent(spd->nsize+1);
+ if (!fd) {
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ memcpy(&fd->name, spd->name, spd->nsize);
+ fd->name[spd->nsize] = 0;
+
+ raw = jffs2_alloc_raw_node_ref();
+ if (!raw) {
+ jffs2_free_full_dirent(fd);
+ JFFS2_NOTICE("allocation of node reference failed\n");
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino));
+ if (!ic) {
+ jffs2_free_full_dirent(fd);
+ jffs2_free_raw_node_ref(raw);
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ raw->__totlen = PAD(je32_to_cpu(spd->totlen));
+ raw->flash_offset = (jeb->offset + je32_to_cpu(spd->offset)) | REF_PRISTINE;
+ raw->next_phys = NULL;
+ raw->next_in_ino = ic->nodes;
+ ic->nodes = raw;
+ if (!jeb->first_node)
+ jeb->first_node = raw;
+ if (jeb->last_node)
+ jeb->last_node->next_phys = raw;
+ jeb->last_node = raw;
+
+ fd->raw = raw;
+ fd->next = NULL;
+ fd->version = je32_to_cpu(spd->version);
+ fd->ino = je32_to_cpu(spd->ino);
+ fd->nhash = full_name_hash(fd->name, spd->nsize);
+ fd->type = spd->type;
+ USED_SPACE(PAD(je32_to_cpu(spd->totlen)));
+ jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
+
+ *pseudo_random += je32_to_cpu(spd->version);
+
+ sp += JFFS2_SUMMARY_DIRENT_SIZE(spd->nsize);
+
+ break;
+ }
+
+ case JFFS2_NODETYPE_ERASEBLOCK_HEADER: {
+ struct jffs2_sum_ebh_flash *speh;
+ speh = sp;
+
+ dbg_summary("Eraseblock header at 0x%08x\n",
+ jeb->offset + je32_to_cpu(speh->offset));
+
+ if (je32_to_cpu(speh->offset) != 0) {
+ printk(KERN_NOTICE "Eraseblock header offset is %d\n", je32_to_cpu(speh->offset));
+ kfree(summary);
+ return -EINVAL;
+ }
+
+ if ((JFFS2_EBH_INCOMPAT_FSET | speh->incompat_fset) != JFFS2_EBH_INCOMPAT_FSET) {
+				printk(KERN_NOTICE "The incompat_fset of fs image EBH %d exceeds the incompat_fset "
+					"of the JFFS2 module %d. Refusing to mount.\n",
+					speh->incompat_fset, JFFS2_EBH_INCOMPAT_FSET);
+ kfree(summary);
+ return -EINVAL;
+ }
+ if ((JFFS2_EBH_ROCOMPAT_FSET | speh->rocompat_fset) != JFFS2_EBH_ROCOMPAT_FSET) {
+				printk(KERN_NOTICE "Read-only compatible EBH feature found at offset 0x%08x\n",
+ jeb->offset);
+ if (!(jffs2_is_readonly(c))) {
+ kfree(summary);
+ return -EROFS;
+ }
+ }
+
+ raw = jffs2_alloc_raw_node_ref();
+ if (!raw) {
+ JFFS2_NOTICE("allocation of node reference failed\n");
+ kfree(summary);
+ return -ENOMEM;
+ }
+ EBFLAGS_SET_EBH(jeb);
+ jeb->erase_count = je32_to_cpu(speh->erase_count);
+ record_erase_count(c, jeb);
+
+ raw->next_in_ino = NULL;
+ raw->next_phys = NULL;
+ raw->flash_offset = (jeb->offset + je32_to_cpu(speh->offset)) | REF_NORMAL;
+ raw->__totlen = PAD(je32_to_cpu(speh->totlen));
+
+ if (!jeb->first_node)
+ jeb->first_node = raw;
+ if (jeb->last_node)
+ jeb->last_node->next_phys = raw;
+ jeb->last_node = raw;
+
+ USED_SPACE(PAD(je32_to_cpu(speh->totlen)));
+ sp += sizeof(struct jffs2_sum_ebh_flash) + je32_to_cpu(speh->totlen) - sizeof(struct jffs2_raw_ebh);
+
+ break;
+ }
+
+ default : {
+ JFFS2_WARNING("Unsupported node type found in summary! Exiting...");
+ kfree(summary);
+ return -EIO;
+ }
+ }
+ }
+
+ kfree(summary);
+ return 0;
+}
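
jffs2_sum_process_sum_data() above walks the summary payload with a void * cursor: it reads the 16-bit node type at the cursor, handles that record, then advances by the record type's size (JFFS2_SUMMARY_INODE_SIZE, JFFS2_SUMMARY_DIRENT_SIZE(nsize), and so on). A compact model of that walk over a toy record stream; the layout below is invented for illustration and is not the on-flash summary format:

#include <stdint.h>
#include <stdio.h>

/* Toy stream: [type:1][len:1][payload...] repeated. Only the
 * "advance the cursor by this record's size" idea is modeled. */
int main(void)
{
	uint8_t stream[] = {
		1, 4, 'a', 'b', 'c', 'd',	/* type 1, 4-byte payload */
		2, 2, 'x', 'y',			/* type 2, 2-byte payload */
	};
	uint8_t *sp = stream;
	uint8_t *end = stream + sizeof(stream);

	while (sp < end) {
		unsigned type = sp[0];
		unsigned len = sp[1];

		printf("record type %u, %u payload bytes\n", type, len);
		sp += 2 + len;	/* analogous to sp += JFFS2_SUMMARY_..._SIZE */
	}
	return 0;
}
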
+
+/* Process the summary node - called from jffs2_scan_eraseblock() */
+
+int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ uint32_t ofs, uint32_t *pseudo_random)
+{
+ struct jffs2_unknown_node crcnode;
+ struct jffs2_raw_node_ref *cache_ref;
+ struct jffs2_raw_summary *summary;
+ int ret, sumsize;
+ uint32_t crc;
+
+ sumsize = c->sector_size - ofs;
+ ofs += jeb->offset;
+
+ dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n",
+ jeb->offset, ofs, sumsize);
+
+ summary = kmalloc(sumsize, GFP_KERNEL);
+
+ if (!summary) {
+ return -ENOMEM;
+ }
+
+ ret = jffs2_flash_read_safe(c, ofs, sumsize,
+ (unsigned char *)summary);
+
+ if (ret) {
+ kfree(summary);
+ return ret;
+ }
+
+ /* OK, now check for node validity and CRC */
+ crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ crcnode.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
+ crcnode.totlen = summary->totlen;
+ crc = crc32(0, &crcnode, sizeof(crcnode)-4);
+
+ if (je32_to_cpu(summary->hdr_crc) != crc) {
+ dbg_summary("Summary node header is corrupt (bad CRC or "
+ "no summary at all)\n");
+ goto crc_err;
+ }
+
+ if (je32_to_cpu(summary->totlen) != sumsize) {
+ dbg_summary("Summary node is corrupt (wrong erasesize?)\n");
+ goto crc_err;
+ }
+
+ crc = crc32(0, summary, sizeof(struct jffs2_raw_summary)-8);
+
+ if (je32_to_cpu(summary->node_crc) != crc) {
+ dbg_summary("Summary node is corrupt (bad CRC)\n");
+ goto crc_err;
+ }
+
+ crc = crc32(0, summary->sum, sumsize - sizeof(struct jffs2_raw_summary));
+
+ if (je32_to_cpu(summary->sum_crc) != crc) {
+ dbg_summary("Summary node data is corrupt (bad CRC)\n");
+ goto crc_err;
+ }
+
+	if (je32_to_cpu(summary->cln_mkr)) {
+
+		dbg_summary("Summary: CLEANMARKER node\n");
+
+ if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) {
+ dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n",
+ je32_to_cpu(summary->cln_mkr), c->cleanmarker_size);
+ UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr)));
+ } else if (jeb->first_node) {
+ dbg_summary("CLEANMARKER node not first node in block "
+ "(0x%08x)\n", jeb->offset);
+ UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr)));
+ } else {
+ struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
+
+ if (!marker_ref) {
+ JFFS2_NOTICE("Failed to allocate node ref for clean marker\n");
+ kfree(summary);
+ return -ENOMEM;
+ }
+
+ marker_ref->next_in_ino = NULL;
+ marker_ref->next_phys = NULL;
+ marker_ref->flash_offset = jeb->offset | REF_NORMAL;
+ marker_ref->__totlen = je32_to_cpu(summary->cln_mkr);
+ jeb->first_node = jeb->last_node = marker_ref;
+
+ USED_SPACE( PAD(je32_to_cpu(summary->cln_mkr)) );
+ }
+ }
+
+ if (je32_to_cpu(summary->padded)) {
+ DIRTY_SPACE(je32_to_cpu(summary->padded));
+ }
+
+ ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random);
+ if (ret)
+ return ret;
+
+ /* for PARANOIA_CHECK */
+ cache_ref = jffs2_alloc_raw_node_ref();
+
+ if (!cache_ref) {
+ JFFS2_NOTICE("Failed to allocate node ref for cache\n");
+ return -ENOMEM;
+ }
+
+ cache_ref->next_in_ino = NULL;
+ cache_ref->next_phys = NULL;
+ cache_ref->flash_offset = ofs | REF_NORMAL;
+ cache_ref->__totlen = sumsize;
+
+ if (!jeb->first_node)
+ jeb->first_node = cache_ref;
+ if (jeb->last_node)
+ jeb->last_node->next_phys = cache_ref;
+ jeb->last_node = cache_ref;
+
+ USED_SPACE(sumsize);
+
+ jeb->wasted_size += jeb->free_size;
+ c->wasted_size += jeb->free_size;
+ c->free_size -= jeb->free_size;
+ jeb->free_size = 0;
+
+ return jffs2_scan_classify_jeb(c, jeb);
+
+crc_err:
+ JFFS2_WARNING("Summary node crc error, skipping summary information.\n");
+
+ return 0;
+}
+
+/* Write summary data to flash - helper function for jffs2_sum_write_sumnode() */
+
+static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ uint32_t infosize, uint32_t datasize, int padsize)
+{
+ struct jffs2_raw_summary isum;
+ union jffs2_sum_mem *temp;
+ struct jffs2_sum_marker *sm;
+ struct kvec vecs[2];
+ void *wpage;
+ int ret;
+ size_t retlen;
+
+ memset(c->summary->sum_buf, 0xff, datasize);
+ memset(&isum, 0, sizeof(isum));
+
+ isum.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ isum.nodetype = cpu_to_je16(JFFS2_NODETYPE_SUMMARY);
+ isum.totlen = cpu_to_je32(infosize);
+ isum.hdr_crc = cpu_to_je32(crc32(0, &isum, sizeof(struct jffs2_unknown_node) - 4));
+ isum.padded = cpu_to_je32(c->summary->sum_padded);
+ isum.cln_mkr = cpu_to_je32(0);
+ isum.sum_num = cpu_to_je32(c->summary->sum_num);
+ wpage = c->summary->sum_buf;
+
+ while (c->summary->sum_num) {
+
+ switch (je16_to_cpu(c->summary->sum_list_head->u.nodetype)) {
+ case JFFS2_NODETYPE_INODE: {
+ struct jffs2_sum_inode_flash *sino_ptr = wpage;
+
+ sino_ptr->nodetype = c->summary->sum_list_head->i.nodetype;
+ sino_ptr->inode = c->summary->sum_list_head->i.inode;
+ sino_ptr->version = c->summary->sum_list_head->i.version;
+ sino_ptr->offset = c->summary->sum_list_head->i.offset;
+ sino_ptr->totlen = c->summary->sum_list_head->i.totlen;
+
+ wpage += JFFS2_SUMMARY_INODE_SIZE;
+
+ break;
+ }
+
+ case JFFS2_NODETYPE_DIRENT: {
+ struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage;
+
+ sdrnt_ptr->nodetype = c->summary->sum_list_head->d.nodetype;
+ sdrnt_ptr->totlen = c->summary->sum_list_head->d.totlen;
+ sdrnt_ptr->offset = c->summary->sum_list_head->d.offset;
+ sdrnt_ptr->pino = c->summary->sum_list_head->d.pino;
+ sdrnt_ptr->version = c->summary->sum_list_head->d.version;
+ sdrnt_ptr->ino = c->summary->sum_list_head->d.ino;
+ sdrnt_ptr->nsize = c->summary->sum_list_head->d.nsize;
+ sdrnt_ptr->type = c->summary->sum_list_head->d.type;
+
+ memcpy(sdrnt_ptr->name, c->summary->sum_list_head->d.name,
+ c->summary->sum_list_head->d.nsize);
+
+ wpage += JFFS2_SUMMARY_DIRENT_SIZE(c->summary->sum_list_head->d.nsize);
+
+ break;
+ }
+
+ case JFFS2_NODETYPE_ERASEBLOCK_HEADER: {
+ struct jffs2_sum_ebh_flash *sebh_ptr = wpage;
+
+ sebh_ptr->nodetype = c->summary->sum_list_head->eh.nodetype;
+ sebh_ptr->totlen = c->summary->sum_list_head->eh.totlen;
+ sebh_ptr->offset = c->summary->sum_list_head->eh.offset;
+ sebh_ptr->reserved = c->summary->sum_list_head->eh.reserved;
+ sebh_ptr->compat_fset = c->summary->sum_list_head->eh.compat_fset;
+ sebh_ptr->incompat_fset = c->summary->sum_list_head->eh.incompat_fset;
+ sebh_ptr->rocompat_fset = c->summary->sum_list_head->eh.rocompat_fset;
+ sebh_ptr->erase_count = c->summary->sum_list_head->eh.erase_count;
+
+ memcpy(sebh_ptr->data, c->summary->sum_list_head->eh.data,
+ je32_to_cpu(c->summary->sum_list_head->eh.totlen) - sizeof(struct jffs2_raw_ebh));
+ wpage += sizeof(struct jffs2_sum_ebh_flash) + je32_to_cpu(c->summary->sum_list_head->eh.totlen)
+ - sizeof(struct jffs2_raw_ebh);
+
+ break;
+ }
+
+ default : {
+ BUG(); /* unknown node in summary information */
+ }
+ }
+
+ temp = c->summary->sum_list_head;
+ c->summary->sum_list_head = c->summary->sum_list_head->u.next;
+ kfree(temp);
+
+ c->summary->sum_num--;
+ }
+
+ jffs2_sum_reset_collected(c->summary);
+
+ wpage += padsize;
+
+ sm = wpage;
+ sm->offset = cpu_to_je32(c->sector_size - jeb->free_size);
+ sm->magic = cpu_to_je32(JFFS2_SUM_MAGIC);
+
+ isum.sum_crc = cpu_to_je32(crc32(0, c->summary->sum_buf, datasize));
+ isum.node_crc = cpu_to_je32(crc32(0, &isum, sizeof(isum) - 8));
+
+ vecs[0].iov_base = &isum;
+ vecs[0].iov_len = sizeof(isum);
+ vecs[1].iov_base = c->summary->sum_buf;
+ vecs[1].iov_len = datasize;
+
+	dbg_summary("JFFS2: writing out summary data to flash at position 0x%08x\n",
+ jeb->offset + c->sector_size - jeb->free_size);
+
+ spin_unlock(&c->erase_completion_lock);
+ ret = jffs2_flash_writev(c, vecs, 2, jeb->offset + c->sector_size -
+ jeb->free_size, &retlen, 0);
+ spin_lock(&c->erase_completion_lock);
+
+
+ if (ret || (retlen != infosize)) {
+ JFFS2_WARNING("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
+ infosize, jeb->offset + c->sector_size - jeb->free_size, ret, retlen);
+
+ c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
+ WASTED_SPACE(infosize);
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Write out summary information - called from jffs2_do_reserve_space */
+
+int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
+{
+ struct jffs2_raw_node_ref *summary_ref;
+ int datasize, infosize, padsize, ret;
+ struct jffs2_eraseblock *jeb;
+
+ dbg_summary("called\n");
+
+ jeb = c->nextblock;
+
+ if (!c->summary->sum_num || !c->summary->sum_list_head) {
+ JFFS2_WARNING("Empty summary info!!!\n");
+ BUG();
+ }
+
+ datasize = c->summary->sum_size + sizeof(struct jffs2_sum_marker);
+ infosize = sizeof(struct jffs2_raw_summary) + datasize;
+ padsize = jeb->free_size - infosize;
+ infosize += padsize;
+ datasize += padsize;
+
+ /* Is there enough space for summary? */
+ if (padsize < 0) {
+ /* don't try to write out summary for this jeb */
+ jffs2_sum_disable_collecting(c->summary);
+
+ JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize);
+ return 0;
+ }
+
+ ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize);
+ if (ret)
+ return 0; /* can't write out summary, block is marked as NOSUM_SIZE */
+
+ /* for ACCT_PARANOIA_CHECK */
+ spin_unlock(&c->erase_completion_lock);
+ summary_ref = jffs2_alloc_raw_node_ref();
+ spin_lock(&c->erase_completion_lock);
+
+ if (!summary_ref) {
+ JFFS2_NOTICE("Failed to allocate node ref for summary\n");
+ return -ENOMEM;
+ }
+
+ summary_ref->next_in_ino = NULL;
+ summary_ref->next_phys = NULL;
+ summary_ref->flash_offset = (jeb->offset + c->sector_size - jeb->free_size) | REF_NORMAL;
+ summary_ref->__totlen = infosize;
+
+ if (!jeb->first_node)
+ jeb->first_node = summary_ref;
+ if (jeb->last_node)
+ jeb->last_node->next_phys = summary_ref;
+ jeb->last_node = summary_ref;
+
+ USED_SPACE(infosize);
+
+ return 0;
+}
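
The sizing in jffs2_sum_write_sumnode() works out as: datasize = collected summary bytes plus the end-of-block marker, infosize = summary node header plus datasize, and padsize = jeb->free_size - infosize, so that the written node ends exactly at the end of the eraseblock. A worked example with assumed sizes (32-byte header, 8-byte marker; the real values come from the struct definitions):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical sizes, for illustration only. */
	uint32_t sum_size  = 1200;	/* bytes of collected summary records */
	uint32_t marker    = 8;		/* assumed sizeof(struct jffs2_sum_marker) */
	uint32_t header    = 32;	/* assumed sizeof(struct jffs2_raw_summary) */
	uint32_t free_size = 2048;	/* space left at the end of the eraseblock */

	uint32_t datasize = sum_size + marker;
	uint32_t infosize = header + datasize;
	int32_t padsize = (int32_t)(free_size - infosize);

	if (padsize < 0) {
		/* Not enough room: the real code disables summary for this jeb. */
		printf("no room for summary (padsize %d)\n", (int)padsize);
		return 0;
	}
	infosize += padsize;
	datasize += padsize;
	/* infosize now equals free_size, so the summary node fills the block. */
	printf("datasize %u, infosize %u, padsize %d\n",
	       (unsigned)datasize, (unsigned)infosize, (int)padsize);
	return 0;
}
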
diff --git a/linux-2.4.x/fs/jffs2/summary.h b/linux-2.4.x/fs/jffs2/summary.h
new file mode 100644
index 0000000..4b49054
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/summary.h
@@ -0,0 +1,217 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2004 Ferenc Havasi <havasi@inf.u-szeged.hu>,
+ * Zoltan Sogor <weth@inf.u-szeged.hu>,
+ * Patrik Kluba <pajko@halom.u-szeged.hu>,
+ * University of Szeged, Hungary
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: summary.h,v 1.4 2006/02/15 09:42:02 havasi Exp $
+ *
+ */
+
+#ifndef JFFS2_SUMMARY_H
+#define JFFS2_SUMMARY_H
+
+#include <linux/uio.h>
+#include <linux/jffs2.h>
+
+#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
+ c->free_size -= _x; c->dirty_size += _x; \
+ jeb->free_size -= _x ; jeb->dirty_size += _x; \
+ }while(0)
+#define USED_SPACE(x) do { typeof(x) _x = (x); \
+ c->free_size -= _x; c->used_size += _x; \
+ jeb->free_size -= _x ; jeb->used_size += _x; \
+ }while(0)
+#define WASTED_SPACE(x) do { typeof(x) _x = (x); \
+ c->free_size -= _x; c->wasted_size += _x; \
+ jeb->free_size -= _x ; jeb->wasted_size += _x; \
+ }while(0)
+#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
+ c->free_size -= _x; c->unchecked_size += _x; \
+ jeb->free_size -= _x ; jeb->unchecked_size += _x; \
+ }while(0)
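
All four macros above follow the same shape: debit x bytes from the free pool and credit them to one bucket, on both the filesystem-wide counters in c and the per-eraseblock counters in jeb, so the buckets always partition the eraseblock. A standalone sketch of that invariant using simplified counter structs (illustrative names, not the real jffs2_sb_info/jffs2_eraseblock):

#include <stdint.h>
#include <stdio.h>

struct counters {
	uint32_t free_size, dirty_size, used_size, wasted_size, unchecked_size;
};

/* Same shape as USED_SPACE(): debit free, credit a bucket, on both the
 * filesystem-wide and the per-eraseblock counters. */
static void used_space(struct counters *c, struct counters *jeb, uint32_t x)
{
	c->free_size -= x;   c->used_size += x;
	jeb->free_size -= x; jeb->used_size += x;
}

static void dirty_space(struct counters *c, struct counters *jeb, uint32_t x)
{
	c->free_size -= x;   c->dirty_size += x;
	jeb->free_size -= x; jeb->dirty_size += x;
}

static uint32_t total(const struct counters *k)
{
	return k->free_size + k->dirty_size + k->used_size +
	       k->wasted_size + k->unchecked_size;
}

int main(void)
{
	uint32_t sector_size = 0x20000;	/* 128 KiB eraseblock, illustrative */
	struct counters c = { .free_size = sector_size };
	struct counters jeb = { .free_size = sector_size };

	used_space(&c, &jeb, 4096);	/* e.g. a valid node was found */
	dirty_space(&c, &jeb, 68);	/* e.g. a node with a bad CRC */

	/* The buckets always sum back to the eraseblock size. */
	printf("jeb total 0x%x (sector 0x%x)\n",
	       (unsigned)total(&jeb), (unsigned)sector_size);
	return 0;
}
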
+
+#define BLK_STATE_ALLFF 0
+#define BLK_STATE_CLEAN 1
+#define BLK_STATE_PARTDIRTY 2
+#define BLK_STATE_CLEANMARKER 3
+#define BLK_STATE_ALLDIRTY 4
+#define BLK_STATE_BADBLOCK 5
+
+#define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff
+#define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash))
+#define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x))
+#define JFFS2_SUMMARY_EBH_SIZE(x) (sizeof(struct jffs2_sum_ebh_flash) + (x))
+
+/* Summary structures used on flash */
+
+struct jffs2_sum_unknown_flash
+{
+ jint16_t nodetype; /* node type */
+} __attribute__((packed));
+
+struct jffs2_sum_inode_flash
+{
+ jint16_t nodetype; /* node type */
+ jint32_t inode; /* inode number */
+ jint32_t version; /* inode version */
+ jint32_t offset; /* offset on jeb */
+ jint32_t totlen; /* record length */
+} __attribute__((packed));
+
+struct jffs2_sum_dirent_flash
+{
+ jint16_t nodetype; /* == JFFS_NODETYPE_DIRENT */
+ jint32_t totlen; /* record length */
+ jint32_t offset; /* offset on jeb */
+ jint32_t pino; /* parent inode */
+ jint32_t version; /* dirent version */
+ jint32_t ino; /* == zero for unlink */
+ uint8_t nsize; /* dirent name size */
+ uint8_t type; /* dirent type */
+ uint8_t name[0]; /* dirent name */
+} __attribute__((packed));
+
+struct jffs2_sum_ebh_flash
+{
+ jint16_t nodetype;
+ jint32_t totlen;
+ jint32_t offset;
+ uint8_t reserved;
+ uint8_t compat_fset;
+ uint8_t incompat_fset;
+ uint8_t rocompat_fset;
+ jint32_t erase_count;
+ jint16_t dsize;
+ jint32_t data[0];
+} __attribute__((packed));
+
+union jffs2_sum_flash
+{
+ struct jffs2_sum_unknown_flash u;
+ struct jffs2_sum_inode_flash i;
+ struct jffs2_sum_dirent_flash d;
+ struct jffs2_sum_ebh_flash eh;
+};
+
+/* Summary structures used in the memory */
+
+struct jffs2_sum_unknown_mem
+{
+ union jffs2_sum_mem *next;
+ jint16_t nodetype; /* node type */
+} __attribute__((packed));
+
+struct jffs2_sum_inode_mem
+{
+ union jffs2_sum_mem *next;
+ jint16_t nodetype; /* node type */
+ jint32_t inode; /* inode number */
+ jint32_t version; /* inode version */
+ jint32_t offset; /* offset on jeb */
+ jint32_t totlen; /* record length */
+} __attribute__((packed));
+
+struct jffs2_sum_dirent_mem
+{
+ union jffs2_sum_mem *next;
+ jint16_t nodetype; /* == JFFS_NODETYPE_DIRENT */
+ jint32_t totlen; /* record length */
+	jint32_t offset;	/* offset on jeb */
+ jint32_t pino; /* parent inode */
+ jint32_t version; /* dirent version */
+ jint32_t ino; /* == zero for unlink */
+ uint8_t nsize; /* dirent name size */
+ uint8_t type; /* dirent type */
+ uint8_t name[0]; /* dirent name */
+} __attribute__((packed));
+
+struct jffs2_sum_ebh_mem
+{
+ union jffs2_sum_mem *next;
+ jint16_t nodetype;
+ jint32_t totlen;
+ jint32_t offset;
+ uint8_t reserved;
+ uint8_t compat_fset;
+ uint8_t incompat_fset;
+ uint8_t rocompat_fset;
+ jint32_t erase_count;
+ jint16_t dsize;
+ jint32_t data[0];
+} __attribute__((packed));
+
+union jffs2_sum_mem
+{
+ struct jffs2_sum_unknown_mem u;
+ struct jffs2_sum_inode_mem i;
+ struct jffs2_sum_dirent_mem d;
+ struct jffs2_sum_ebh_mem eh;
+};
+
+/* Summary related information stored in superblock */
+
+struct jffs2_summary
+{
+ uint32_t sum_size; /* collected summary information for nextblock */
+ uint32_t sum_num;
+ uint32_t sum_padded;
+ union jffs2_sum_mem *sum_list_head;
+ union jffs2_sum_mem *sum_list_tail;
+
+ jint32_t *sum_buf; /* buffer for writing out summary */
+};
+
+/* Summary marker is stored at the end of every summarized erase block */
+
+struct jffs2_sum_marker
+{
+ jint32_t offset; /* offset of the summary node in the jeb */
+ jint32_t magic; /* == JFFS2_SUM_MAGIC */
+};
+
+#define JFFS2_SUMMARY_FRAME_SIZE (sizeof(struct jffs2_raw_summary) + sizeof(struct jffs2_sum_marker))
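
JFFS2_SUMMARY_FRAME_SIZE is the fixed overhead of a summary node: its header plus the end-of-block marker. jffs2_scan_eraseblock() uses it to decide whether collecting is still worthwhile, disabling summary for the jeb once PAD(sum_size + JFFS2_SUMMARY_FRAME_SIZE) no longer fits in the block's remaining free space. A sketch of that check with assumed sizes and PAD() modeled as a 4-byte round-up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed sizes for illustration: 32-byte summary header, 8-byte marker. */
#define RAW_SUMMARY_SIZE 32u
#define SUM_MARKER_SIZE  8u
#define FRAME_SIZE       (RAW_SUMMARY_SIZE + SUM_MARKER_SIZE)

static uint32_t pad4(uint32_t x)
{
	return (x + 3u) & ~3u;	/* models JFFS2's PAD(): round up to 4 bytes */
}

static bool summary_still_fits(uint32_t sum_size, uint32_t jeb_free)
{
	return pad4(sum_size + FRAME_SIZE) <= jeb_free;
}

int main(void)
{
	printf("%d\n", summary_still_fits(1000, 4096));	/* 1: keep collecting */
	printf("%d\n", summary_still_fits(1000, 512));	/* 0: disable for this jeb */
	return 0;
}
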
+
+#ifdef CONFIG_JFFS2_SUMMARY /* SUMMARY SUPPORT ENABLED */
+
+#define jffs2_sum_active() (1)
+int jffs2_sum_init(struct jffs2_sb_info *c);
+void jffs2_sum_exit(struct jffs2_sb_info *c);
+void jffs2_sum_disable_collecting(struct jffs2_summary *s);
+int jffs2_sum_is_disabled(struct jffs2_summary *s);
+void jffs2_sum_reset_collected(struct jffs2_summary *s);
+void jffs2_sum_move_collected(struct jffs2_sb_info *c, struct jffs2_summary *s);
+int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
+ unsigned long count, uint32_t to);
+int jffs2_sum_write_sumnode(struct jffs2_sb_info *c);
+int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size);
+int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs);
+int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs);
+int jffs2_sum_add_ebh_mem(struct jffs2_summary *s, struct jffs2_raw_ebh *eh, uint32_t ofs);
+int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
+ uint32_t ofs, uint32_t *pseudo_random);
+
+#else /* SUMMARY DISABLED */
+
+#define jffs2_sum_active() (0)
+#define jffs2_sum_init(a) (0)
+#define jffs2_sum_exit(a)
+#define jffs2_sum_disable_collecting(a)
+#define jffs2_sum_is_disabled(a) (0)
+#define jffs2_sum_reset_collected(a)
+#define jffs2_sum_add_kvec(a,b,c,d) (0)
+#define jffs2_sum_move_collected(a,b)
+#define jffs2_sum_write_sumnode(a) (0)
+#define jffs2_sum_add_padding_mem(a,b)
+#define jffs2_sum_add_inode_mem(a,b,c)
+#define jffs2_sum_add_dirent_mem(a,b,c)
+#define jffs2_sum_add_ebh_mem(a,b,c)
+#define jffs2_sum_scan_sumnode(a,b,c,d) (0)
+
+#endif /* CONFIG_JFFS2_SUMMARY */
+
+#endif /* JFFS2_SUMMARY_H */
diff --git a/linux-2.4.x/fs/jffs2/super-v24.c b/linux-2.4.x/fs/jffs2/super-v24.c
new file mode 100644
index 0000000..ba692d0
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/super-v24.c
@@ -0,0 +1,183 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: super-v24.c,v 1.88 2005/11/11 08:51:39 forrest Exp $
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/jffs2.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include "compr.h"
+#include "nodelist.h"
+#include "summary.h"
+
+#ifndef MTD_BLOCK_MAJOR
+#define MTD_BLOCK_MAJOR 31
+#endif
+
+static void jffs2_put_super (struct super_block *);
+
+static struct super_operations jffs2_super_operations =
+{
+ .read_inode = jffs2_read_inode,
+ .put_super = jffs2_put_super,
+ .write_super = jffs2_write_super,
+ .statfs = jffs2_statfs,
+ .remount_fs = jffs2_remount_fs,
+ .clear_inode = jffs2_clear_inode,
+ .dirty_inode = jffs2_dirty_inode,
+};
+
+
+static struct super_block *jffs2_read_super(struct super_block *sb, void *data, int silent)
+{
+ struct jffs2_sb_info *c;
+ int ret;
+
+ D1(printk(KERN_DEBUG "jffs2: read_super for device %s\n", kdevname(sb->s_dev)));
+
+ if (major(sb->s_dev) != MTD_BLOCK_MAJOR) {
+ if (!silent)
+ printk(KERN_DEBUG "jffs2: attempt to mount non-MTD device %s\n", kdevname(sb->s_dev));
+ return NULL;
+ }
+
+ c = JFFS2_SB_INFO(sb);
+ memset(c, 0, sizeof(*c));
+
+ /* Initialize JFFS2 superblock locks now, the further superblock
+ * initialization will be done later */
+ init_MUTEX(&c->alloc_sem);
+ init_MUTEX(&c->erase_free_sem);
+ init_waitqueue_head(&c->erase_wait);
+ init_waitqueue_head(&c->inocache_wq);
+ spin_lock_init(&c->erase_completion_lock);
+ spin_lock_init(&c->inocache_lock);
+
+ sb->s_op = &jffs2_super_operations;
+
+ c->mtd = get_mtd_device(NULL, minor(sb->s_dev));
+ if (!c->mtd) {
+ D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", minor(sb->s_dev)));
+ return NULL;
+ }
+
+ ret = jffs2_do_fill_super(sb, data, silent);
+ if (ret) {
+ put_mtd_device(c->mtd);
+ return NULL;
+ }
+
+ return sb;
+}
+
+static void jffs2_put_super (struct super_block *sb)
+{
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+
+ D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
+
+
+ if (!(sb->s_flags & MS_RDONLY))
+ jffs2_stop_garbage_collect_thread(c);
+ down(&c->alloc_sem);
+ jffs2_flush_wbuf_pad(c);
+ up(&c->alloc_sem);
+
+ jffs2_sum_exit(c);
+
+ jffs2_free_ino_caches(c);
+ jffs2_free_raw_node_refs(c);
+ jffs2_free_eraseblocks(c);
+ jffs2_flash_cleanup(c);
+ kfree(c->inocache_list);
+ if (c->mtd->sync)
+ c->mtd->sync(c->mtd);
+ put_mtd_device(c->mtd);
+
+ D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
+}
+
+static DECLARE_FSTYPE_DEV(jffs2_fs_type, "jffs2", jffs2_read_super);
+
+static int __init init_jffs2_fs(void)
+{
+ int ret;
+
+ printk(KERN_INFO "JFFS2 version 2.2."
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ " (NAND)"
+#endif
+#ifdef CONFIG_JFFS2_SUMMARY
+ " (SUMMARY)"
+#endif
+ " (C) 2001-2003 Red Hat, Inc.\n");
+
+#ifdef JFFS2_OUT_OF_KERNEL
+ /* sanity checks. Could we do these at compile time? */
+ if (sizeof(struct jffs2_sb_info) > sizeof (((struct super_block *)NULL)->u)) {
+ printk(KERN_ERR "JFFS2 error: struct jffs2_sb_info (%d bytes) doesn't fit in the super_block union (%d bytes)\n",
+ sizeof(struct jffs2_sb_info), sizeof (((struct super_block *)NULL)->u));
+ return -EIO;
+ }
+
+ if (sizeof(struct jffs2_inode_info) > sizeof (((struct inode *)NULL)->u)) {
+ printk(KERN_ERR "JFFS2 error: struct jffs2_inode_info (%d bytes) doesn't fit in the inode union (%d bytes)\n",
+ sizeof(struct jffs2_inode_info), sizeof (((struct inode *)NULL)->u));
+ return -EIO;
+ }
+#endif
+ ret = jffs2_compressors_init();
+ if (ret) {
+ printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n");
+ goto out;
+ }
+ ret = jffs2_create_slab_caches();
+ if (ret) {
+ printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n");
+ goto out_compressors;
+ }
+ ret = register_filesystem(&jffs2_fs_type);
+ if (ret) {
+ printk(KERN_ERR "JFFS2 error: Failed to register filesystem\n");
+ goto out_slab;
+ }
+ return 0;
+
+ out_slab:
+ jffs2_destroy_slab_caches();
+ out_compressors:
+ jffs2_compressors_exit();
+ out:
+ return ret;
+}
+
+static void __exit exit_jffs2_fs(void)
+{
+ jffs2_destroy_slab_caches();
+ jffs2_compressors_exit();
+ unregister_filesystem(&jffs2_fs_type);
+}
+
+module_init(init_jffs2_fs);
+module_exit(exit_jffs2_fs);
+
+MODULE_DESCRIPTION("The Journalling Flash File System, v2");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for
+ // the sake of this tag. It's Free Software.
diff --git a/linux-2.4.x/fs/jffs2/super.c b/linux-2.4.x/fs/jffs2/super.c
index 5f01337..5efe0a8 100644
--- a/linux-2.4.x/fs/jffs2/super.c
+++ b/linux-2.4.x/fs/jffs2/super.c
@@ -1,375 +1,348 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: super.c,v 1.48.2.3 2002/10/11 09:04:44 dwmw2 Exp $
+ * $Id: super.c,v 1.112 2005/11/24 16:13:25 dedekind Exp $
*
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/fs.h>
+#include <linux/mount.h>
#include <linux/jffs2.h>
#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
-#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/namei.h>
+#include "compr.h"
#include "nodelist.h"
-#ifndef MTD_BLOCK_MAJOR
-#define MTD_BLOCK_MAJOR 31
-#endif
+static void jffs2_put_super(struct super_block *);
-extern void jffs2_read_inode (struct inode *);
-void jffs2_put_super (struct super_block *);
-void jffs2_write_super (struct super_block *);
-static int jffs2_statfs (struct super_block *, struct statfs *);
-int jffs2_remount_fs (struct super_block *, int *, char *);
-extern void jffs2_clear_inode (struct inode *);
+static kmem_cache_t *jffs2_inode_cachep;
-static struct super_operations jffs2_super_operations =
+static struct inode *jffs2_alloc_inode(struct super_block *sb)
{
- read_inode: jffs2_read_inode,
-// delete_inode: jffs2_delete_inode,
- put_super: jffs2_put_super,
- write_super: jffs2_write_super,
- statfs: jffs2_statfs,
- remount_fs: jffs2_remount_fs,
- clear_inode: jffs2_clear_inode
-};
+ struct jffs2_inode_info *ei;
+ ei = (struct jffs2_inode_info *)kmem_cache_alloc(jffs2_inode_cachep, SLAB_KERNEL);
+ if (!ei)
+ return NULL;
+ return &ei->vfs_inode;
+}
-static int jffs2_statfs(struct super_block *sb, struct statfs *buf)
+static void jffs2_destroy_inode(struct inode *inode)
{
- struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
- unsigned long avail;
-
- buf->f_type = JFFS2_SUPER_MAGIC;
- buf->f_bsize = 1 << PAGE_SHIFT;
- buf->f_blocks = c->flash_size >> PAGE_SHIFT;
- buf->f_files = 0;
- buf->f_ffree = 0;
- buf->f_namelen = JFFS2_MAX_NAME_LEN;
-
- spin_lock_bh(&c->erase_completion_lock);
-
- avail = c->dirty_size + c->free_size;
- if (avail > c->sector_size * JFFS2_RESERVED_BLOCKS_WRITE)
- avail -= c->sector_size * JFFS2_RESERVED_BLOCKS_WRITE;
- else
- avail = 0;
-
- buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
-
-#if CONFIG_JFFS2_FS_DEBUG > 0
- printk(KERN_DEBUG "STATFS:\n");
- printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
- printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
- printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
- printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
- printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
- printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
- printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
-
- if (c->nextblock) {
- printk(KERN_DEBUG "nextblock: 0x%08x\n", c->nextblock->offset);
- } else {
- printk(KERN_DEBUG "nextblock: NULL\n");
- }
- if (c->gcblock) {
- printk(KERN_DEBUG "gcblock: 0x%08x\n", c->gcblock->offset);
- } else {
- printk(KERN_DEBUG "gcblock: NULL\n");
- }
- if (list_empty(&c->clean_list)) {
- printk(KERN_DEBUG "clean_list: empty\n");
- } else {
- struct list_head *this;
+ kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
+}
- list_for_each(this, &c->clean_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "clean_list: %08x\n", jeb->offset);
- }
- }
- if (list_empty(&c->dirty_list)) {
- printk(KERN_DEBUG "dirty_list: empty\n");
- } else {
- struct list_head *this;
+static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+ struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;
- list_for_each(this, &c->dirty_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "dirty_list: %08x\n", jeb->offset);
- }
+ if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+ SLAB_CTOR_CONSTRUCTOR) {
+ init_MUTEX(&ei->sem);
+ inode_init_once(&ei->vfs_inode);
}
- if (list_empty(&c->erasing_list)) {
- printk(KERN_DEBUG "erasing_list: empty\n");
- } else {
- struct list_head *this;
+}
- list_for_each(this, &c->erasing_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "erasing_list: %08x\n", jeb->offset);
- }
- }
- if (list_empty(&c->erase_pending_list)) {
- printk(KERN_DEBUG "erase_pending_list: empty\n");
- } else {
- struct list_head *this;
+static int jffs2_sync_fs(struct super_block *sb, int wait)
+{
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
- list_for_each(this, &c->erase_pending_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "erase_pending_list: %08x\n", jeb->offset);
- }
- }
- if (list_empty(&c->free_list)) {
- printk(KERN_DEBUG "free_list: empty\n");
- } else {
- struct list_head *this;
+ down(&c->alloc_sem);
+ jffs2_flush_wbuf_pad(c);
+ up(&c->alloc_sem);
+ return 0;
+}
- list_for_each(this, &c->free_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "free_list: %08x\n", jeb->offset);
- }
- }
- if (list_empty(&c->bad_list)) {
- printk(KERN_DEBUG "bad_list: empty\n");
- } else {
- struct list_head *this;
+static struct super_operations jffs2_super_operations =
+{
+ .alloc_inode = jffs2_alloc_inode,
+ .destroy_inode = jffs2_destroy_inode,
+ .read_inode = jffs2_read_inode,
+ .put_super = jffs2_put_super,
+ .write_super = jffs2_write_super,
+ .statfs = jffs2_statfs,
+ .remount_fs = jffs2_remount_fs,
+ .clear_inode = jffs2_clear_inode,
+ .dirty_inode = jffs2_dirty_inode,
+ .sync_fs = jffs2_sync_fs,
+};
- list_for_each(this, &c->bad_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "bad_list: %08x\n", jeb->offset);
- }
- }
- if (list_empty(&c->bad_used_list)) {
- printk(KERN_DEBUG "bad_used_list: empty\n");
- } else {
- struct list_head *this;
+static int jffs2_sb_compare(struct super_block *sb, void *data)
+{
+ struct jffs2_sb_info *p = data;
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
- list_for_each(this, &c->bad_used_list) {
- struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
- printk(KERN_DEBUG "bad_used_list: %08x\n", jeb->offset);
- }
+ /* The superblocks are considered to be equivalent if the underlying MTD
+ device is the same one */
+ if (c->mtd == p->mtd) {
+ D1(printk(KERN_DEBUG "jffs2_sb_compare: match on device %d (\"%s\")\n", p->mtd->index, p->mtd->name));
+ return 1;
+ } else {
+ D1(printk(KERN_DEBUG "jffs2_sb_compare: No match, device %d (\"%s\"), device %d (\"%s\")\n",
+ c->mtd->index, c->mtd->name, p->mtd->index, p->mtd->name));
+ return 0;
}
-#endif /* CONFIG_JFFS2_FS_DEBUG */
+}
- spin_unlock_bh(&c->erase_completion_lock);
+static int jffs2_sb_set(struct super_block *sb, void *data)
+{
+ struct jffs2_sb_info *p = data;
+ /* For persistence of NFS exports etc. we use the same s_dev
+ each time we mount the device, don't just use an anonymous
+ device */
+ sb->s_fs_info = p;
+ p->os_priv = sb;
+ sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, p->mtd->index);
return 0;
}
-static struct super_block *jffs2_read_super(struct super_block *sb, void *data, int silent)
+static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, struct mtd_info *mtd)
{
+ struct super_block *sb;
struct jffs2_sb_info *c;
- struct inode *root_i;
- int i;
+ int ret;
- D1(printk(KERN_DEBUG "jffs2: read_super for device %s\n", kdevname(sb->s_dev)));
+ c = kmalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+ memset(c, 0, sizeof(*c));
+ c->mtd = mtd;
- if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR) {
- if (!silent)
- printk(KERN_DEBUG "jffs2: attempt to mount non-MTD device %s\n", kdevname(sb->s_dev));
- return NULL;
- }
+ sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c);
- c = JFFS2_SB_INFO(sb);
- memset(c, 0, sizeof(*c));
-
- c->mtd = get_mtd_device(NULL, MINOR(sb->s_dev));
- if (!c->mtd) {
- D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", MINOR(sb->s_dev)));
- return NULL;
- }
- c->sector_size = c->mtd->erasesize;
- c->free_size = c->flash_size = c->mtd->size;
- c->nr_blocks = c->mtd->size / c->mtd->erasesize;
- c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL);
- if (!c->blocks)
- goto out_mtd;
- for (i=0; i<c->nr_blocks; i++) {
- INIT_LIST_HEAD(&c->blocks[i].list);
- c->blocks[i].offset = i * c->sector_size;
- c->blocks[i].free_size = c->sector_size;
- c->blocks[i].dirty_size = 0;
- c->blocks[i].used_size = 0;
- c->blocks[i].first_node = NULL;
- c->blocks[i].last_node = NULL;
+ if (IS_ERR(sb))
+ goto out_put;
+
+ if (sb->s_root) {
+ /* New mountpoint for JFFS2 which is already mounted */
+ D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): Device %d (\"%s\") is already mounted\n",
+ mtd->index, mtd->name));
+ goto out_put;
}
-
- spin_lock_init(&c->nodelist_lock);
+
+ D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): New superblock for device %d (\"%s\")\n",
+ mtd->index, mtd->name));
+
+ /* Initialize the JFFS2 superblock locks; the rest of the initialization
+ * will be done later */
init_MUTEX(&c->alloc_sem);
+ init_MUTEX(&c->erase_free_sem);
init_waitqueue_head(&c->erase_wait);
+ init_waitqueue_head(&c->inocache_wq);
spin_lock_init(&c->erase_completion_lock);
spin_lock_init(&c->inocache_lock);
- INIT_LIST_HEAD(&c->clean_list);
- INIT_LIST_HEAD(&c->dirty_list);
- INIT_LIST_HEAD(&c->erasing_list);
- INIT_LIST_HEAD(&c->erase_pending_list);
- INIT_LIST_HEAD(&c->erase_complete_list);
- INIT_LIST_HEAD(&c->free_list);
- INIT_LIST_HEAD(&c->bad_list);
- INIT_LIST_HEAD(&c->bad_used_list);
- c->highest_ino = 1;
-
- if (jffs2_build_filesystem(c)) {
- D1(printk(KERN_DEBUG "build_fs failed\n"));
- goto out_nodes;
- }
-
sb->s_op = &jffs2_super_operations;
+ sb->s_flags = flags | MS_NOATIME;
- D1(printk(KERN_DEBUG "jffs2_read_super(): Getting root inode\n"));
- root_i = iget(sb, 1);
- if (is_bad_inode(root_i)) {
- D1(printk(KERN_WARNING "get root inode failed\n"));
- goto out_nodes;
- }
+ ret = jffs2_do_fill_super(sb, data, (flags&MS_VERBOSE)?1:0);
- D1(printk(KERN_DEBUG "jffs2_read_super(): d_alloc_root()\n"));
- sb->s_root = d_alloc_root(root_i);
- if (!sb->s_root)
- goto out_root_i;
+ if (ret) {
+ /* Failure case... */
+ up_write(&sb->s_umount);
+ deactivate_super(sb);
+ return ERR_PTR(ret);
+ }
-#if LINUX_VERSION_CODE >= 0x20403
- sb->s_maxbytes = 0xFFFFFFFF;
-#endif
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = JFFS2_SUPER_MAGIC;
- if (!(sb->s_flags & MS_RDONLY))
- jffs2_start_garbage_collect_thread(c);
+ sb->s_flags |= MS_ACTIVE;
return sb;
- out_root_i:
- iput(root_i);
- out_nodes:
- jffs2_free_ino_caches(c);
- jffs2_free_raw_node_refs(c);
- kfree(c->blocks);
- out_mtd:
- put_mtd_device(c->mtd);
- return NULL;
+ out_put:
+ kfree(c);
+ put_mtd_device(mtd);
+
+ return sb;
}
-void jffs2_put_super (struct super_block *sb)
+static struct super_block *jffs2_get_sb_mtdnr(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data, int mtdnr)
{
- struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+ struct mtd_info *mtd;
- D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
+ mtd = get_mtd_device(NULL, mtdnr);
+ if (!mtd) {
+ D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", mtdnr));
+ return ERR_PTR(-EINVAL);
+ }
- if (!(sb->s_flags & MS_RDONLY))
- jffs2_stop_garbage_collect_thread(c);
- jffs2_free_ino_caches(c);
- jffs2_free_raw_node_refs(c);
- kfree(c->blocks);
- if (c->mtd->sync)
- c->mtd->sync(c->mtd);
- put_mtd_device(c->mtd);
-
- D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
+ return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd);
}
-int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
+static struct super_block *jffs2_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name,
+ void *data)
{
- struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+ int err;
+ struct nameidata nd;
+ int mtdnr;
+
+ if (!dev_name)
+ return ERR_PTR(-EINVAL);
+
+ D1(printk(KERN_DEBUG "jffs2_get_sb(): dev_name \"%s\"\n", dev_name));
+
+ /* The preferred way of mounting in future; especially when
+ CONFIG_BLK_DEV is implemented - we specify the underlying
+ MTD device by number or by name, so that we don't require
+ block device support to be present in the kernel. */
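+
+ /* Example usage (illustrative, not from the original patch):
+ * "mount -t jffs2 mtd:rootfs /mnt" selects the MTD partition named
+ * "rootfs" via the name comparison below, while "mount -t jffs2 mtd2
+ * /mnt" selects MTD device number 2 via simple_strtoul(); neither
+ * form needs the mtdblock driver. */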
+
+ /* FIXME: How to do the root fs this way? */
+
+ if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') {
+ /* Probably mounting without the blkdev crap */
+ if (dev_name[3] == ':') {
+ struct mtd_info *mtd;
+
+ /* Mount by MTD device name */
+ D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd:%%s, name \"%s\"\n", dev_name+4));
+ for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) {
+ mtd = get_mtd_device(NULL, mtdnr);
+ if (mtd) {
+ if (!strcmp(mtd->name, dev_name+4))
+ return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd);
+ put_mtd_device(mtd);
+ }
+ }
+ printk(KERN_NOTICE "jffs2_get_sb(): MTD device with name \"%s\" not found.\n", dev_name+4);
+ } else if (isdigit(dev_name[3])) {
+ /* Mount by MTD device number name */
+ char *endptr;
+
+ mtdnr = simple_strtoul(dev_name+3, &endptr, 0);
+ if (!*endptr) {
+ /* It was a valid number */
+ D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd%%d, mtdnr %d\n", mtdnr));
+ return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr);
+ }
+ }
+ }
- if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
- return -EROFS;
+ /* Try the old way - the hack where we allowed users to mount
+ /dev/mtdblock$(n) but didn't actually _use_ the blkdev */
- /* We stop if it was running, then restart if it needs to.
- This also catches the case where it was stopped and this
- is just a remount to restart it */
- if (!(sb->s_flags & MS_RDONLY))
- jffs2_stop_garbage_collect_thread(c);
+ err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
- if (!(*flags & MS_RDONLY))
- jffs2_start_garbage_collect_thread(c);
-
- sb->s_flags = (sb->s_flags & ~MS_RDONLY)|(*flags & MS_RDONLY);
+ D1(printk(KERN_DEBUG "jffs2_get_sb(): path_lookup() returned %d, inode %p\n",
+ err, nd.dentry->d_inode));
- return 0;
+ if (err)
+ return ERR_PTR(err);
+
+ err = -EINVAL;
+
+ if (!S_ISBLK(nd.dentry->d_inode->i_mode))
+ goto out;
+
+ if (nd.mnt->mnt_flags & MNT_NODEV) {
+ err = -EACCES;
+ goto out;
+ }
+
+ if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR) {
+ if (!(flags & MS_VERBOSE)) /* Yes I mean this. Strangely */
+ printk(KERN_NOTICE "Attempt to mount non-MTD device \"%s\" as JFFS2\n",
+ dev_name);
+ goto out;
+ }
+
+ mtdnr = iminor(nd.dentry->d_inode);
+ path_release(&nd);
+
+ return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr);
+
+out:
+ path_release(&nd);
+ return ERR_PTR(err);
}
-void jffs2_write_super (struct super_block *sb)
+static void jffs2_put_super (struct super_block *sb)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
- sb->s_dirt = 0;
- if (sb->s_flags & MS_RDONLY)
- return;
+ D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n"));
+
+ down(&c->alloc_sem);
+ jffs2_flush_wbuf_pad(c);
+ up(&c->alloc_sem);
- jffs2_garbage_collect_trigger(c);
- jffs2_erase_pending_blocks(c);
- jffs2_mark_erased_blocks(c);
+ jffs2_sum_exit(c);
+
+ jffs2_free_ino_caches(c);
+ jffs2_free_raw_node_refs(c);
+ jffs2_free_eraseblocks(c);
+ jffs2_flash_cleanup(c);
+ kfree(c->inocache_list);
+ if (c->mtd->sync)
+ c->mtd->sync(c->mtd);
+
+ D1(printk(KERN_DEBUG "jffs2_put_super returning\n"));
}
+static void jffs2_kill_sb(struct super_block *sb)
+{
+ struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+ if (!(sb->s_flags & MS_RDONLY))
+ jffs2_stop_garbage_collect_thread(c);
+ generic_shutdown_super(sb);
+ put_mtd_device(c->mtd);
+ kfree(c);
+}
-static DECLARE_FSTYPE_DEV(jffs2_fs_type, "jffs2", jffs2_read_super);
+static struct file_system_type jffs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "jffs2",
+ .get_sb = jffs2_get_sb,
+ .kill_sb = jffs2_kill_sb,
+};
static int __init init_jffs2_fs(void)
{
int ret;
- printk(KERN_NOTICE "JFFS2 version 2.1. (C) 2001 Red Hat, Inc., designed by Axis Communications AB.\n");
-
-#ifdef JFFS2_OUT_OF_KERNEL
- /* sanity checks. Could we do these at compile time? */
- if (sizeof(struct jffs2_sb_info) > sizeof (((struct super_block *)NULL)->u)) {
- printk(KERN_ERR "JFFS2 error: struct jffs2_sb_info (%d bytes) doesn't fit in the super_block union (%d bytes)\n",
- sizeof(struct jffs2_sb_info), sizeof (((struct super_block *)NULL)->u));
- return -EIO;
- }
-
- if (sizeof(struct jffs2_inode_info) > sizeof (((struct inode *)NULL)->u)) {
- printk(KERN_ERR "JFFS2 error: struct jffs2_inode_info (%d bytes) doesn't fit in the inode union (%d bytes)\n",
- sizeof(struct jffs2_inode_info), sizeof (((struct inode *)NULL)->u));
- return -EIO;
- }
+ printk(KERN_INFO "JFFS2 version 2.2."
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ " (NAND)"
#endif
-
- ret = jffs2_zlib_init();
+#ifdef CONFIG_JFFS2_SUMMARY
+ " (SUMMARY) "
+#endif
+ " (C) 2001-2003 Red Hat, Inc.\n");
+
+ jffs2_inode_cachep = kmem_cache_create("jffs2_i",
+ sizeof(struct jffs2_inode_info),
+ 0, SLAB_RECLAIM_ACCOUNT,
+ jffs2_i_init_once, NULL);
+ if (!jffs2_inode_cachep) {
+ printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n");
+ return -ENOMEM;
+ }
+ ret = jffs2_compressors_init();
if (ret) {
- printk(KERN_ERR "JFFS2 error: Failed to initialise zlib workspaces\n");
+ printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n");
goto out;
}
ret = jffs2_create_slab_caches();
if (ret) {
printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n");
- goto out_zlib;
+ goto out_compressors;
}
ret = register_filesystem(&jffs2_fs_type);
if (ret) {
@@ -380,17 +353,19 @@ static int __init init_jffs2_fs(void)
out_slab:
jffs2_destroy_slab_caches();
- out_zlib:
- jffs2_zlib_exit();
+ out_compressors:
+ jffs2_compressors_exit();
out:
+ kmem_cache_destroy(jffs2_inode_cachep);
return ret;
}
static void __exit exit_jffs2_fs(void)
{
- jffs2_destroy_slab_caches();
- jffs2_zlib_exit();
unregister_filesystem(&jffs2_fs_type);
+ jffs2_destroy_slab_caches();
+ jffs2_compressors_exit();
+ kmem_cache_destroy(jffs2_inode_cachep);
}
module_init(init_jffs2_fs);
@@ -398,5 +373,5 @@ module_exit(exit_jffs2_fs);
MODULE_DESCRIPTION("The Journalling Flash File System, v2");
MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for
+MODULE_LICENSE("GPL"); // Actually dual-licensed, but it doesn't matter for
// the sake of this tag. It's Free Software.
diff --git a/linux-2.4.x/fs/jffs2/symlink-v24.c b/linux-2.4.x/fs/jffs2/symlink-v24.c
new file mode 100644
index 0000000..0643f10
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/symlink-v24.c
@@ -0,0 +1,52 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: symlink-v24.c,v 1.19 2005/11/07 11:14:42 gleixner Exp $
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include "nodelist.h"
+
+int jffs2_readlink(struct dentry *dentry, char *buffer, int buflen);
+int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
+
+struct inode_operations jffs2_symlink_inode_operations =
+{
+ .readlink = jffs2_readlink,
+ .follow_link = jffs2_follow_link,
+ .setattr = jffs2_setattr
+};
+
+int jffs2_readlink(struct dentry *dentry, char *buffer, int buflen)
+{
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
+
+ if (!f->target) {
+ printk(KERN_ERR "jffs2_readlink(): can't find symlink target\n");
+ return -EIO;
+ }
+
+ return vfs_readlink(dentry, buffer, buflen, (char *)f->target);
+}
+
+int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
+
+ if (!f->target) {
+ printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n");
+ return -EIO;
+ }
+
+ return vfs_follow_link(nd, (char *)f->target);
+}
diff --git a/linux-2.4.x/fs/jffs2/symlink.c b/linux-2.4.x/fs/jffs2/symlink.c
index e1f7dec..d55754f 100644
--- a/linux-2.4.x/fs/jffs2/symlink.c
+++ b/linux-2.4.x/fs/jffs2/symlink.c
@@ -3,35 +3,11 @@
*
* Copyright (C) 2001, 2002 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: symlink.c,v 1.5.2.1 2002/01/15 10:39:06 dwmw2 Exp $
+ * $Id: symlink.c,v 1.19 2005/11/07 11:14:42 gleixner Exp $
*
*/
@@ -39,72 +15,49 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
-#include <linux/jffs2.h>
+#include <linux/namei.h>
#include "nodelist.h"
-int jffs2_readlink(struct dentry *dentry, char *buffer, int buflen);
-int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
+static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
struct inode_operations jffs2_symlink_inode_operations =
-{
- readlink: jffs2_readlink,
- follow_link: jffs2_follow_link,
- setattr: jffs2_setattr
+{
+ .readlink = generic_readlink,
+ .follow_link = jffs2_follow_link,
+ .setattr = jffs2_setattr
};
-static char *jffs2_getlink(struct dentry *dentry)
+static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(dentry->d_inode);
- char *buf;
- int ret;
-
- down(&f->sem);
- if (!f->metadata) {
- up(&f->sem);
- printk(KERN_NOTICE "No metadata for symlink inode #%lu\n", dentry->d_inode->i_ino);
- return ERR_PTR(-EINVAL);
- }
- buf = kmalloc(f->metadata->size+1, GFP_USER);
- if (!buf) {
- up(&f->sem);
- return ERR_PTR(-ENOMEM);
+ char *p = (char *)f->target;
+
+ /*
+ * We don't acquire the f->sem mutex here since the only data we
+ * use is f->target.
+ *
+ * 1. If we are here, the inode has already been built and f->target
+ * has to point to the target path.
+ * 2. Nobody else uses f->target (if the inode is a symlink's inode). The
+ * exception is the inode freeing function, which frees f->target. But
+ * it can't be called while we are here, i.e. before the VFS has
+ * stopped using the f->target string which we provide via the
+ * nd_set_link() call.
+ */
+
+ if (!p) {
+ printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n");
+ p = ERR_PTR(-EIO);
}
- buf[f->metadata->size]=0;
+ D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target));
- ret = jffs2_read_dnode(JFFS2_SB_INFO(dentry->d_inode->i_sb), f->metadata, buf, 0, f->metadata->size);
- up(&f->sem);
- if (ret) {
- kfree(buf);
- return ERR_PTR(ret);
- }
- return buf;
+ nd_set_link(nd, p);
+ /*
+ * We will unlock the f->sem mutex but VFS will use the f->target string. This is safe
+ * since the only way that may cause f->target to be changed is iput() operation.
+ * But VFS will not use f->target after iput() has been called.
+ */
+ return NULL;
}
-int jffs2_readlink(struct dentry *dentry, char *buffer, int buflen)
-{
- unsigned char *kbuf;
- int ret;
- kbuf = jffs2_getlink(dentry);
- if (IS_ERR(kbuf))
- return PTR_ERR(kbuf);
-
- ret = vfs_readlink(dentry, buffer, buflen, kbuf);
- kfree(kbuf);
- return ret;
-}
-
-int jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
- unsigned char *buf;
- int ret;
-
- buf = jffs2_getlink(dentry);
-
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = vfs_follow_link(nd, buf);
- kfree(buf);
- return ret;
-}
diff --git a/linux-2.4.x/fs/jffs2/wbuf.c b/linux-2.4.x/fs/jffs2/wbuf.c
new file mode 100644
index 0000000..2324229
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/wbuf.c
@@ -0,0 +1,1372 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001-2003 Red Hat, Inc.
+ * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: wbuf.c,v 1.110 2006/02/09 16:13:35 dwmw2 Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/crc32.h>
+#include <linux/mtd/nand.h>
+#include <linux/jiffies.h>
+
+#include "nodelist.h"
+
+/* For testing write failures */
+#undef BREAKME
+#undef BREAKMEHEADER
+
+#ifdef BREAKME
+static unsigned char *brokenbuf;
+#endif
+
+#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
+#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
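+
+/*
+ * Worked example (illustrative): with c->wbuf_pagesize == 512, an offset of
+ * 0x1234 gives PAGE_DIV(0x1234) == 0x1200 (the start of the containing page)
+ * and PAGE_MOD(0x1234) == 0x34 (the offset within that page).
+ */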
+
+/* max. erase failures before we mark a block bad */
+#define MAX_ERASE_FAILURES 2
+
+struct jffs2_inodirty {
+ uint32_t ino;
+ struct jffs2_inodirty *next;
+};
+
+static struct jffs2_inodirty inodirty_nomem;
+
+static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
+{
+ struct jffs2_inodirty *this = c->wbuf_inodes;
+
+ /* If a malloc failed, consider _everything_ dirty */
+ if (this == &inodirty_nomem)
+ return 1;
+
+ /* If ino == 0, _any_ non-GC writes mean 'yes' */
+ if (this && !ino)
+ return 1;
+
+ /* Look to see if the inode in question is pending in the wbuf */
+ while (this) {
+ if (this->ino == ino)
+ return 1;
+ this = this->next;
+ }
+ return 0;
+}
+
+static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
+{
+ struct jffs2_inodirty *this;
+
+ this = c->wbuf_inodes;
+
+ if (this != &inodirty_nomem) {
+ while (this) {
+ struct jffs2_inodirty *next = this->next;
+ kfree(this);
+ this = next;
+ }
+ }
+ c->wbuf_inodes = NULL;
+}
+
+static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
+{
+ struct jffs2_inodirty *new;
+
+ /* Mark the superblock dirty so that kupdated will flush... */
+ jffs2_erase_pending_trigger(c);
+
+ if (jffs2_wbuf_pending_for_ino(c, ino))
+ return;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
+ jffs2_clear_wbuf_ino_list(c);
+ c->wbuf_inodes = &inodirty_nomem;
+ return;
+ }
+ new->ino = ino;
+ new->next = c->wbuf_inodes;
+ c->wbuf_inodes = new;
+ return;
+}
+
+static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
+{
+ struct list_head *this, *next;
+ static int n;
+
+ if (list_empty(&c->erasable_pending_wbuf_list))
+ return;
+
+ list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
+ struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
+
+ D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
+ list_del(this);
+ if ((jiffies + (n++)) & 127) {
+ /* Most of the time, we just erase it immediately. Otherwise we
+ spend ages scanning it on mount, etc. */
+ D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
+ list_add_tail(&jeb->list, &c->erase_pending_list);
+ c->nr_erasing_blocks++;
+ jffs2_erase_pending_trigger(c);
+ } else {
+ /* Sometimes, however, we leave it elsewhere so it doesn't get
+ immediately reused, and we spread the load a bit. */
+ D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
+ list_add_tail(&jeb->list, &c->erasable_list);
+ }
+ }
+}
+
+#define REFILE_NOTEMPTY 0
+#define REFILE_ANYWAY 1
+
+static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
+{
+ D1(printk("About to refile bad block at %08x\n", jeb->offset));
+
+ /* File the existing block on the bad_used_list.... */
+ if (c->nextblock == jeb)
+ c->nextblock = NULL;
+ else { /* Not sure this should ever happen... need more coffee */
+ list_del(&jeb->list);
+ jffs2_remove_from_hash_table(c, jeb, 1);
+ }
+ if (jeb->first_node) {
+ D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
+ list_add(&jeb->list, &c->bad_used_list);
+ } else {
+ BUG_ON(allow_empty == REFILE_NOTEMPTY);
+ /* It has to have had some nodes or we couldn't be here */
+ D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
+ list_add(&jeb->list, &c->erase_pending_list);
+ c->nr_erasing_blocks++;
+ jffs2_erase_pending_trigger(c);
+ }
+
+ /* Adjust its size counts accordingly */
+ c->wasted_size += jeb->free_size;
+ c->free_size -= jeb->free_size;
+ jeb->wasted_size += jeb->free_size;
+ jeb->free_size = 0;
+
+ jffs2_dbg_dump_block_lists_nolock(c);
+ jffs2_dbg_acct_sanity_check_nolock(c,jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+}
+
+/* Recover from failure to write wbuf. Recover the nodes up to the
+ * wbuf, not the one which we were starting to try to write. */
+
+static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
+{
+ struct jffs2_eraseblock *jeb, *new_jeb;
+ struct jffs2_raw_node_ref **first_raw, **raw;
+ size_t retlen;
+ int ret;
+ unsigned char *buf;
+ uint32_t start, end, ofs, len;
+
+ spin_lock(&c->erase_completion_lock);
+
+ jeb = c->blocks[c->wbuf_ofs / c->sector_size];
+
+ jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
+
+ /* Find the first node to be recovered, by skipping over every
+ node which ends before the wbuf starts, or which is obsolete. */
+ first_raw = &jeb->first_node;
+ while (*first_raw &&
+ (ref_obsolete(*first_raw) ||
+ (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
+ D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
+ ref_offset(*first_raw), ref_flags(*first_raw),
+ (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
+ c->wbuf_ofs));
+ first_raw = &(*first_raw)->next_phys;
+ }
+
+ if (!*first_raw) {
+ /* All nodes were obsolete. Nothing to recover. */
+ D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
+ spin_unlock(&c->erase_completion_lock);
+ return;
+ }
+
+ start = ref_offset(*first_raw);
+ end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
+
+ /* Find the last node to be recovered */
+ raw = first_raw;
+ while ((*raw)) {
+ if (!ref_obsolete(*raw))
+ end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
+
+ raw = &(*raw)->next_phys;
+ }
+ spin_unlock(&c->erase_completion_lock);
+
+ D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));
+
+ buf = NULL;
+ if (start < c->wbuf_ofs) {
+ /* First affected node was already partially written.
+ * Attempt to reread the old data into our buffer. */
+
+ buf = kmalloc(end - start, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
+
+ goto read_failed;
+ }
+
+ /* Do the read... */
+ if (jffs2_ebh_oob(c))
+ ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
+ else
+ ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
+
+ if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
+ /* ECC recovered */
+ ret = 0;
+ }
+ if (ret || retlen != c->wbuf_ofs - start) {
+ printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
+
+ kfree(buf);
+ buf = NULL;
+ read_failed:
+ first_raw = &(*first_raw)->next_phys;
+ /* If this was the only node to be recovered, give up */
+ if (!(*first_raw))
+ return;
+
+ /* It wasn't. Go on and try to recover nodes complete in the wbuf */
+ start = ref_offset(*first_raw);
+ } else {
+ /* Read succeeded. Copy the remaining data from the wbuf */
+ memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
+ }
+ }
+ /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
+ Either 'buf' contains the data, or we find it in the wbuf */
+
+
+ /* ... and get an allocation of space from a shiny new block instead */
+ ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE);
+ if (ret) {
+ printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
+ kfree(buf);
+ return;
+ }
+ if (end-start >= c->wbuf_pagesize) {
+ /* Need to do another write immediately, but it's possible
+ that this is just because the wbuf itself is completely
+ full, and there's nothing earlier read back from the
+ flash. Hence 'buf' isn't necessarily what we're writing
+ from. */
+ unsigned char *rewrite_buf = buf?:c->wbuf;
+ uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
+
+ D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
+ towrite, ofs));
+
+#ifdef BREAKMEHEADER
+ static int breakme;
+ if (breakme++ == 20) {
+ printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
+ breakme = 0;
+ c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
+ brokenbuf, NULL, c->oobinfo);
+ ret = -EIO;
+ } else
+#endif
+ if (jffs2_ebh_oob(c))
+ ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
+ rewrite_buf, NULL, c->oobinfo);
+ else
+ ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, rewrite_buf);
+
+ if (ret || retlen != towrite) {
+ /* Argh. We tried. Really we did. */
+ printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
+ kfree(buf);
+
+ if (retlen) {
+ struct jffs2_raw_node_ref *raw2;
+
+ raw2 = jffs2_alloc_raw_node_ref();
+ if (!raw2)
+ return;
+
+ raw2->flash_offset = ofs | REF_OBSOLETE;
+ raw2->__totlen = ref_totlen(c, jeb, *first_raw);
+ raw2->next_phys = NULL;
+ raw2->next_in_ino = NULL;
+
+ jffs2_add_physical_node_ref(c, raw2);
+ }
+ return;
+ }
+ printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
+
+ c->wbuf_len = (end - start) - towrite;
+ c->wbuf_ofs = ofs + towrite;
+ memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
+ /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
+ if (buf)
+ kfree(buf);
+ } else {
+ /* OK, now we're left with the dregs in whichever buffer we're using */
+ if (buf) {
+ memcpy(c->wbuf, buf, end-start);
+ kfree(buf);
+ } else {
+ memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
+ }
+ c->wbuf_ofs = ofs;
+ c->wbuf_len = end - start;
+ }
+
+ /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
+ new_jeb = c->blocks[ofs / c->sector_size];
+
+ spin_lock(&c->erase_completion_lock);
+ if (new_jeb->first_node) {
+ /* Odd, but possible with ST flash later maybe */
+ new_jeb->last_node->next_phys = *first_raw;
+ } else {
+ new_jeb->first_node = *first_raw;
+ }
+
+ raw = first_raw;
+ while (*raw) {
+ uint32_t rawlen = ref_totlen(c, jeb, *raw);
+
+ D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
+ rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
+
+ if (ref_obsolete(*raw)) {
+ /* Shouldn't really happen much */
+ new_jeb->dirty_size += rawlen;
+ new_jeb->free_size -= rawlen;
+ c->dirty_size += rawlen;
+ } else {
+ new_jeb->used_size += rawlen;
+ new_jeb->free_size -= rawlen;
+ jeb->dirty_size += rawlen;
+ jeb->used_size -= rawlen;
+ c->dirty_size += rawlen;
+ }
+ c->free_size -= rawlen;
+ (*raw)->flash_offset = ofs | ref_flags(*raw);
+ ofs += rawlen;
+ new_jeb->last_node = *raw;
+
+ raw = &(*raw)->next_phys;
+ }
+
+ /* Fix up the original jeb now it's on the bad_list */
+ *first_raw = NULL;
+ if (first_raw == &jeb->first_node) {
+ jeb->last_node = NULL;
+ D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
+ list_del(&jeb->list);
+ list_add(&jeb->list, &c->erase_pending_list);
+ c->nr_erasing_blocks++;
+ jffs2_erase_pending_trigger(c);
+ }
+ else
+ jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
+
+ jffs2_dbg_acct_sanity_check_nolock(c, jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
+
+ jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
+ jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
+
+ spin_unlock(&c->erase_completion_lock);
+
+ D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
+}
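+
+/*
+ * Summary of the recovery path above: fresh space is reserved with
+ * jffs2_reserve_space_gc(), as much of the affected data as fills whole
+ * pages is rewritten immediately, the remainder stays in the write buffer,
+ * and the jffs2_raw_node_ref entries are then moved so the accounting
+ * follows the nodes to the new block while the failing block is refiled.
+ */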
+
+/* Meaning of pad argument:
+ 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
+ 1: Pad, do not adjust nextblock free_size
+ 2: Pad, adjust nextblock free_size
+*/
+#define NOPAD 0
+#define PAD_NOACCOUNT 1
+#define PAD_ACCOUNTING 2
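+
+/*
+ * Usage in this file: jffs2_flush_wbuf_pad() flushes with PAD_NOACCOUNT,
+ * jffs2_flush_wbuf_gc() flushes with PAD_ACCOUNTING, and jffs2_flash_writev()
+ * uses NOPAD when the buffer fills exactly to a page boundary (and
+ * PAD_NOACCOUNT when it has to flush before switching to a new block).
+ */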
+
+static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
+{
+ int ret;
+ size_t retlen;
+
+ /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
+ del_timer() the timer we never initialised. */
+ if (!jffs2_is_writebuffered(c))
+ return 0;
+
+ if (!down_trylock(&c->alloc_sem)) {
+ up(&c->alloc_sem);
+ printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
+ BUG();
+ }
+
+ if (!c->wbuf_len) /* already checked c->wbuf above */
+ return 0;
+
+ /* Claim the remaining space on the page. This happens if we switch
+ to a new block, or if fsync forces us to flush the write buffer;
+ if we switch to the next page we will not have enough remaining
+ space for this.
+ */
+ if (pad ) {
+ c->wbuf_len = PAD(c->wbuf_len);
+
+ /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
+ with 8 byte page size */
+ memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
+
+ if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
+ struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
+ padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
+ padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
+ padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
+ }
+ }
+ /* else jffs2_flash_writev has actually filled in the rest of the
+ buffer for us, and will deal with the node refs etc. later. */
+
+#ifdef BREAKME
+ static int breakme;
+ if (breakme++ == 20) {
+ printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
+ breakme = 0;
+ c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
+ &retlen, brokenbuf, NULL, c->oobinfo);
+ ret = -EIO;
+ } else
+#endif
+
+ if (jffs2_ebh_oob(c))
+ ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
+ else
+ ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
+
+ if (ret || retlen != c->wbuf_pagesize) {
+ if (ret)
+ printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
+ else {
+ printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
+ retlen, c->wbuf_pagesize);
+ ret = -EIO;
+ }
+
+ jffs2_wbuf_recover(c);
+
+ return ret;
+ }
+
+ spin_lock(&c->erase_completion_lock);
+
+ /* Adjust free size of the block if we padded. */
+ if (pad) {
+ struct jffs2_eraseblock *jeb;
+
+ jeb = c->blocks[c->wbuf_ofs / c->sector_size];
+
+ D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
+ (jeb==c->nextblock)?"next":"", jeb->offset));
+
+ /* wbuf_pagesize - wbuf_len is the amount of space that's to be
+ padded. If there is less free space in the block than that,
+ something screwed up */
+ if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) {
+ printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
+ c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len);
+ printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
+ jeb->offset, jeb->free_size);
+ BUG();
+ }
+ jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len);
+ c->free_size -= (c->wbuf_pagesize - c->wbuf_len);
+ jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
+ c->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
+ }
+
+ /* Stick any now-obsoleted blocks on the erase_pending_list */
+ jffs2_refile_wbuf_blocks(c);
+ jffs2_clear_wbuf_ino_list(c);
+ spin_unlock(&c->erase_completion_lock);
+
+ memset(c->wbuf,0xff,c->wbuf_pagesize);
+ /* adjust write buffer offset, else we get a non-contiguous write bug */
+ c->wbuf_ofs += c->wbuf_pagesize;
+ c->wbuf_len = 0;
+ return 0;
+}
+
+/* Trigger garbage collection to flush the write-buffer.
+ If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
+ outstanding. If ino arg non-zero, do it only if a write for the
+ given inode is outstanding. */
+int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
+{
+ uint32_t old_wbuf_ofs;
+ uint32_t old_wbuf_len;
+ int ret = 0;
+
+ D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
+
+ if (!c->wbuf)
+ return 0;
+
+ down(&c->alloc_sem);
+ if (!jffs2_wbuf_pending_for_ino(c, ino)) {
+ D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
+ up(&c->alloc_sem);
+ return 0;
+ }
+
+ old_wbuf_ofs = c->wbuf_ofs;
+ old_wbuf_len = c->wbuf_len;
+
+ if (c->unchecked_size) {
+ /* GC won't make any progress for a while */
+ D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
+ down_write(&c->wbuf_sem);
+ ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
+ /* retry flushing wbuf in case jffs2_wbuf_recover
+ left some data in the wbuf */
+ if (ret)
+ ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
+ up_write(&c->wbuf_sem);
+ } else while (old_wbuf_len &&
+ old_wbuf_ofs == c->wbuf_ofs) {
+
+ up(&c->alloc_sem);
+
+ D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
+
+ ret = jffs2_garbage_collect_pass(c);
+ if (ret) {
+ /* GC failed. Flush it with padding instead */
+ down(&c->alloc_sem);
+ down_write(&c->wbuf_sem);
+ ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
+ /* retry flushing wbuf in case jffs2_wbuf_recover
+ left some data in the wbuf */
+ if (ret)
+ ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
+ up_write(&c->wbuf_sem);
+ break;
+ }
+ down(&c->alloc_sem);
+ }
+
+ D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
+
+ up(&c->alloc_sem);
+ return ret;
+}
+
+/* Pad write-buffer to end and write it, wasting space. */
+int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
+{
+ int ret;
+
+ if (!c->wbuf)
+ return 0;
+
+ down_write(&c->wbuf_sem);
+ ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
+ /* retry - maybe wbuf recover left some data in wbuf. */
+ if (ret)
+ ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
+ up_write(&c->wbuf_sem);
+
+ return ret;
+}
+int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
+{
+ struct kvec outvecs[3];
+ uint32_t totlen = 0;
+ uint32_t split_ofs = 0;
+ uint32_t old_totlen;
+ int ret, splitvec = -1;
+ int invec, outvec;
+ size_t wbuf_retlen;
+ unsigned char *wbuf_ptr;
+ size_t donelen = 0;
+ uint32_t outvec_to = to;
+
+ /* If not NAND flash, don't bother */
+ if (!jffs2_is_writebuffered(c))
+ return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
+
+ down_write(&c->wbuf_sem);
+
+ /* If wbuf_ofs is not initialized, set it to target address */
+ if (c->wbuf_ofs == 0xFFFFFFFF) {
+ c->wbuf_ofs = PAGE_DIV(to);
+ c->wbuf_len = PAGE_MOD(to);
+ memset(c->wbuf,0xff,c->wbuf_pagesize);
+ }
+
+ /* Fixup the wbuf if we are moving to a new eraseblock. The checks below
+ fail for ECC'd NOR because cleanmarker == 16, so a block starts at
+ xxx0010. */
+ if (jffs2_nor_ecc(c) || jffs2_nor_wbuf_flash(c)) {
+ if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) {
+ c->wbuf_ofs = PAGE_DIV(to);
+ c->wbuf_len = PAGE_MOD(to);
+ memset(c->wbuf,0xff,c->wbuf_pagesize);
+ }
+ }
+
+ /* Sanity checks on target address.
+ It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
+ and it's permitted to write at the beginning of a new
+ erase block. Anything else, and you die.
+ New block starts at xxx000c (0-b = block header)
+ */
+ if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
+ /* It's a write to a new block */
+ if (c->wbuf_len) {
+ D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs));
+ ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
+ if (ret) {
+ /* the underlying layer has to check wbuf_len to do the cleanup */
+ D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
+ *retlen = 0;
+ goto exit;
+ }
+ }
+ /* set pointer to new block */
+ c->wbuf_ofs = PAGE_DIV(to);
+ c->wbuf_len = PAGE_MOD(to);
+ }
+
+ if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
+ /* We're not writing immediately after the writebuffer. Bad. */
+ printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to);
+ if (c->wbuf_len)
+ printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
+ c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
+ BUG();
+ }
+
+ /* Note outvecs[3] above. We know count is never greater than 2 */
+ if (count > 2) {
+ printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count);
+ BUG();
+ }
+
+ invec = 0;
+ outvec = 0;
+
+ /* Fill writebuffer first, if already in use */
+ if (c->wbuf_len) {
+ uint32_t invec_ofs = 0;
+
+ /* adjust alignment offset */
+ if (c->wbuf_len != PAGE_MOD(to)) {
+ c->wbuf_len = PAGE_MOD(to);
+ /* take care of alignment to next page */
+ if (!c->wbuf_len)
+ c->wbuf_len = c->wbuf_pagesize;
+ }
+
+ while(c->wbuf_len < c->wbuf_pagesize) {
+ uint32_t thislen;
+
+ if (invec == count)
+ goto alldone;
+
+ thislen = c->wbuf_pagesize - c->wbuf_len;
+
+ if (thislen >= invecs[invec].iov_len)
+ thislen = invecs[invec].iov_len;
+
+ invec_ofs = thislen;
+
+ memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
+ c->wbuf_len += thislen;
+ donelen += thislen;
+ /* Get the next invec if the current one did not fill the buffer */
+ if (c->wbuf_len < c->wbuf_pagesize)
+ invec++;
+ }
+
+ /* write buffer is full, flush buffer */
+ ret = __jffs2_flush_wbuf(c, NOPAD);
+ if (ret) {
+ /* the underlying layer has to check wbuf_len to do the cleanup */
+ D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
+ /* Retlen zero to make sure our caller doesn't mark the space dirty.
+ We've already done everything that's necessary */
+ *retlen = 0;
+ goto exit;
+ }
+ outvec_to += donelen;
+ c->wbuf_ofs = outvec_to;
+
+ /* All invecs done ? */
+ if (invec == count)
+ goto alldone;
+
+ /* Set up the first outvec, containing the remainder of the
+ invec we partially used */
+ if (invecs[invec].iov_len > invec_ofs) {
+ outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
+ totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
+ if (totlen > c->wbuf_pagesize) {
+ splitvec = outvec;
+ split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
+ }
+ outvec++;
+ }
+ invec++;
+ }
+
+ /* OK, now we've flushed the wbuf and the start of the bits
+ we have been asked to write, now to write the rest.... */
+
+ /* totlen holds the amount of data still to be written */
+ old_totlen = totlen;
+ for ( ; invec < count; invec++,outvec++ ) {
+ outvecs[outvec].iov_base = invecs[invec].iov_base;
+ totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
+ if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
+ splitvec = outvec;
+ split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
+ old_totlen = totlen;
+ }
+ }
+
+ /* Now the outvecs array holds all the remaining data to write */
+ /* Up to splitvec,split_ofs is to be written immediately. The rest
+ goes into the (now-empty) wbuf */
+
+ if (splitvec != -1) {
+ uint32_t remainder;
+
+ remainder = outvecs[splitvec].iov_len - split_ofs;
+ outvecs[splitvec].iov_len = split_ofs;
+
+ /* We did cross a page boundary, so we write some now */
+ if (jffs2_ebh_oob(c))
+ ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
+ else
+ ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
+
+ if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
+ /* At this point we have no problem,
+ c->wbuf is empty. However refile nextblock to avoid
+ writing again to same address.
+ */
+ struct jffs2_eraseblock *jeb;
+
+ spin_lock(&c->erase_completion_lock);
+
+ jeb = c->blocks[outvec_to / c->sector_size];
+ jffs2_block_refile(c, jeb, REFILE_ANYWAY);
+
+ *retlen = 0;
+ spin_unlock(&c->erase_completion_lock);
+ goto exit;
+ }
+
+ donelen += wbuf_retlen;
+ c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);
+
+ if (remainder) {
+ outvecs[splitvec].iov_base += split_ofs;
+ outvecs[splitvec].iov_len = remainder;
+ } else {
+ splitvec++;
+ }
+
+ } else {
+ splitvec = 0;
+ }
+
+ /* Now splitvec points to the start of the bits we have to copy
+ into the wbuf */
+ wbuf_ptr = c->wbuf;
+
+ for ( ; splitvec < outvec; splitvec++) {
+ /* Don't copy the wbuf into itself */
+ if (outvecs[splitvec].iov_base == c->wbuf)
+ continue;
+ memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
+ wbuf_ptr += outvecs[splitvec].iov_len;
+ donelen += outvecs[splitvec].iov_len;
+ }
+ c->wbuf_len = wbuf_ptr - c->wbuf;
+
+ /* If there's a remainder in the wbuf and it's a non-GC write,
+ remember that the wbuf affects this ino */
+alldone:
+ *retlen = donelen;
+
+ if (jffs2_sum_active()) {
+ int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
+ if (res) {
+ /* don't return with wbuf_sem still held */
+ ret = res;
+ goto exit;
+ }
+ }
+
+ if (c->wbuf_len && ino)
+ jffs2_wbuf_dirties_inode(c, ino);
+
+ ret = 0;
+
+exit:
+ up_write(&c->wbuf_sem);
+ return ret;
+}
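+
+/*
+ * Worked example for the splitting above (illustrative): with a 512-byte
+ * wbuf_pagesize and 100 bytes already buffered, a single 1000-byte vector
+ * first tops up and flushes the buffered page (412 bytes), then has one full
+ * 512-byte page written directly, and leaves the trailing 76 bytes sitting
+ * in the write buffer for a later flush.
+ */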
+
+/*
+ * This is the entry point for flash writes.
+ * Check if we are working on NAND flash; if so, build a kvec and write it via writev.
+ */
+int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
+{
+ struct kvec vecs[1];
+
+ if (!jffs2_is_writebuffered(c))
+ return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
+
+ vecs[0].iov_base = (unsigned char *) buf;
+ vecs[0].iov_len = len;
+ return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
+}
+
+/*
+ Handle readback from writebuffer and ECC failure return
+*/
+int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
+{
+ loff_t orbf = 0, owbf = 0, lwbf = 0;
+ int ret;
+
+ if (!jffs2_is_writebuffered(c))
+ return c->mtd->read(c->mtd, ofs, len, retlen, buf);
+
+ /* Read flash */
+ down_read(&c->wbuf_sem);
+ if (jffs2_ebh_oob(c))
+ ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo);
+ else
+ ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
+
+ if ( (ret == -EBADMSG) && (*retlen == len) ) {
+ printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
+ len, ofs);
+ /*
+ * We have the raw data without ECC correction in the buffer; maybe
+ * we are lucky and all of the data, or parts of it, are correct. We
+ * check the node. If the data are corrupted, the node check will
+ * sort it out. We keep this block: it will fail on write or erase
+ * and then we mark it bad. Or should we do that now? But we should
+ * give it a chance. Maybe we had a system crash or power loss
+ * before the ECC write or an erase was completed.
+ * So we return success. :)
+ */
+ ret = 0;
+ }
+
+ /* if no writebuffer available or write buffer empty, return */
+ if (!c->wbuf_pagesize || !c->wbuf_len)
+ goto exit;
+
+ /* if we read in a different block, return */
+ if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
+ goto exit;
+
+ if (ofs >= c->wbuf_ofs) {
+ owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */
+ if (owbf > c->wbuf_len) /* is read beyond write buffer ? */
+ goto exit;
+ lwbf = c->wbuf_len - owbf; /* number of bytes to copy */
+ if (lwbf > len)
+ lwbf = len;
+ } else {
+ orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
+ if (orbf > len) /* does the read end before the write buffer ? */
+ goto exit;
+ lwbf = len - orbf; /* number of bytes to copy */
+ if (lwbf > c->wbuf_len)
+ lwbf = c->wbuf_len;
+ }
+ if (lwbf > 0)
+ memcpy(buf+orbf,c->wbuf+owbf,lwbf);
+
+exit:
+ up_read(&c->wbuf_sem);
+ return ret;
+}
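+
+/*
+ * Worked example for the merge above (illustrative values): if the write
+ * buffer covers 0x1000..0x107f (wbuf_ofs == 0x1000, wbuf_len == 0x80) and a
+ * caller reads 0x100 bytes from 0x1040, then owbf == 0x40, orbf == 0 and
+ * lwbf == 0x40, so the first 0x40 bytes of the caller's buffer are replaced
+ * with the newer data still held in the write buffer.
+ */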
+
+/*
+ * Check if jffs2_flash_read was successful
+ */
+int jffs2_flash_read_safe(struct jffs2_sb_info *c, uint32_t ofs, int len, u_char *buf)
+{
+ size_t retlen;
+ int err = jffs2_flash_read(c, ofs, len, &retlen, buf);
+
+ /* did the read succeed? */
+ if (unlikely(err)) {
+ JFFS2_ERROR("can not read %d bytes from 0x%08x, error code: %d.\n", len, ofs, err);
+ return -EIO;
+ }
+
+ /* did we read all? */
+ if (unlikely(retlen != len)) {
+ JFFS2_ERROR("short read at 0x%08x: %d instead of %d.\n", ofs, retlen, len);
+ return -EIO;
+ }
+ return 0;
+}
+
+/*
+ * Check if the out-of-band area is empty
+ */
+
+int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t data_len)
+{
+ size_t offset, retlen;
+ uint32_t i = 0, j, oob_nr;
+ unsigned char *buf;
+ int oob_size, ret;
+
+ offset = jeb->offset;
+ oob_size = c->mtd->oobsize;
+ oob_nr = (data_len+c->fsdata_len-1)/c->fsdata_len;
+ if (oob_nr < 4) oob_nr = 4;
+ buf = kmalloc(oob_size * oob_nr, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ ret = c->mtd->read_oob(c->mtd, offset, oob_size * oob_nr, &retlen, buf);
+
+ for (i=0; i<oob_nr; i++) {
+ for (j=0; j<oob_size; j++) {
+ if (data_len && j>=c->fsdata_pos && j<c->fsdata_pos + c->fsdata_len) {
+ data_len--;
+ continue;
+ }
+ if (buf[i*oob_size+j] != 0xFF) {
+ ret = 1;
+ goto out;
+ }
+ }
+ }
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+/*
+ * Scan for a valid cleanmarker and for bad blocks.
+ * For virtual blocks (concatenated physical blocks) check the cleanmarker
+ * only in the first page of the first physical block, but scan all
+ * physical blocks for bad blocks.
+ */
+int jffs2_check_nand_cleanmarker_ebh (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *data_len)
+{
+ size_t offset, retlen;
+ int oob_size;
+ uint32_t oob_nr, total_len;
+ unsigned char *buf;
+ int ret;
+ struct jffs2_unknown_node *n, un;
+ struct jffs2_raw_ebh eh;
+ uint32_t read_in = 0, i = 0, copy_len, node_crc;
+
+ offset = jeb->offset;
+ *data_len = 0;
+
+ if (c->mtd->block_isbad (c->mtd, offset)) {
+ D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker_ebh(): Bad block at %08x\n", jeb->offset));
+ return 2;
+ }
+
+ oob_size = c->mtd->oobsize;
+ oob_nr = (sizeof(struct jffs2_raw_ebh)+c->fsdata_len-1)/c->fsdata_len;
+ total_len = oob_size * oob_nr;
+
+ buf = kmalloc(total_len, GFP_KERNEL);
+ if (!buf) {
+ return -ENOMEM;
+ }
+ ret = c->mtd->read_oob(c->mtd, offset, total_len, &retlen, buf);
+ if (ret) {
+ D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker_ebh(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
+ goto out;
+ }
+ if (retlen < total_len) {
+ D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker_ebh(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, total_len, jeb->offset));
+ ret = -EIO;
+ goto out;
+ }
+
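+	/*
+	 * The node header is stored in the per-page OOB free area, split
+	 * into chunks of at most c->fsdata_len bytes. Reassemble it into
+	 * 'un' before checking the magic and node type.
+	 */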
+ i = 0;
+ read_in = 0;
+ while (read_in < sizeof(struct jffs2_unknown_node)) {
+ copy_len = min_t(uint32_t, c->fsdata_len, sizeof(struct jffs2_unknown_node) - read_in);
+ memcpy((unsigned char *)&un + read_in, &buf[oob_size*i + c->fsdata_pos], copy_len);
+ read_in += copy_len;
+ i++;
+ }
+ n = &un;
+
+ if (je16_to_cpu(n->magic) != JFFS2_MAGIC_BITMASK) {
+ D1 (printk(KERN_WARNING "jffs2_check_nand_cleanmarker_ebh(): Cleanmarker node not detected in block at %08x\n", jeb->offset));
+ ret = 1;
+ goto out;
+ }
+
+ if (je16_to_cpu(n->nodetype) == JFFS2_NODETYPE_CLEANMARKER) {
+ if (je32_to_cpu(n->totlen) == 8) {
+ *data_len = 8;
+ ret = 0;
+ } else {
+ ret = 1;
+ }
+ goto out;
+ }else if (je16_to_cpu(n->nodetype) == JFFS2_NODETYPE_ERASEBLOCK_HEADER) {
+ /* Read the scattered data(in buf[]) into struct jffs2_raw_ebh */
+ i = 0;
+ read_in = 0;
+ while (read_in < sizeof(struct jffs2_raw_ebh)) {
+ copy_len = min_t(uint32_t, c->fsdata_len, sizeof(struct jffs2_raw_ebh) - read_in);
+ memcpy((unsigned char *)&eh + read_in, &buf[oob_size*i + c->fsdata_pos], copy_len);
+ read_in += copy_len;
+ i++;
+ }
+
+		node_crc = crc32(0, (unsigned char *)&eh + sizeof(struct jffs2_unknown_node) + 4, sizeof(struct jffs2_raw_ebh) - sizeof(struct jffs2_unknown_node) - 4);
+ if (node_crc != je32_to_cpu(eh.node_crc)) {
+ ret = 1;
+ goto out;
+ }
+
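+		/*
+		 * Feature-set checks: unknown incompat bits make the image
+		 * unusable, unknown rocompat bits only permit a read-only
+		 * mount; compat bits need no check here.
+		 */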
+ if ((JFFS2_EBH_INCOMPAT_FSET | eh.incompat_fset) != JFFS2_EBH_INCOMPAT_FSET) {
+			printk(KERN_NOTICE "The incompat_fset %d of the filesystem image EBH exceeds the incompat_fset %d "
+			       "supported by this JFFS2 module. Refusing to mount.\n", eh.incompat_fset, JFFS2_EBH_INCOMPAT_FSET);
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((JFFS2_EBH_ROCOMPAT_FSET | eh.rocompat_fset) != JFFS2_EBH_ROCOMPAT_FSET) {
+			printk(KERN_NOTICE "Read-only compatible EBH feature found at offset 0x%08x\n", jeb->offset);
+ if (!(jffs2_is_readonly(c))) {
+ ret = -EROFS;
+ goto out;
+ }
+ }
+
+ EBFLAGS_SET_EBH(jeb);
+ jeb->erase_count = je32_to_cpu(eh.erase_count);
+ record_erase_count(c, jeb);
+ *data_len = je32_to_cpu(eh.totlen);
+ ret = 0;
+ }else {
+ ret = 1;
+ }
+out:
+ kfree(buf);
+ return ret;
+}
+
+int jffs2_write_nand_ebh(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
+{
+ uint32_t i = 0, written = 0, write_len = 0;
+ int ret;
+ size_t retlen;
+ struct jffs2_raw_ebh ebh = {
+ .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
+ .nodetype = cpu_to_je16(JFFS2_NODETYPE_ERASEBLOCK_HEADER),
+ .totlen = cpu_to_je32(sizeof(struct jffs2_raw_ebh)),
+ .reserved = 0,
+ .compat_fset = JFFS2_EBH_COMPAT_FSET,
+ .incompat_fset = JFFS2_EBH_INCOMPAT_FSET,
+ .rocompat_fset = JFFS2_EBH_ROCOMPAT_FSET,
+ };
+
+ ebh.erase_count = cpu_to_je32(jeb->erase_count);
+
+ ebh.hdr_crc = cpu_to_je32(crc32(0, &ebh, sizeof(struct jffs2_unknown_node)-4));
+ ebh.node_crc = cpu_to_je32(crc32(0, (unsigned char *)&ebh + sizeof(struct jffs2_unknown_node) + 4,
+ sizeof(struct jffs2_raw_ebh) - sizeof(struct jffs2_unknown_node) - 4));
+
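+	/*
+	 * The EBH may not fit into a single page's OOB free area, so write
+	 * it in chunks of at most c->fsdata_len bytes, one chunk per page,
+	 * starting with the first page of the eraseblock.
+	 */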
+ while (written < sizeof(struct jffs2_raw_ebh)) {
+ write_len = min_t(uint32_t, c->fsdata_len, sizeof(struct jffs2_raw_ebh) - written);
+ ret = jffs2_flash_write_oob(c, jeb->offset + c->mtd->oobblock*i + c->fsdata_pos,
+ write_len, &retlen, (unsigned char *)&ebh + written);
+ if (ret || retlen != write_len) {
+ D1(printk(KERN_WARNING "jffs2_write_nand_ebh(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+ return ret;
+ }
+ written += write_len;
+ i++;
+ }
+ return 0;
+}
+
+/*
+ * On NAND we try to mark this block bad. If erasing it has already failed
+ * MAX_ERASE_FAILURES times, we finally mark it bad.
+ * Don't care about failures here: this block remains on the erase-pending
+ * or badblock list as long as nobody manipulates the flash with
+ * a bootloader or something like that.
+ */
+
+int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
+{
+ int ret;
+
+	/* If the failure count is still below the limit, just give the block another chance */
+ if( ++jeb->bad_count < MAX_ERASE_FAILURES)
+ return 0;
+
+ if (!c->mtd->block_markbad)
+ return 1; // What else can we do?
+
+ D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
+ ret = c->mtd->block_markbad(c->mtd, bad_offset);
+
+ if (ret) {
+ D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
+ return ret;
+ }
+ return 1;
+}
+
+#define NAND_JFFS2_OOB16_FSDALEN 8
+
+static struct nand_oobinfo jffs2_oobinfo_docecc = {
+ .useecc = MTD_NANDECC_PLACE,
+ .eccbytes = 6,
+ .eccpos = {0,1,2,3,4,5}
+};
+
+
+static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
+{
+ struct nand_oobinfo *oinfo = &c->mtd->oobinfo;
+
+	/* Do this only if we have an OOB area */
+ if (!c->mtd->oobsize)
+ return 0;
+
+ /* Cleanmarker is out-of-band, so inline size zero */
+ c->cleanmarker_size = 0;
+ c->ebh_size = 0;
+
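+	/*
+	 * c->fsdata_pos/c->fsdata_len describe the byte range inside each
+	 * page's OOB area that JFFS2 may use for its cleanmarker and
+	 * eraseblock header data.
+	 */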
+ /* Should we use autoplacement ? */
+ if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
+ D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
+ /* Get the position of the free bytes */
+ if (!oinfo->oobfree[0][1]) {
+ printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
+ return -ENOSPC;
+ }
+ c->fsdata_pos = oinfo->oobfree[0][0];
+ c->fsdata_len = oinfo->oobfree[0][1];
+ } else {
+ /* This is just a legacy fallback and should go away soon */
+ switch(c->mtd->ecctype) {
+ case MTD_ECC_RS_DiskOnChip:
+ printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
+ c->oobinfo = &jffs2_oobinfo_docecc;
+ c->fsdata_pos = 6;
+ c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
+ c->badblock_pos = 15;
+ break;
+
+ default:
+			D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/* To check if the OOB area has enough space for eraseblock header */
+static int jffs2_nand_check_oobspace_for_ebh(struct jffs2_sb_info *c)
+{
+ uint32_t pages_per_eraseblock, available_oob_space;
+
+ pages_per_eraseblock = c->sector_size/c->mtd->oobblock;
+ available_oob_space = c->fsdata_len * pages_per_eraseblock;
+ if (available_oob_space < sizeof(struct jffs2_raw_ebh)) {
+		printk(KERN_NOTICE "The OOB area (%d bytes) is not big enough to hold the eraseblock header (%zd bytes), refusing to mount.\n",
+		       available_oob_space, sizeof(struct jffs2_raw_ebh));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
+{
+ int res;
+
+ /* Initialise write buffer */
+ init_rwsem(&c->wbuf_sem);
+ c->wbuf_pagesize = c->mtd->oobblock;
+ c->wbuf_ofs = 0xFFFFFFFF;
+
+ c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+ if (!c->wbuf)
+ return -ENOMEM;
+
+	res = jffs2_nand_set_oobinfo(c);
+	if (res) {
+		kfree(c->wbuf);
+		c->wbuf = NULL;
+		return res;
+	}
+
+	res = jffs2_nand_check_oobspace_for_ebh(c);
+	if (res) {
+		kfree(c->wbuf);
+		c->wbuf = NULL;
+		return res;
+	}
+
+#ifdef BREAKME
+ if (!brokenbuf)
+ brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+ if (!brokenbuf) {
+ kfree(c->wbuf);
+ return -ENOMEM;
+ }
+ memset(brokenbuf, 0xdb, c->wbuf_pagesize);
+#endif
+ return res;
+}
+
+void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
+{
+ kfree(c->wbuf);
+}
+
+int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
+ c->cleanmarker_size = 0; /* No cleanmarkers needed */
+
+ /* Initialize write buffer */
+ init_rwsem(&c->wbuf_sem);
+
+
+ c->wbuf_pagesize = c->mtd->erasesize;
+
+	/* Find a suitable c->sector_size
+	 * - Not too many sectors
+	 * - Sectors have to be at least 4KiB plus some bytes
+	 * - All known DataFlashes have erase sizes of 528 or 1056 bytes
+	 * - We concatenate at least 8 eraseblocks and want at least 8KiB per sector
+	 * - The concatenation should be a power of 2
+	 */
+
+ c->sector_size = 8 * c->mtd->erasesize;
+
+ while (c->sector_size < 8192) {
+ c->sector_size *= 2;
+ }
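+	/*
+	 * For example: with a 528-byte erasesize this starts at 8 * 528 =
+	 * 4224 bytes and is doubled once to 8448; with 1056-byte blocks,
+	 * 8 * 1056 = 8448 already satisfies the 8KiB minimum.
+	 */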
+
+ /* It may be necessary to adjust the flash size */
+ c->flash_size = c->mtd->size;
+
+	if ((c->flash_size % c->sector_size) != 0) {
+		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
+		printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size / 1024);
+	}
+
+ c->wbuf_ofs = 0xFFFFFFFF;
+ c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+ if (!c->wbuf)
+ return -ENOMEM;
+
+	printk(KERN_INFO "JFFS2 write-buffering enabled: buffer size %d, virtual erasesize %d\n", c->wbuf_pagesize, c->sector_size);
+
+ return 0;
+}
+
+void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
+ kfree(c->wbuf);
+}
+
+int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
+ /* Cleanmarker is actually larger on the flashes */
+ c->cleanmarker_size = 16;
+ c->ebh_size = 24;
+
+ /* Initialize write buffer */
+ init_rwsem(&c->wbuf_sem);
+ c->wbuf_pagesize = c->mtd->eccsize;
+ c->wbuf_ofs = 0xFFFFFFFF;
+
+ c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+ if (!c->wbuf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
+ kfree(c->wbuf);
+}
+
+int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
+ /* Cleanmarker currently occupies a whole programming region */
+ c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd);
+ c->ebh_size = MTD_PROGREGION_SIZE(c->mtd);
+
+ /* Initialize write buffer */
+ init_rwsem(&c->wbuf_sem);
+ c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd);
+ c->wbuf_ofs = 0xFFFFFFFF;
+
+ c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+ if (!c->wbuf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
+ kfree(c->wbuf);
+}
+
+int jffs2_block_mtd_setup(struct jffs2_sb_info *c) {
+	/* Use a 512-byte sector as the wbuf region, for performance */
+	/* Cleanmarker and EBH each occupy a whole 512-byte write-buffer page */
+ c->cleanmarker_size = 512;
+ c->ebh_size = 512;
+
+ /* Initialize write buffer */
+ init_rwsem(&c->wbuf_sem);
+ c->wbuf_pagesize = 512;
+ c->wbuf_ofs = 0xFFFFFFFF;
+
+ c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
+ if (!c->wbuf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void jffs2_block_mtd_cleanup(struct jffs2_sb_info *c) {
+ kfree(c->wbuf);
+}
diff --git a/linux-2.4.x/fs/jffs2/wear_leveling.c b/linux-2.4.x/fs/jffs2/wear_leveling.c
new file mode 100644
index 0000000..1eeb713
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/wear_leveling.c
@@ -0,0 +1,107 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2005 Zhao Forrest <forrest.zhao@intel.com>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ */
+
+#include "nodelist.h"
+
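+/*
+ * Simple wear-leveling bookkeeping: eraseblocks live in two hash tables
+ * (flag 1 = c->used_blocks, flag 2 = c->free_blocks), bucketed by
+ * erase_count >> BUCKET_RANGE_BIT_LEN. The *_current_index fields are
+ * kept pointing at the lowest non-empty bucket, so jffs2_get_free_block()
+ * and jffs2_get_used_block() hand out the least-worn blocks first.
+ */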
+void jffs2_add_to_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag)
+{
+ struct jffs2_blocks_bucket *hash_table;
+ uint32_t index, *current_index_p;
+
+ if (flag == 1) {
+ hash_table = c->used_blocks;
+ current_index_p = &(c->used_blocks_current_index);
+ }else if (flag == 2) {
+ hash_table = c->free_blocks;
+ current_index_p = &(c->free_blocks_current_index);
+ }else {
+ return;
+ }
+
+ index = (jeb->erase_count >> BUCKET_RANGE_BIT_LEN);
+ if (index >= HASH_SIZE) {
+ return;
+ }
+ if (index < *current_index_p) {
+ *current_index_p = index;
+ }
+ hash_table[index].number++;
+ list_add_tail(&jeb->hash_list, &(hash_table[index].chain));
+ return;
+}
+
+void jffs2_remove_from_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag)
+{
+ struct jffs2_blocks_bucket *hash_table;
+ uint32_t index, *current_index_p, i;
+
+ if (flag == 1) {
+ hash_table = c->used_blocks;
+ current_index_p = &(c->used_blocks_current_index);
+ }else if (flag == 2) {
+ hash_table = c->free_blocks;
+ current_index_p = &(c->free_blocks_current_index);
+ }else {
+ return;
+ }
+
+ index = (jeb->erase_count >> BUCKET_RANGE_BIT_LEN);
+ if (index >= HASH_SIZE) {
+ return;
+ }
+ hash_table[index].number--;
+ list_del(&jeb->hash_list);
+
+	if (hash_table[index].number == 0 && index == *current_index_p) {
+ for (i=index+1; i<HASH_SIZE; i++) {
+ if (hash_table[i].number != 0) {
+ *current_index_p = i;
+ break;
+ }
+ }
+ if (i == HASH_SIZE) {
+ *current_index_p = HASH_SIZE;
+ }
+ }
+ return;
+}
+
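+/*
+ * Take the next block from the non-empty free bucket with the lowest
+ * erase count; returns NULL when no free block is tracked.
+ */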
+struct jffs2_eraseblock *jffs2_get_free_block(struct jffs2_sb_info *c)
+{
+ struct list_head *next;
+ struct jffs2_eraseblock *jeb;
+
+ if (c->free_blocks_current_index == HASH_SIZE) {
+ return NULL;
+ }
+ next = c->free_blocks[c->free_blocks_current_index].chain.next;
+ jeb = list_entry(next, struct jffs2_eraseblock, hash_list);
+ list_del(&jeb->list);
+ jffs2_remove_from_hash_table(c, jeb, 2);
+ c->nr_free_blocks--;
+
+ return jeb;
+}
+
+struct jffs2_eraseblock *jffs2_get_used_block(struct jffs2_sb_info *c)
+{
+ struct list_head *next;
+ struct jffs2_eraseblock *jeb;
+
+ if (c->used_blocks_current_index == HASH_SIZE) {
+ return NULL;
+ }
+ next = c->used_blocks[c->used_blocks_current_index].chain.next;
+ jeb = list_entry(next, struct jffs2_eraseblock, hash_list);
+ list_del(&jeb->list);
+ jffs2_remove_from_hash_table(c, jeb, 1);
+
+ return jeb;
+}
+
diff --git a/linux-2.4.x/fs/jffs2/write.c b/linux-2.4.x/fs/jffs2/write.c
index 9685ef8..75d7d97 100644
--- a/linux-2.4.x/fs/jffs2/write.c
+++ b/linux-2.4.x/fs/jffs2/write.c
@@ -1,188 +1,74 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in this directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: write.c,v 1.30.2.1 2002/08/08 08:36:31 dwmw2 Exp $
+ * $Id: write.c,v 1.98 2005/11/11 08:51:39 forrest Exp $
*
*/
#include <linux/kernel.h>
#include <linux/fs.h>
-#include <linux/jffs2.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"
-#include "crc32.h"
+#include "compr.h"
+
-/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
- fill in the raw_inode while you're at it. */
-struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
+int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri)
{
- struct inode *inode;
- struct super_block *sb = dir_i->i_sb;
struct jffs2_inode_cache *ic;
- struct jffs2_sb_info *c;
- struct jffs2_inode_info *f;
-
- D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));
-
- c = JFFS2_SB_INFO(sb);
- memset(ri, 0, sizeof(*ri));
ic = jffs2_alloc_inode_cache();
if (!ic) {
- return ERR_PTR(-ENOMEM);
- }
- memset(ic, 0, sizeof(*ic));
-
- inode = new_inode(sb);
-
- if (!inode) {
- jffs2_free_inode_cache(ic);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
- /* Alloc jffs2_inode_info when that's split in 2.5 */
+ memset(ic, 0, sizeof(*ic));
- f = JFFS2_INODE_INFO(inode);
- memset(f, 0, sizeof(*f));
- init_MUTEX_LOCKED(&f->sem);
f->inocache = ic;
- inode->i_nlink = f->inocache->nlink = 1;
+ f->inocache->nlink = 1;
f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
- f->inocache->ino = ri->ino = inode->i_ino = ++c->highest_ino;
- D1(printk(KERN_DEBUG "jffs2_new_inode(): Assigned ino# %d\n", ri->ino));
- jffs2_add_ino_cache(c, f->inocache);
-
- ri->magic = JFFS2_MAGIC_BITMASK;
- ri->nodetype = JFFS2_NODETYPE_INODE;
- ri->totlen = PAD(sizeof(*ri));
- ri->hdr_crc = crc32(0, ri, sizeof(struct jffs2_unknown_node)-4);
- ri->mode = mode;
- f->highest_version = ri->version = 1;
- ri->uid = current->fsuid;
- if (dir_i->i_mode & S_ISGID) {
- ri->gid = dir_i->i_gid;
- if (S_ISDIR(mode))
- ri->mode |= S_ISGID;
- } else {
- ri->gid = current->fsgid;
- }
- inode->i_mode = ri->mode;
- inode->i_gid = ri->gid;
- inode->i_uid = ri->uid;
- inode->i_atime = inode->i_ctime = inode->i_mtime =
- ri->atime = ri->mtime = ri->ctime = CURRENT_TIME;
- inode->i_blksize = PAGE_SIZE;
- inode->i_blocks = 0;
- inode->i_size = 0;
-
- insert_inode_hash(inode);
-
- return inode;
-}
+ f->inocache->state = INO_STATE_PRESENT;
-/* This ought to be in core MTD code. All registered MTD devices
- without writev should have this put in place. Bug the MTD
- maintainer */
-static int mtd_fake_writev(struct mtd_info *mtd, const struct iovec *vecs, unsigned long count, loff_t to, size_t *retlen)
-{
- unsigned long i;
- size_t totlen = 0, thislen;
- int ret = 0;
- for (i=0; i<count; i++) {
- ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
- totlen += thislen;
- if (ret || thislen != vecs[i].iov_len)
- break;
- to += vecs[i].iov_len;
- }
- if (retlen)
- *retlen = totlen;
- return ret;
-}
+ jffs2_add_ino_cache(c, f->inocache);
+ D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino));
+ ri->ino = cpu_to_je32(f->inocache->ino);
+ ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+ ri->totlen = cpu_to_je32(PAD(sizeof(*ri)));
+ ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+ ri->mode = cpu_to_jemode(mode);
-static inline int mtd_writev(struct mtd_info *mtd, const struct iovec *vecs, unsigned long count, loff_t to, size_t *retlen)
-{
- if (mtd->writev)
- return mtd->writev(mtd,vecs,count,to,retlen);
- else
- return mtd_fake_writev(mtd, vecs, count, to, retlen);
-}
+ f->highest_version = 1;
+ ri->version = cpu_to_je32(f->highest_version);
-static void writecheck(struct mtd_info *mtd, __u32 ofs)
-{
- unsigned char buf[16];
- ssize_t retlen;
- int ret, i;
-
- ret = mtd->read(mtd, ofs, 16, &retlen, buf);
- if (ret && retlen != 16) {
- D1(printk(KERN_DEBUG "read failed or short in writecheck(). ret %d, retlen %d\n", ret, retlen));
- return;
- }
- ret = 0;
- for (i=0; i<16; i++) {
- if (buf[i] != 0xff)
- ret = 1;
- }
- if (ret) {
- printk(KERN_WARNING "ARGH. About to write node to 0x%08x on flash, but there's data already there:\n", ofs);
- printk(KERN_WARNING "0x%08x: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
- ofs,
- buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7],
- buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]);
- }
+ return 0;
}
-
-
-
-/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it,
+/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it,
write it to the flash, link it into the existing inode/fragment list */
-struct jffs2_full_dnode *jffs2_write_dnode(struct inode *inode, struct jffs2_raw_inode *ri, const unsigned char *data, __u32 datalen, __u32 flash_ofs, __u32 *writelen)
+struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode)
{
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dnode *fn;
- ssize_t retlen;
- struct iovec vecs[2];
+ size_t retlen;
+ struct kvec vecs[2];
int ret;
+ int retried = 0;
+ unsigned long cnt = 2;
- D1(if(ri->hdr_crc != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
+ D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) {
printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dnode()\n");
BUG();
}
@@ -192,85 +78,154 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct inode *inode, struct jffs2_raw
vecs[1].iov_base = (unsigned char *)data;
vecs[1].iov_len = datalen;
- writecheck(c->mtd, flash_ofs);
+ jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
- if (ri->totlen != sizeof(*ri) + datalen) {
- printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08x) + datalen (0x%08x)\n", ri->totlen, sizeof(*ri), datalen);
+ if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
+ printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen);
}
raw = jffs2_alloc_raw_node_ref();
if (!raw)
return ERR_PTR(-ENOMEM);
-
+
fn = jffs2_alloc_full_dnode();
if (!fn) {
jffs2_free_raw_node_ref(raw);
return ERR_PTR(-ENOMEM);
}
- raw->flash_offset = flash_ofs;
- raw->totlen = PAD(ri->totlen);
- raw->next_phys = NULL;
- fn->ofs = ri->offset;
- fn->size = ri->dsize;
+ fn->ofs = je32_to_cpu(ri->offset);
+ fn->size = je32_to_cpu(ri->dsize);
fn->frags = 0;
+
+ /* check number of valid vecs */
+ if (!datalen || !data)
+ cnt = 1;
+ retry:
fn->raw = raw;
- ret = mtd_writev(c->mtd, vecs, 2, flash_ofs, &retlen);
+ raw->flash_offset = flash_ofs;
+ raw->__totlen = PAD(sizeof(*ri)+datalen);
+ raw->next_phys = NULL;
+
+ if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) {
+ BUG_ON(!retried);
+ D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, "
+ "highest version %d -> updating dnode\n",
+ je32_to_cpu(ri->version), f->highest_version));
+ ri->version = cpu_to_je32(++f->highest_version);
+ ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+ }
+
+ ret = jffs2_flash_writev(c, vecs, cnt, flash_ofs, &retlen,
+ (alloc_mode==ALLOC_GC)?0:f->inocache->ino);
+
if (ret || (retlen != sizeof(*ri) + datalen)) {
- printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %d\n",
+ printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
sizeof(*ri)+datalen, flash_ofs, ret, retlen);
+
/* Mark the space as dirtied */
if (retlen) {
/* Doesn't belong to any inode */
raw->next_in_ino = NULL;
- /* Don't change raw->size to match retlen. We may have
+ /* Don't change raw->size to match retlen. We may have
written the node header already, and only the data will
seem corrupted, in which case the scan would skip over
- any node we write before the original intended end of
+ any node we write before the original intended end of
this node */
- jffs2_add_physical_node_ref(c, raw, sizeof(*ri)+datalen, 1);
+ raw->flash_offset |= REF_OBSOLETE;
+ jffs2_add_physical_node_ref(c, raw);
jffs2_mark_node_obsolete(c, raw);
} else {
printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
jffs2_free_raw_node_ref(raw);
}
+ if (!retried && alloc_mode != ALLOC_NORETRY && (raw = jffs2_alloc_raw_node_ref())) {
+ /* Try to reallocate space and retry */
+ uint32_t dummy;
+ struct jffs2_eraseblock *jeb = c->blocks[flash_ofs / c->sector_size];
+
+ retried = 1;
+
+ D1(printk(KERN_DEBUG "Retrying failed write.\n"));
+
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
+
+ if (alloc_mode == ALLOC_GC) {
+ ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs,
+ &dummy, JFFS2_SUMMARY_INODE_SIZE);
+ } else {
+ /* Locking pain */
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs,
+ &dummy, alloc_mode, JFFS2_SUMMARY_INODE_SIZE);
+ down(&f->sem);
+ }
+
+ if (!ret) {
+ D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
+
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
+
+ goto retry;
+ }
+ D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+ jffs2_free_raw_node_ref(raw);
+ }
/* Release the full_dnode which is now useless, and return */
jffs2_free_full_dnode(fn);
- if (writelen)
- *writelen = retlen;
return ERR_PTR(ret?ret:-EIO);
}
/* Mark the space used */
- jffs2_add_physical_node_ref(c, raw, retlen, 0);
+ /* If node covers at least a whole page, or if it starts at the
+ beginning of a page and runs to the end of the file, or if
+ it's a hole node, mark it REF_PRISTINE, else REF_NORMAL.
+ */
+ if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
+ ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
+ (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) {
+ raw->flash_offset |= REF_PRISTINE;
+ } else {
+ raw->flash_offset |= REF_NORMAL;
+ }
+ jffs2_add_physical_node_ref(c, raw);
/* Link into per-inode list */
+ spin_lock(&c->erase_completion_lock);
raw->next_in_ino = f->inocache->nodes;
f->inocache->nodes = raw;
+ spin_unlock(&c->erase_completion_lock);
- D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", flash_ofs, ri->dsize, ri->csize, ri->node_crc, ri->data_crc, ri->totlen));
- if (writelen)
- *writelen = retlen;
+ D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
+ flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize),
+ je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
+ je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)));
+
+ if (retried) {
+ jffs2_dbg_acct_sanity_check(c,NULL);
+ }
- f->inocache->nodes = raw;
return fn;
}
-struct jffs2_full_dirent *jffs2_write_dirent(struct inode *inode, struct jffs2_raw_dirent *rd, const unsigned char *name, __u32 namelen, __u32 flash_ofs, __u32 *writelen)
+struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode)
{
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *fd;
- ssize_t retlen;
- struct iovec vecs[2];
+ size_t retlen;
+ struct kvec vecs[2];
+ int retried = 0;
int ret;
- D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", rd->pino, name, name, rd->ino, rd->name_crc));
- writecheck(c->mtd, flash_ofs);
+ D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n",
+ je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino),
+ je32_to_cpu(rd->name_crc)));
- D1(if(rd->hdr_crc != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
+ D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n");
BUG();
}
@@ -280,7 +235,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct inode *inode, struct jffs2_r
vecs[0].iov_len = sizeof(*rd);
vecs[1].iov_base = (unsigned char *)name;
vecs[1].iov_len = namelen;
-
+
+ jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
+
raw = jffs2_alloc_raw_node_ref();
if (!raw)
@@ -291,44 +248,465 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct inode *inode, struct jffs2_r
jffs2_free_raw_node_ref(raw);
return ERR_PTR(-ENOMEM);
}
- raw->flash_offset = flash_ofs;
- raw->totlen = PAD(rd->totlen);
- raw->next_in_ino = f->inocache->nodes;
- f->inocache->nodes = raw;
- raw->next_phys = NULL;
- fd->version = rd->version;
- fd->ino = rd->ino;
+ fd->version = je32_to_cpu(rd->version);
+ fd->ino = je32_to_cpu(rd->ino);
fd->nhash = full_name_hash(name, strlen(name));
fd->type = rd->type;
memcpy(fd->name, name, namelen);
fd->name[namelen]=0;
+
+ retry:
fd->raw = raw;
- ret = mtd_writev(c->mtd, vecs, 2, flash_ofs, &retlen);
- if (ret || (retlen != sizeof(*rd) + namelen)) {
- printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %d\n",
+ raw->flash_offset = flash_ofs;
+ raw->__totlen = PAD(sizeof(*rd)+namelen);
+ raw->next_phys = NULL;
+
+ if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) {
+ BUG_ON(!retried);
+ D1(printk(KERN_DEBUG "jffs2_write_dirent : dirent_version %d, "
+ "highest version %d -> updating dirent\n",
+ je32_to_cpu(rd->version), f->highest_version));
+ rd->version = cpu_to_je32(++f->highest_version);
+ fd->version = je32_to_cpu(rd->version);
+ rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+ }
+
+ ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen,
+ (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino));
+ if (ret || (retlen != sizeof(*rd) + namelen)) {
+ printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n",
sizeof(*rd)+namelen, flash_ofs, ret, retlen);
/* Mark the space as dirtied */
- if (retlen) {
- jffs2_add_physical_node_ref(c, raw, sizeof(*rd)+namelen, 1);
- jffs2_mark_node_obsolete(c, raw);
+ if (retlen) {
+ raw->next_in_ino = NULL;
+ raw->flash_offset |= REF_OBSOLETE;
+ jffs2_add_physical_node_ref(c, raw);
+ jffs2_mark_node_obsolete(c, raw);
+ } else {
+ printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
+ jffs2_free_raw_node_ref(raw);
+ }
+ if (!retried && (raw = jffs2_alloc_raw_node_ref())) {
+ /* Try to reallocate space and retry */
+ uint32_t dummy;
+ struct jffs2_eraseblock *jeb = c->blocks[flash_ofs / c->sector_size];
+
+ retried = 1;
+
+ D1(printk(KERN_DEBUG "Retrying failed write.\n"));
+
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
+
+ if (alloc_mode == ALLOC_GC) {
+ ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs,
+ &dummy, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
} else {
- printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
- jffs2_free_raw_node_ref(raw);
+ /* Locking pain */
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+
+ ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs,
+ &dummy, alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+ down(&f->sem);
}
+ if (!ret) {
+ D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
+ jffs2_dbg_acct_sanity_check(c,jeb);
+ jffs2_dbg_acct_paranoia_check(c, jeb);
+ goto retry;
+ }
+ D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
+ jffs2_free_raw_node_ref(raw);
+ }
/* Release the full_dnode which is now useless, and return */
jffs2_free_full_dirent(fd);
- if (writelen)
- *writelen = retlen;
return ERR_PTR(ret?ret:-EIO);
}
/* Mark the space used */
- jffs2_add_physical_node_ref(c, raw, retlen, 0);
- if (writelen)
- *writelen = retlen;
+ raw->flash_offset |= REF_PRISTINE;
+ jffs2_add_physical_node_ref(c, raw);
+ spin_lock(&c->erase_completion_lock);
+ raw->next_in_ino = f->inocache->nodes;
f->inocache->nodes = raw;
+ spin_unlock(&c->erase_completion_lock);
+
+ if (retried) {
+ jffs2_dbg_acct_sanity_check(c,NULL);
+ }
+
return fd;
}
+
+/* The OS-specific code fills in the metadata in the jffs2_raw_inode for us, so that
+ we don't have to go digging in struct inode or its equivalent. It should set:
+ mode, uid, gid, (starting)isize, atime, ctime, mtime */
+int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+ struct jffs2_raw_inode *ri, unsigned char *buf,
+ uint32_t offset, uint32_t writelen, uint32_t *retlen)
+{
+ int ret = 0;
+ uint32_t writtenlen = 0;
+
+ D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n",
+ f->inocache->ino, offset, writelen));
+
+ while(writelen) {
+ struct jffs2_full_dnode *fn;
+ unsigned char *comprbuf = NULL;
+ uint16_t comprtype = JFFS2_COMPR_NONE;
+ uint32_t phys_ofs, alloclen;
+ uint32_t datalen, cdatalen;
+ int retried = 0;
+
+ retry:
+ D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset));
+
+ ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs,
+ &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+ if (ret) {
+ D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
+ break;
+ }
+ down(&f->sem);
+ datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)));
+ cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen);
+
+ comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen);
+
+ ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
+ ri->totlen = cpu_to_je32(sizeof(*ri) + cdatalen);
+ ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
+
+ ri->ino = cpu_to_je32(f->inocache->ino);
+ ri->version = cpu_to_je32(++f->highest_version);
+ ri->isize = cpu_to_je32(max(je32_to_cpu(ri->isize), offset + datalen));
+ ri->offset = cpu_to_je32(offset);
+ ri->csize = cpu_to_je32(cdatalen);
+ ri->dsize = cpu_to_je32(datalen);
+ ri->compr = comprtype & 0xff;
+ ri->usercompr = (comprtype >> 8 ) & 0xff;
+ ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+ ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
+
+ fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, phys_ofs, ALLOC_NORETRY);
+
+ jffs2_free_comprbuf(comprbuf, buf);
+
+ if (IS_ERR(fn)) {
+ ret = PTR_ERR(fn);
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ if (!retried) {
+ /* Write error to be retried */
+ retried = 1;
+ D1(printk(KERN_DEBUG "Retrying node write in jffs2_write_inode_range()\n"));
+ goto retry;
+ }
+ break;
+ }
+ ret = jffs2_add_full_dnode_to_inode(c, f, fn);
+ if (f->metadata) {
+ jffs2_mark_node_obsolete(c, f->metadata->raw);
+ jffs2_free_full_dnode(f->metadata);
+ f->metadata = NULL;
+ }
+ if (ret) {
+ /* Eep */
+ D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret));
+ jffs2_mark_node_obsolete(c, fn->raw);
+ jffs2_free_full_dnode(fn);
+
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ break;
+ }
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ if (!datalen) {
+ printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n");
+ ret = -EIO;
+ break;
+ }
+ D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen));
+ writtenlen += datalen;
+ offset += datalen;
+ writelen -= datalen;
+ buf += datalen;
+ }
+ *retlen = writtenlen;
+ return ret;
+}
+
+int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen)
+{
+ struct jffs2_raw_dirent *rd;
+ struct jffs2_full_dnode *fn;
+ struct jffs2_full_dirent *fd;
+ uint32_t alloclen, phys_ofs;
+ int ret;
+
+ /* Try to reserve enough space for both node and dirent.
+ * Just the node will do for now, though
+ */
+ ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL,
+ JFFS2_SUMMARY_INODE_SIZE);
+ D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
+ if (ret) {
+ up(&f->sem);
+ return ret;
+ }
+
+ ri->data_crc = cpu_to_je32(0);
+ ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
+
+ fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+
+ D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n",
+ jemode_to_cpu(ri->mode)));
+
+ if (IS_ERR(fn)) {
+ D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n"));
+ /* Eeek. Wave bye bye */
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ return PTR_ERR(fn);
+ }
+ /* No data here. Only a metadata node, which will be
+ obsoleted by the first data write
+ */
+ f->metadata = fn;
+
+ up(&f->sem);
+ jffs2_complete_reservation(c);
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+
+ if (ret) {
+ /* Eep. */
+ D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n"));
+ return ret;
+ }
+
+ rd = jffs2_alloc_raw_dirent();
+ if (!rd) {
+ /* Argh. Now we treat it like a normal delete */
+ jffs2_complete_reservation(c);
+ return -ENOMEM;
+ }
+
+ down(&dir_f->sem);
+
+ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+ rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+ rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+ rd->pino = cpu_to_je32(dir_f->inocache->ino);
+ rd->version = cpu_to_je32(++dir_f->highest_version);
+ rd->ino = ri->ino;
+ rd->mctime = ri->ctime;
+ rd->nsize = namelen;
+ rd->type = DT_REG;
+ rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+ rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
+
+ fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
+
+ jffs2_free_raw_dirent(rd);
+
+ if (IS_ERR(fd)) {
+ /* dirent failed to write. Delete the inode normally
+ as if it were the final unlink() */
+ jffs2_complete_reservation(c);
+ up(&dir_f->sem);
+ return PTR_ERR(fd);
+ }
+
+ /* Link the fd into the inode's list, obsoleting an old
+ one if necessary. */
+ jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+ jffs2_complete_reservation(c);
+ up(&dir_f->sem);
+
+ return 0;
+}
+
+
+int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
+ const char *name, int namelen, struct jffs2_inode_info *dead_f,
+ uint32_t time)
+{
+ struct jffs2_raw_dirent *rd;
+ struct jffs2_full_dirent *fd;
+ uint32_t alloclen, phys_ofs;
+ int ret;
+
+ if (1 /* alternative branch needs testing */ ||
+ !jffs2_can_mark_obsolete(c)) {
+ /* We can't mark stuff obsolete on the medium. We need to write a deletion dirent */
+
+ rd = jffs2_alloc_raw_dirent();
+ if (!rd)
+ return -ENOMEM;
+
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+ if (ret) {
+ jffs2_free_raw_dirent(rd);
+ return ret;
+ }
+
+ down(&dir_f->sem);
+
+ /* Build a deletion node */
+ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+ rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+ rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+ rd->pino = cpu_to_je32(dir_f->inocache->ino);
+ rd->version = cpu_to_je32(++dir_f->highest_version);
+ rd->ino = cpu_to_je32(0);
+ rd->mctime = cpu_to_je32(time);
+ rd->nsize = namelen;
+ rd->type = DT_UNKNOWN;
+ rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+ rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
+
+ fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION);
+
+ jffs2_free_raw_dirent(rd);
+
+ if (IS_ERR(fd)) {
+ jffs2_complete_reservation(c);
+ up(&dir_f->sem);
+ return PTR_ERR(fd);
+ }
+
+ /* File it. This will mark the old one obsolete. */
+ jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+ up(&dir_f->sem);
+ } else {
+ struct jffs2_full_dirent **prev = &dir_f->dents;
+ uint32_t nhash = full_name_hash(name, namelen);
+
+ down(&dir_f->sem);
+
+ while ((*prev) && (*prev)->nhash <= nhash) {
+ if ((*prev)->nhash == nhash &&
+ !memcmp((*prev)->name, name, namelen) &&
+ !(*prev)->name[namelen]) {
+ struct jffs2_full_dirent *this = *prev;
+
+ D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
+ this->ino, ref_offset(this->raw)));
+
+ *prev = this->next;
+ jffs2_mark_node_obsolete(c, (this->raw));
+ jffs2_free_full_dirent(this);
+ break;
+ }
+ prev = &((*prev)->next);
+ }
+ up(&dir_f->sem);
+ }
+
+ /* dead_f is NULL if this was a rename not a real unlink */
+ /* Also catch the !f->inocache case, where there was a dirent
+ pointing to an inode which didn't exist. */
+ if (dead_f && dead_f->inocache) {
+
+ down(&dead_f->sem);
+
+ if (S_ISDIR(OFNI_EDONI_2SFFJ(dead_f)->i_mode)) {
+ while (dead_f->dents) {
+ /* There can be only deleted ones */
+ fd = dead_f->dents;
+
+ dead_f->dents = fd->next;
+
+ if (fd->ino) {
+ printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n",
+ dead_f->inocache->ino, fd->name, fd->ino);
+ } else {
+ D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n",
+ fd->name, dead_f->inocache->ino));
+ }
+ jffs2_mark_node_obsolete(c, fd->raw);
+ jffs2_free_full_dirent(fd);
+ }
+ }
+
+ dead_f->inocache->nlink--;
+ /* NB: Caller must set inode nlink if appropriate */
+ up(&dead_f->sem);
+ }
+
+ jffs2_complete_reservation(c);
+
+ return 0;
+}
+
+
+int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time)
+{
+ struct jffs2_raw_dirent *rd;
+ struct jffs2_full_dirent *fd;
+ uint32_t alloclen, phys_ofs;
+ int ret;
+
+ rd = jffs2_alloc_raw_dirent();
+ if (!rd)
+ return -ENOMEM;
+
+ ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+ ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+ if (ret) {
+ jffs2_free_raw_dirent(rd);
+ return ret;
+ }
+
+ down(&dir_f->sem);
+
+	/* Build a dirent node for the new link */
+ rd->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+ rd->nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
+ rd->totlen = cpu_to_je32(sizeof(*rd) + namelen);
+ rd->hdr_crc = cpu_to_je32(crc32(0, rd, sizeof(struct jffs2_unknown_node)-4));
+
+ rd->pino = cpu_to_je32(dir_f->inocache->ino);
+ rd->version = cpu_to_je32(++dir_f->highest_version);
+ rd->ino = cpu_to_je32(ino);
+ rd->mctime = cpu_to_je32(time);
+ rd->nsize = namelen;
+
+ rd->type = type;
+
+ rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
+ rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
+
+ fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
+
+ jffs2_free_raw_dirent(rd);
+
+ if (IS_ERR(fd)) {
+ jffs2_complete_reservation(c);
+ up(&dir_f->sem);
+ return PTR_ERR(fd);
+ }
+
+ /* File it. This will mark the old one obsolete. */
+ jffs2_add_fd_to_list(c, fd, &dir_f->dents);
+
+ jffs2_complete_reservation(c);
+ up(&dir_f->sem);
+
+ return 0;
+}
diff --git a/linux-2.4.x/fs/jffs2/writev.c b/linux-2.4.x/fs/jffs2/writev.c
new file mode 100644
index 0000000..8f67bbd
--- /dev/null
+++ b/linux-2.4.x/fs/jffs2/writev.c
@@ -0,0 +1,82 @@
+/*
+ * JFFS2 -- Journalling Flash File System, Version 2.
+ *
+ * Copyright (C) 2001, 2002 Red Hat, Inc.
+ *
+ * Created by David Woodhouse <dwmw2@infradead.org>
+ *
+ * For licensing information, see the file 'LICENCE' in this directory.
+ *
+ * $Id: writev.c,v 1.9 2006/03/22 15:03:25 havasi Exp $
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include "nodelist.h"
+
+/* This ought to be in core MTD code. All registered MTD devices
+ without writev should have this put in place. Bug the MTD
+ maintainer */
+static inline int mtd_fake_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ unsigned long i;
+ size_t totlen = 0, thislen;
+ int ret = 0;
+
+ for (i=0; i<count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ to += vecs[i].iov_len;
+ }
+ if (retlen)
+ *retlen = totlen;
+ return ret;
+}
+
+int jffs2_flash_direct_writev(struct jffs2_sb_info *c, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
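+	/*
+	 * Without a write buffer the summary collector is fed directly
+	 * here; when write-buffering is active the buffered writev path is
+	 * expected to account for the vectors itself.
+	 */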
+ if (!jffs2_is_writebuffered(c)) {
+ if (jffs2_sum_active()) {
+ int res;
+ res = jffs2_sum_add_kvec(c, vecs, count, (uint32_t) to);
+ if (res) {
+ return res;
+ }
+ }
+ }
+
+ if (c->mtd->writev)
+ return c->mtd->writev(c->mtd, vecs, count, to, retlen);
+ else {
+ return mtd_fake_writev(c->mtd, vecs, count, to, retlen);
+ }
+}
+
+int jffs2_flash_direct_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ int ret;
+
+ if (jffs2_sum_active()) {
+ struct kvec vecs[1];
+ int res;
+
+ vecs[0].iov_base = (unsigned char *) buf;
+ vecs[0].iov_len = len;
+
+ res = jffs2_sum_add_kvec(c, vecs, 1, (uint32_t) ofs);
+ if (res) {
+ return res;
+ }
+ }
+
+ ret = c->mtd->write(c->mtd, ofs, len, retlen, buf);
+ return ret;
+}
diff --git a/linux-2.4.x/include/linux/crc32.h b/linux-2.4.x/include/linux/crc32.h
index 0149736..9dd38cb 100644
--- a/linux-2.4.x/include/linux/crc32.h
+++ b/linux-2.4.x/include/linux/crc32.h
@@ -46,4 +46,25 @@ static inline u32 ether_crc(int length, unsigned char *data)
return crc;
}
-#endif /* _LINUX_CRC32_H */
+#ifndef CRC32_H
+#define CRC32_H
+
+/* $Id: crc32.h,v 1.5 2005/11/07 11:14:38 gleixner Exp $ */
+
+#include <linux/types.h>
+
+extern const uint32_t crc32_table[256];
+
+/* Return a 32-bit CRC of the contents of the buffer. */
+
+static inline uint32_t
+crc32(uint32_t val, const void *ss, int len)
+{
+ const unsigned char *s = ss;
+ while (--len >= 0)
+ val = crc32_table[(val ^ *s++) & 0xff] ^ (val >> 8);
+ return val;
+}
+
+#endif
+#endif /* _LINUX_CRC32_H */
diff --git a/linux-2.4.x/include/linux/jffs2.h b/linux-2.4.x/include/linux/jffs2.h
index 9c67732..9720da1 100644
--- a/linux-2.4.x/include/linux/jffs2.h
+++ b/linux-2.4.x/include/linux/jffs2.h
@@ -1,53 +1,41 @@
/*
* JFFS2 -- Journalling Flash File System, Version 2.
*
- * Copyright (C) 2001 Red Hat, Inc.
+ * Copyright (C) 2001-2003 Red Hat, Inc.
*
- * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Created by David Woodhouse <dwmw2@infradead.org>
*
- * The original JFFS, from which the design for JFFS2 was derived,
- * was designed and implemented by Axis Communications AB.
+ * For licensing information, see the file 'LICENCE' in the
+ * jffs2 directory.
*
- * The contents of this file are subject to the Red Hat eCos Public
- * License Version 1.1 (the "Licence"); you may not use this file
- * except in compliance with the Licence. You may obtain a copy of
- * the Licence at http://www.redhat.com/
- *
- * Software distributed under the Licence is distributed on an "AS IS"
- * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
- * See the Licence for the specific language governing rights and
- * limitations under the Licence.
- *
- * The Original Code is JFFS2 - Journalling Flash File System, version 2
- *
- * Alternatively, the contents of this file may be used under the
- * terms of the GNU General Public License version 2 (the "GPL"), in
- * which case the provisions of the GPL are applicable instead of the
- * above. If you wish to allow the use of your version of this file
- * only under the terms of the GPL and not to allow others to use your
- * version of this file under the RHEPL, indicate your decision by
- * deleting the provisions above and replace them with the notice and
- * other provisions required by the GPL. If you do not delete the
- * provisions above, a recipient may use your version of this file
- * under either the RHEPL or the GPL.
- *
- * $Id: jffs2.h,v 1.19 2001/10/09 13:20:23 dwmw2 Exp $
+ * $Id: jffs2.h,v 1.40 2005/11/07 11:14:51 gleixner Exp $
*
*/
#ifndef __LINUX_JFFS2_H__
#define __LINUX_JFFS2_H__
-#include <asm/types.h>
+/* You must include something which defines the C99 uintXX_t types.
+ We don't do it from here because this file is used in too many
+ different environments. */
+
#define JFFS2_SUPER_MAGIC 0x72b6
/* Values we may expect to find in the 'magic' field */
#define JFFS2_OLD_MAGIC_BITMASK 0x1984
#define JFFS2_MAGIC_BITMASK 0x1985
-#define KSAMTIB_CIGAM_2SFFJ 0x5981 /* For detecting wrong-endian fs */
+#define KSAMTIB_CIGAM_2SFFJ 0x8519 /* For detecting wrong-endian fs */
#define JFFS2_EMPTY_BITMASK 0xffff
#define JFFS2_DIRTY_BITMASK 0x0000
+/* JFFS2 eraseblock header compat/incompat/rocompat features set */
+#define JFFS2_EBH_COMPAT_FSET 0x00
+#define JFFS2_EBH_INCOMPAT_FSET 0x00
+#define JFFS2_EBH_ROCOMPAT_FSET 0x00
+
+/* Summary node MAGIC marker */
+#define JFFS2_SUM_MAGIC 0x02851885
+
/* We only allow a single char for length, and 0xFF is empty flash so
we don't want it confused with a real length. Hence max 254.
*/
@@ -78,86 +66,133 @@
#define JFFS2_NODETYPE_DIRENT (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 1)
#define JFFS2_NODETYPE_INODE (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 2)
#define JFFS2_NODETYPE_CLEANMARKER (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
+#define JFFS2_NODETYPE_PADDING (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 4)
+
+#define JFFS2_NODETYPE_ERASEBLOCK_HEADER (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 5)
+#define JFFS2_NODETYPE_SUMMARY (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 6)
// Maybe later...
//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4)
-/* Same as the non_ECC versions, but with extra space for real
- * ECC instead of just the checksum. For use on NAND flash
- */
-//#define JFFS2_NODETYPE_DIRENT_ECC (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 5)
-//#define JFFS2_NODETYPE_INODE_ECC (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 6)
-#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at
- mount time, don't wait for it to
+#define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at
+ mount time, don't wait for it to
happen later */
-#define JFFS2_INO_FLAG_USERCOMPR 2 /* User has requested a specific
+#define JFFS2_INO_FLAG_USERCOMPR 2 /* User has requested a specific
compression type */
+/* These can go once we've made sure we've caught all uses without
+ byteswapping */
+
+typedef struct {
+ uint32_t v32;
+} __attribute__((packed)) jint32_t;
+
+typedef struct {
+ uint32_t m;
+} __attribute__((packed)) jmode_t;
+
+typedef struct {
+ uint16_t v16;
+} __attribute__((packed)) jint16_t;
+
struct jffs2_unknown_node
{
/* All start like this */
- __u16 magic;
- __u16 nodetype;
- __u32 totlen; /* So we can skip over nodes we don't grok */
- __u32 hdr_crc;
+ jint16_t magic;
+ jint16_t nodetype;
+ jint32_t totlen; /* So we can skip over nodes we don't grok */
+ jint32_t hdr_crc;
} __attribute__((packed));
struct jffs2_raw_dirent
{
- __u16 magic;
- __u16 nodetype; /* == JFFS_NODETYPE_DIRENT */
- __u32 totlen;
- __u32 hdr_crc;
- __u32 pino;
- __u32 version;
- __u32 ino; /* == zero for unlink */
- __u32 mctime;
- __u8 nsize;
- __u8 type;
- __u8 unused[2];
- __u32 node_crc;
- __u32 name_crc;
- __u8 name[0];
+ jint16_t magic;
+ jint16_t nodetype; /* == JFFS2_NODETYPE_DIRENT */
+ jint32_t totlen;
+ jint32_t hdr_crc;
+ jint32_t pino;
+ jint32_t version;
+ jint32_t ino; /* == zero for unlink */
+ jint32_t mctime;
+ uint8_t nsize;
+ uint8_t type;
+ uint8_t unused[2];
+ jint32_t node_crc;
+ jint32_t name_crc;
+ uint8_t name[0];
} __attribute__((packed));
/* The JFFS2 raw inode structure: Used for storage on physical media. */
-/* The uid, gid, atime, mtime and ctime members could be longer, but
+/* The uid, gid, atime, mtime and ctime members could be longer, but
are left like this for space efficiency. If and when people decide
they really need them extended, it's simple enough to add support for
a new type of raw node.
*/
struct jffs2_raw_inode
{
- __u16 magic; /* A constant magic number. */
- __u16 nodetype; /* == JFFS_NODETYPE_INODE */
- __u32 totlen; /* Total length of this node (inc data, etc.) */
- __u32 hdr_crc;
- __u32 ino; /* Inode number. */
- __u32 version; /* Version number. */
- __u32 mode; /* The file's type or mode. */
- __u16 uid; /* The file's owner. */
- __u16 gid; /* The file's group. */
- __u32 isize; /* Total resultant size of this inode (used for truncations) */
- __u32 atime; /* Last access time. */
- __u32 mtime; /* Last modification time. */
- __u32 ctime; /* Change time. */
- __u32 offset; /* Where to begin to write. */
- __u32 csize; /* (Compressed) data size */
- __u32 dsize; /* Size of the node's data. (after decompression) */
- __u8 compr; /* Compression algorithm used */
- __u8 usercompr; /* Compression algorithm requested by the user */
- __u16 flags; /* See JFFS2_INO_FLAG_* */
- __u32 data_crc; /* CRC for the (compressed) data. */
- __u32 node_crc; /* CRC for the raw inode (excluding data) */
-// __u8 data[dsize];
+ jint16_t magic; /* A constant magic number. */
+ jint16_t nodetype; /* == JFFS2_NODETYPE_INODE */
+ jint32_t totlen; /* Total length of this node (inc data, etc.) */
+ jint32_t hdr_crc;
+ jint32_t ino; /* Inode number. */
+ jint32_t version; /* Version number. */
+ jmode_t mode; /* The file's type or mode. */
+ jint16_t uid; /* The file's owner. */
+ jint16_t gid; /* The file's group. */
+ jint32_t isize; /* Total resultant size of this inode (used for truncations) */
+ jint32_t atime; /* Last access time. */
+ jint32_t mtime; /* Last modification time. */
+ jint32_t ctime; /* Change time. */
+ jint32_t offset; /* Where to begin to write. */
+ jint32_t csize; /* (Compressed) data size */
+ jint32_t dsize; /* Size of the node's data. (after decompression) */
+ uint8_t compr; /* Compression algorithm used */
+ uint8_t usercompr; /* Compression algorithm requested by the user */
+ jint16_t flags; /* See JFFS2_INO_FLAG_* */
+ jint32_t data_crc; /* CRC for the (compressed) data. */
+ jint32_t node_crc; /* CRC for the raw inode (excluding data) */
+ uint8_t data[0];
+} __attribute__((packed));
+
+struct jffs2_raw_summary
+{
+ jint16_t magic;
+ jint16_t nodetype; /* = JFFS2_NODETYPE_SUMMARY */
+ jint32_t totlen;
+ jint32_t hdr_crc;
+ jint32_t sum_num; /* number of sum entries*/
+ jint32_t cln_mkr; /* clean marker size, 0 = no cleanmarker */
+ jint32_t padded; /* sum of the size of padding nodes */
+ jint32_t sum_crc; /* summary information crc */
+ jint32_t node_crc; /* node crc */
+ jint32_t sum[0]; /* inode summary info */
} __attribute__((packed));
-union jffs2_node_union {
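+/*
+ * Eraseblock header: serves the same role as the cleanmarker for a freshly
+ * erased block, but additionally records the block's erase count, which the
+ * wear-leveling code buckets blocks on.
+ */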
+struct jffs2_raw_ebh
+{
+ jint16_t magic;
+ jint16_t nodetype; /* == JFFS2_NODETYPE_ERASEBLOCK_HEADER */
+ jint32_t totlen;
+ jint32_t hdr_crc;
+ jint32_t node_crc;
+ uint8_t reserved; /* reserved for future use and alignment */
+ uint8_t compat_fset;
+ uint8_t incompat_fset;
+ uint8_t rocompat_fset;
+ jint32_t erase_count; /* the erase count of this erase block */
+ jint32_t data[0];
+} __attribute__((packed));
+
+
+union jffs2_node_union
+{
struct jffs2_raw_inode i;
struct jffs2_raw_dirent d;
+ struct jffs2_raw_summary s;
+ struct jffs2_raw_ebh eh;
struct jffs2_unknown_node u;
};
diff --git a/linux-2.4.x/include/linux/jffs2_fs_i.h b/linux-2.4.x/include/linux/jffs2_fs_i.h
index 6121a88..ef85ab5 100644
--- a/linux-2.4.x/include/linux/jffs2_fs_i.h
+++ b/linux-2.4.x/include/linux/jffs2_fs_i.h
@@ -1,22 +1,13 @@
-/* $Id: jffs2_fs_i.h,v 1.8 2001/04/18 13:05:28 dwmw2 Exp $ */
+/* $Id: jffs2_fs_i.h,v 1.19 2005/11/07 11:14:52 gleixner Exp $ */
#ifndef _JFFS2_FS_I
#define _JFFS2_FS_I
-/* Include the pipe_inode_info at the beginning so that we can still
- use the storage space in the inode when we have a pipe inode.
- This sucks.
-*/
-
-#undef THISSUCKS /* Only for 2.2 */
-#ifdef THISSUCKS
-#include <linux/pipe_fs_i.h>
-#endif
+#include <linux/version.h>
+#include <linux/rbtree.h>
+#include <asm/semaphore.h>
struct jffs2_inode_info {
-#ifdef THISSUCKS
- struct pipe_inode_info pipecrap;
-#endif
/* We need an internal semaphore similar to inode->i_sem.
Unfortunately, we can't used the existing one, because
either the GC would deadlock, or we'd have to release it
@@ -26,37 +17,34 @@ struct jffs2_inode_info {
struct semaphore sem;
/* The highest (datanode) version number used for this ino */
- __u32 highest_version;
+ uint32_t highest_version;
/* List of data fragments which make up the file */
- struct jffs2_node_frag *fraglist;
+ struct rb_root fragtree;
/* There may be one datanode which isn't referenced by any of the
above fragments, if it contains a metadata update but no actual
data - or if this is a directory inode */
- /* This also holds the _only_ dnode for symlinks/device nodes,
+ /* This also holds the _only_ dnode for symlinks/device nodes,
etc. */
struct jffs2_full_dnode *metadata;
/* Directory entries */
struct jffs2_full_dirent *dents;
+ /* The target path if this is the inode of a symlink */
+ unsigned char *target;
+
/* Some stuff we just have to keep in-core at all times, for each inode. */
struct jffs2_inode_cache *inocache;
- /* Keep a pointer to the last physical node in the list. We don't
- use the doubly-linked lists because we don't want to increase
- the memory usage that much. This is simpler */
- // struct jffs2_raw_node_ref *lastnode;
- __u16 flags;
- __u8 usercompr;
-};
-
-#ifdef JFFS2_OUT_OF_KERNEL
-#define JFFS2_INODE_INFO(i) ((struct jffs2_inode_info *) &(i)->u)
-#else
-#define JFFS2_INODE_INFO(i) (&i->u.jffs2_i)
+ uint16_t flags;
+ uint8_t usercompr;
+#if !defined (__ECOS)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,2)
+ struct inode vfs_inode;
+#endif
#endif
+};
#endif /* _JFFS2_FS_I */
-
diff --git a/linux-2.4.x/include/linux/jffs2_fs_sb.h b/linux-2.4.x/include/linux/jffs2_fs_sb.h
index 626c8c5..f6ffe78 100644
--- a/linux-2.4.x/include/linux/jffs2_fs_sb.h
+++ b/linux-2.4.x/include/linux/jffs2_fs_sb.h
@@ -1,84 +1,147 @@
-/* $Id: jffs2_fs_sb.h,v 1.16.2.1 2002/02/23 14:13:34 dwmw2 Exp $ */
+/* $Id: jffs2_fs_sb.h,v 1.60 2005/11/29 14:34:37 gleixner Exp $ */
#ifndef _JFFS2_FS_SB
#define _JFFS2_FS_SB
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include <linux/completion.h>
#include <asm/semaphore.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
#include <linux/list.h>
-
-#define INOCACHE_HASHSIZE 1
+#include <linux/rwsem.h>
#define JFFS2_SB_FLAG_RO 1
-#define JFFS2_SB_FLAG_MOUNTING 2
+#define JFFS2_SB_FLAG_SCANNING 2 /* Flash scanning is in progress */
+#define JFFS2_SB_FLAG_BUILDING 4 /* File system building is in progress */
+
+#define MAX_ERASE_COUNT_BIT_LEN 18
+#define MAX_ERASE_COUNT (1 << MAX_ERASE_COUNT_BIT_LEN) /* The maximum guaranteed erase cycles for NAND and NOR are ~ 100K at the moment */
+#define WL_DELTA_BIT_LEN 10
+#define WL_DELTA (1 << WL_DELTA_BIT_LEN) /* The wear-leveling delta, defined as "maximum of all erase counts - minimum of all erase counts" */
+#define HASH_SIZE_BIT_LEN (MAX_ERASE_COUNT_BIT_LEN - WL_DELTA_BIT_LEN + 1) /* The range size per bucket is half of WL_DELTA */
+#define HASH_SIZE (1 << HASH_SIZE_BIT_LEN)
+#define BUCKET_RANGE_BIT_LEN (MAX_ERASE_COUNT_BIT_LEN - HASH_SIZE_BIT_LEN)
+#define BUCKET_RANGE (1 << BUCKET_RANGE_BIT_LEN)
+
+struct jffs2_blocks_bucket {
+ uint32_t number; /* The number of erase blocks in this bucket*/
+ struct list_head chain; /* The head of erase blocks in this bucket */
+};
+
+struct jffs2_inodirty;
/* A struct for the overall file system control. Pointers to
- jffs2_sb_info structs are named `c' in the source code.
+ jffs2_sb_info structs are named `c' in the source code.
Nee jffs_control
*/
struct jffs2_sb_info {
struct mtd_info *mtd;
- __u32 highest_ino;
+ uint32_t highest_ino;
+ uint32_t checked_ino;
+
unsigned int flags;
- spinlock_t nodelist_lock;
- // pid_t thread_pid; /* GC thread's PID */
struct task_struct *gc_task; /* GC task struct */
- struct semaphore gc_thread_start; /* GC thread start mutex */
+ struct completion gc_thread_start; /* GC thread start completion */
struct completion gc_thread_exit; /* GC thread exit completion port */
- // __u32 gc_minfree_threshold; /* GC trigger thresholds */
- // __u32 gc_maxdirty_threshold;
- struct semaphore alloc_sem; /* Used to protect all the following
+ struct semaphore alloc_sem; /* Used to protect all the following
fields, and also to protect against
- out-of-order writing of nodes.
- And GC.
- */
- __u32 flash_size;
- __u32 used_size;
- __u32 dirty_size;
- __u32 free_size;
- __u32 erasing_size;
- __u32 bad_size;
- __u32 sector_size;
- // __u32 min_free_size;
- // __u32 max_chunk_size;
-
- __u32 nr_free_blocks;
- __u32 nr_erasing_blocks;
-
- __u32 nr_blocks;
- struct jffs2_eraseblock *blocks; /* The whole array of blocks. Used for getting blocks
+ out-of-order writing of nodes. And GC. */
+ uint32_t cleanmarker_size; /* Size of an _inline_ CLEANMARKER
+ (i.e. zero for OOB CLEANMARKER) */
+
+ uint32_t flash_size;
+ uint32_t used_size;
+ uint32_t dirty_size;
+ uint32_t wasted_size;
+ uint32_t free_size;
+ uint32_t erasing_size;
+ uint32_t bad_size;
+ uint32_t sector_size;
+ uint32_t unchecked_size;
+
+ uint32_t nr_free_blocks;
+ uint32_t nr_erasing_blocks;
+
+ /* Number of free blocks there must be before we... */
+ uint8_t resv_blocks_write; /* ... allow a normal filesystem write */
+ uint8_t resv_blocks_deletion; /* ... allow a normal filesystem deletion */
+ uint8_t resv_blocks_gctrigger; /* ... wake up the GC thread */
+ uint8_t resv_blocks_gcbad; /* ... pick a block from the bad_list to GC */
+ uint8_t resv_blocks_gcmerge; /* ... merge pages when garbage collecting */
+
+ uint32_t nospc_dirty_size;
+
+ uint32_t nr_blocks;
+ struct jffs2_eraseblock **blocks; /* The whole array of blocks. Used for getting blocks
* from the offset (blocks[ofs / sector_size]) */
struct jffs2_eraseblock *nextblock; /* The block we're currently filling */
struct jffs2_eraseblock *gcblock; /* The block we're currently garbage-collecting */
struct list_head clean_list; /* Blocks 100% full of clean data */
+ struct list_head very_dirty_list; /* Blocks with lots of dirty space */
struct list_head dirty_list; /* Blocks with some dirty space */
+ struct list_head erasable_list; /* Blocks which are completely dirty, and need erasing */
+ struct list_head erasable_pending_wbuf_list; /* Blocks which need erasing but only after the current wbuf is flushed */
struct list_head erasing_list; /* Blocks which are currently erasing */
- struct list_head erase_pending_list; /* Blocks which need erasing */
+ struct list_head erase_pending_list; /* Blocks which need erasing now */
struct list_head erase_complete_list; /* Blocks which are erased and need the clean marker written to them */
struct list_head free_list; /* Blocks which are free and ready to be used */
struct list_head bad_list; /* Bad blocks. */
struct list_head bad_used_list; /* Bad blocks with valid data in. */
- spinlock_t erase_completion_lock; /* Protect free_list and erasing_list
+ spinlock_t erase_completion_lock; /* Protect free_list and erasing_list
against erase completion handler */
wait_queue_head_t erase_wait; /* For waiting for erases to complete */
- struct jffs2_inode_cache *inocache_list[INOCACHE_HASHSIZE];
+
+ wait_queue_head_t inocache_wq;
+ struct jffs2_inode_cache **inocache_list;
spinlock_t inocache_lock;
-};
-#ifdef JFFS2_OUT_OF_KERNEL
-#define JFFS2_SB_INFO(sb) ((struct jffs2_sb_info *) &(sb)->u)
-#else
-#define JFFS2_SB_INFO(sb) (&sb->u.jffs2_sb)
+ /* Sem to allow jffs2_garbage_collect_deletion_dirent to
+ drop the erase_completion_lock while it's holding a pointer
+ to an obsoleted node. I don't like this. Alternatives welcomed. */
+ struct semaphore erase_free_sem;
+
+ uint32_t wbuf_pagesize; /* 0 for NOR and other flashes with no wbuf */
+
+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ /* Write-behind buffer for NAND flash */
+ unsigned char *wbuf;
+ uint32_t wbuf_ofs;
+ uint32_t wbuf_len;
+ struct jffs2_inodirty *wbuf_inodes;
+
+ struct rw_semaphore wbuf_sem; /* Protects the write buffer */
+
+ /* Information about out-of-band area usage... */
+ struct nand_oobinfo *oobinfo;
+ uint32_t badblock_pos;
+ uint32_t fsdata_pos;
+ uint32_t fsdata_len;
#endif
-#define OFNI_BS_2SFFJ(c) ((struct super_block *) ( ((char *)c) - ((char *)(&((struct super_block *)NULL)->u)) ) )
+ struct jffs2_summary *summary; /* Summary information */
+
+ uint32_t ebh_size; /* The space occupied by the eraseblock header on flash */
+
+ uint32_t total_erase_count; /* The sum of the erase counts of all erase blocks */
+ uint32_t nr_blocks_with_ebh; /* The number of erase blocks that have an eraseblock header */
+ uint32_t max_erase_count; /* The maximum erase count of all erase blocks */
+
+ uint32_t used_blocks_current_index;
+ uint32_t free_blocks_current_index;
+ struct jffs2_blocks_bucket used_blocks[HASH_SIZE]; /* The hash table for both dirty and clean erase blocks */
+ struct jffs2_blocks_bucket free_blocks[HASH_SIZE]; /* The hash table for free erase blocks */
+
+ /* OS-private pointer for getting back to master superblock info */
+ void *os_priv;
+};
#endif /* _JFFS2_FB_SB */
diff --git a/linux-2.4.x/include/linux/mtd/bbm.h b/linux-2.4.x/include/linux/mtd/bbm.h
new file mode 100644
index 0000000..7a7fbe8
--- /dev/null
+++ b/linux-2.4.x/include/linux/mtd/bbm.h
@@ -0,0 +1,122 @@
+/*
+ * linux/include/linux/mtd/bbm.h
+ *
+ * NAND family Bad Block Management (BBM) header file
+ * - Bad Block Table (BBT) implementation
+ *
+ * Copyright (c) 2005 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * Copyright (c) 2000-2005
+ * Thomas Gleixner <tglx@linuxtronix.de>
+ *
+ */
+#ifndef __LINUX_MTD_BBM_H
+#define __LINUX_MTD_BBM_H
+
+/* The maximum number of NAND chips in an array */
+#define NAND_MAX_CHIPS 8
+
+/**
+ * struct nand_bbt_descr - bad block table descriptor
+ * @param options options for this descriptor
+ * @param pages the page(s) where we find the bbt, used with
+ * option BBT_ABSPAGE when bbt is searched,
+ * then we store the found bbt's pages here.
+ * It's an array and supports up to 8 chips now
+ * @param offs offset of the pattern in the oob area of the page
+ * @param veroffs offset of the bbt version counter in the oob area of the page
+ * @param version version read from the bbt page during scan
+ * @param len length of the pattern, if 0 no pattern check is performed
+ * @param maxblocks maximum number of blocks to search for a bbt. This number of
+ * blocks is reserved at the end of the device
+ * where the tables are written.
+ * @param reserved_block_code if non-0, this pattern denotes a reserved
+ * (rather than bad) block in the stored bbt
+ * @param pattern pattern to identify bad block table or factory marked
+ * good / bad blocks, can be NULL, if len = 0
+ *
+ * Descriptor for the bad block table marker and the descriptor for the
+ * pattern which identifies good and bad blocks. The assumption is made
+ * that the pattern and the version count are always located in the oob area
+ * of the first block.
+ */
+struct nand_bbt_descr {
+ int options;
+ int pages[NAND_MAX_CHIPS];
+ int offs;
+ int veroffs;
+ uint8_t version[NAND_MAX_CHIPS];
+ int len;
+ int maxblocks;
+ int reserved_block_code;
+ uint8_t *pattern;
+};
+
+/* Options for the bad block table descriptors */
+
+/* The number of bits used per block in the bbt on the device */
+#define NAND_BBT_NRBITS_MSK 0x0000000F
+#define NAND_BBT_1BIT 0x00000001
+#define NAND_BBT_2BIT 0x00000002
+#define NAND_BBT_4BIT 0x00000004
+#define NAND_BBT_8BIT 0x00000008
+/* The bad block table is in the last good block of the device */
+#define NAND_BBT_LASTBLOCK 0x00000010
+/* The bbt is at the given page, else we must scan for the bbt */
+#define NAND_BBT_ABSPAGE 0x00000020
+/* The bbt is at the given page, else we must scan for the bbt */
+#define NAND_BBT_SEARCH 0x00000040
+/* bbt is stored per chip on multichip devices */
+#define NAND_BBT_PERCHIP 0x00000080
+/* bbt has a version counter at offset veroffs */
+#define NAND_BBT_VERSION 0x00000100
+/* Create a bbt if none exists */
+#define NAND_BBT_CREATE 0x00000200
+/* Search good / bad pattern through all pages of a block */
+#define NAND_BBT_SCANALLPAGES 0x00000400
+/* Scan block empty during good / bad block scan */
+#define NAND_BBT_SCANEMPTY 0x00000800
+/* Write bbt if necessary */
+#define NAND_BBT_WRITE 0x00001000
+/* Read and write back block contents when writing bbt */
+#define NAND_BBT_SAVECONTENT 0x00002000
+/* Search good / bad pattern on the first and the second page */
+#define NAND_BBT_SCAN2NDPAGE 0x00004000
+
+/* The maximum number of blocks to scan for a bbt */
+#define NAND_BBT_SCAN_MAXBLOCKS 4
+
+/*
+ * Constants for oob configuration
+ */
+#define ONENAND_BADBLOCK_POS 0
+
+/**
+ * struct bbm_info - [GENERIC] Bad Block Table data structure
+ * @param bbt_erase_shift [INTERN] number of address bits in a bbt entry
+ * @param badblockpos [INTERN] position of the bad block marker in the oob area
+ * @param bbt [INTERN] bad block table pointer
+ * @param badblock_pattern [REPLACEABLE] bad block scan pattern used for initial bad block scan
+ * @param priv [OPTIONAL] pointer to private bbm data
+ */
+struct bbm_info {
+ int bbt_erase_shift;
+ int badblockpos;
+ int options;
+
+ uint8_t *bbt;
+
+ int (*isbad_bbt)(struct mtd_info *mtd, loff_t ofs, int allowbbt);
+
+ /* TODO Add more NAND specific fields */
+ struct nand_bbt_descr *badblock_pattern;
+
+ void *priv;
+};
+
+/* OneNAND BBT interface */
+extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
+extern int onenand_default_bbt(struct mtd_info *mtd);
+
+#endif /* __LINUX_MTD_BBM_H */
diff --git a/linux-2.4.x/include/linux/mtd/blktrans.h b/linux-2.4.x/include/linux/mtd/blktrans.h
new file mode 100644
index 0000000..23c00e4
--- /dev/null
+++ b/linux-2.4.x/include/linux/mtd/blktrans.h
@@ -0,0 +1,72 @@
+/*
+ * $Id: blktrans.h,v 1.7 2006/03/29 08:26:28 dwmw2 Exp $
+ *
+ * (C) 2003 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Interface to Linux block layer for MTD 'translation layers'.
+ *
+ */
+
+#ifndef __MTD_TRANS_H__
+#define __MTD_TRANS_H__
+
+#include <linux/mutex.h>
+
+struct hd_geometry;
+struct mtd_info;
+struct mtd_blktrans_ops;
+struct file;
+struct inode;
+
+struct mtd_blktrans_dev {
+ struct mtd_blktrans_ops *tr;
+ struct list_head list;
+ struct mtd_info *mtd;
+ struct mutex lock;
+ int devnum;
+ int blksize;
+ unsigned long size;
+ int readonly;
+ void *blkcore_priv; /* gendisk in 2.5, devfs_handle in 2.4 */
+};
+
+struct blkcore_priv; /* Differs for 2.4 and 2.5 kernels; private */
+
+struct mtd_blktrans_ops {
+ char *name;
+ int major;
+ int part_bits;
+
+ /* Access functions */
+ int (*readsect)(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buffer);
+ int (*writesect)(struct mtd_blktrans_dev *dev,
+ unsigned long block, char *buffer);
+
+ /* Block layer ioctls */
+ int (*getgeo)(struct mtd_blktrans_dev *dev, struct hd_geometry *geo);
+ int (*flush)(struct mtd_blktrans_dev *dev);
+
+ /* Called with mtd_table_mutex held; no race with add/remove */
+ int (*open)(struct mtd_blktrans_dev *dev);
+ int (*release)(struct mtd_blktrans_dev *dev);
+
+ /* Called on {de,}registration and on subsequent addition/removal
+ of devices, with mtd_table_mutex held. */
+ void (*add_mtd)(struct mtd_blktrans_ops *tr, struct mtd_info *mtd);
+ void (*remove_dev)(struct mtd_blktrans_dev *dev);
+
+ struct list_head devs;
+ struct list_head list;
+ struct module *owner;
+
+ struct mtd_blkcore_priv *blkcore_priv;
+};
+
+extern int register_mtd_blktrans(struct mtd_blktrans_ops *tr);
+extern int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr);
+extern int add_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
+extern int del_mtd_blktrans_dev(struct mtd_blktrans_dev *dev);
+
+
+#endif /* __MTD_TRANS_H__ */
diff --git a/linux-2.4.x/include/linux/mtd/cfi.h b/linux-2.4.x/include/linux/mtd/cfi.h
index 7d6b316..abf45ef 100644
--- a/linux-2.4.x/include/linux/mtd/cfi.h
+++ b/linux-2.4.x/include/linux/mtd/cfi.h
@@ -1,7 +1,7 @@
-/* Common Flash Interface structures
+/* Common Flash Interface structures
* See http://support.intel.com/design/flash/technote/index.htm
- * $Id: cfi.h,v 1.25 2001/09/04 07:06:21 dwmw2 Exp $
+ * $Id: cfi.h,v 1.58 2005/11/29 20:01:30 gleixner Exp $
*/
#ifndef __MTD_CFI_H__
@@ -12,150 +12,83 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
+#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>
-/*
- * You can optimize the code size and performance by defining only
- * the geometry(ies) available on your hardware.
- * CFIDEV_INTERLEAVE_n, where represents the interleave (number of chips to fill the bus width)
- * CFIDEV_BUSWIDTH_n, where n is the bus width in bytes (1, 2 or 4 bytes)
- *
- * By default, all (known) geometries are supported.
- */
-
-#ifndef CONFIG_MTD_CFI_GEOMETRY
-
-#define CFIDEV_INTERLEAVE_1 (1)
-#define CFIDEV_INTERLEAVE_2 (2)
-#define CFIDEV_INTERLEAVE_4 (4)
-
-#define CFIDEV_BUSWIDTH_1 (1)
-#define CFIDEV_BUSWIDTH_2 (2)
-#define CFIDEV_BUSWIDTH_4 (4)
-
-#else
-
#ifdef CONFIG_MTD_CFI_I1
-#define CFIDEV_INTERLEAVE_1 (1)
-#endif
-#ifdef CONFIG_MTD_CFI_I2
-#define CFIDEV_INTERLEAVE_2 (2)
-#endif
-#ifdef CONFIG_MTD_CFI_I4
-#define CFIDEV_INTERLEAVE_4 (4)
-#endif
-
-#ifdef CONFIG_MTD_CFI_B1
-#define CFIDEV_BUSWIDTH_1 (1)
-#endif
-#ifdef CONFIG_MTD_CFI_B2
-#define CFIDEV_BUSWIDTH_2 (2)
-#endif
-#ifdef CONFIG_MTD_CFI_B4
-#define CFIDEV_BUSWIDTH_4 (4)
-#endif
-
+#define cfi_interleave(cfi) 1
+#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
+#else
+#define cfi_interleave_is_1(cfi) (0)
#endif
-/*
- * The following macros are used to select the code to execute:
- * cfi_buswidth_is_*()
- * cfi_interleave_is_*()
- * [where * is either 1, 2 or 4]
- * Those macros should be used with 'if' statements. If only one of few
- * geometry arrangements are selected, they expand to constants thus allowing
- * the compiler (most of them being 0) to optimize away all the unneeded code,
- * while still validating the syntax (which is not possible with embedded
- * #if ... #endif constructs).
- */
-
-#ifdef CFIDEV_INTERLEAVE_1
-# ifdef CFIDEV_INTERLEAVE
-# undef CFIDEV_INTERLEAVE
-# define CFIDEV_INTERLEAVE (cfi->interleave)
+#ifdef CONFIG_MTD_CFI_I2
+# ifdef cfi_interleave
+# undef cfi_interleave
+# define cfi_interleave(cfi) ((cfi)->interleave)
# else
-# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_1
+# define cfi_interleave(cfi) 2
# endif
-# define cfi_interleave_is_1() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_1)
+#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
-# define cfi_interleave_is_1() (0)
+#define cfi_interleave_is_2(cfi) (0)
#endif
-#ifdef CFIDEV_INTERLEAVE_2
-# ifdef CFIDEV_INTERLEAVE
-# undef CFIDEV_INTERLEAVE
-# define CFIDEV_INTERLEAVE (cfi->interleave)
+#ifdef CONFIG_MTD_CFI_I4
+# ifdef cfi_interleave
+# undef cfi_interleave
+# define cfi_interleave(cfi) ((cfi)->interleave)
# else
-# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_2
+# define cfi_interleave(cfi) 4
# endif
-# define cfi_interleave_is_2() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_2)
+#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
-# define cfi_interleave_is_2() (0)
+#define cfi_interleave_is_4(cfi) (0)
#endif
-#ifdef CFIDEV_INTERLEAVE_4
-# ifdef CFIDEV_INTERLEAVE
-# undef CFIDEV_INTERLEAVE
-# define CFIDEV_INTERLEAVE (cfi->interleave)
+#ifdef CONFIG_MTD_CFI_I8
+# ifdef cfi_interleave
+# undef cfi_interleave
+# define cfi_interleave(cfi) ((cfi)->interleave)
# else
-# define CFIDEV_INTERLEAVE CFIDEV_INTERLEAVE_4
+# define cfi_interleave(cfi) 8
# endif
-# define cfi_interleave_is_4() (CFIDEV_INTERLEAVE == CFIDEV_INTERLEAVE_4)
+#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
-# define cfi_interleave_is_4() (0)
+#define cfi_interleave_is_8(cfi) (0)
#endif
-#ifndef CFIDEV_INTERLEAVE
-#error You must define at least one interleave to support!
+static inline int cfi_interleave_supported(int i)
+{
+ switch (i) {
+#ifdef CONFIG_MTD_CFI_I1
+ case 1:
#endif
-
-#ifdef CFIDEV_BUSWIDTH_1
-# ifdef CFIDEV_BUSWIDTH
-# undef CFIDEV_BUSWIDTH
-# define CFIDEV_BUSWIDTH (map->buswidth)
-# else
-# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_1
-# endif
-# define cfi_buswidth_is_1() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_1)
-#else
-# define cfi_buswidth_is_1() (0)
+#ifdef CONFIG_MTD_CFI_I2
+ case 2:
#endif
-
-#ifdef CFIDEV_BUSWIDTH_2
-# ifdef CFIDEV_BUSWIDTH
-# undef CFIDEV_BUSWIDTH
-# define CFIDEV_BUSWIDTH (map->buswidth)
-# else
-# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_2
-# endif
-# define cfi_buswidth_is_2() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_2)
-#else
-# define cfi_buswidth_is_2() (0)
+#ifdef CONFIG_MTD_CFI_I4
+ case 4:
#endif
-
-#ifdef CFIDEV_BUSWIDTH_4
-# ifdef CFIDEV_BUSWIDTH
-# undef CFIDEV_BUSWIDTH
-# define CFIDEV_BUSWIDTH (map->buswidth)
-# else
-# define CFIDEV_BUSWIDTH CFIDEV_BUSWIDTH_4
-# endif
-# define cfi_buswidth_is_4() (CFIDEV_BUSWIDTH == CFIDEV_BUSWIDTH_4)
-#else
-# define cfi_buswidth_is_4() (0)
+#ifdef CONFIG_MTD_CFI_I8
+ case 8:
#endif
+ return 1;
+
+ default:
+ return 0;
+ }
+}
-#ifndef CFIDEV_BUSWIDTH
-#error You must define at least one bus width to support!
-#endif
-/* NB: these values must represents the number of bytes needed to meet the
- * device type (x8, x16, x32). Eg. a 32 bit device is 4 x 8 bytes.
+/* NB: these values must represent the number of bytes needed to meet the
+ * device type (x8, x16, x32). E.g. a 32-bit device is 4 bytes (4 x 8 bits).
* These numbers are used in calculations.
*/
#define CFI_DEVICETYPE_X8 (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
+#define CFI_DEVICETYPE_X64 (64 / 8)
/* NB: We keep these structures in memory in HOST byteorder, except
* where individually noted.
@@ -163,84 +96,147 @@
/* Basic Query Structure */
struct cfi_ident {
- __u8 qry[3];
- __u16 P_ID;
- __u16 P_ADR;
- __u16 A_ID;
- __u16 A_ADR;
- __u8 VccMin;
- __u8 VccMax;
- __u8 VppMin;
- __u8 VppMax;
- __u8 WordWriteTimeoutTyp;
- __u8 BufWriteTimeoutTyp;
- __u8 BlockEraseTimeoutTyp;
- __u8 ChipEraseTimeoutTyp;
- __u8 WordWriteTimeoutMax;
- __u8 BufWriteTimeoutMax;
- __u8 BlockEraseTimeoutMax;
- __u8 ChipEraseTimeoutMax;
- __u8 DevSize;
- __u16 InterfaceDesc;
- __u16 MaxBufWriteSize;
- __u8 NumEraseRegions;
- __u32 EraseRegionInfo[0]; /* Not host ordered */
+ uint8_t qry[3];
+ uint16_t P_ID;
+ uint16_t P_ADR;
+ uint16_t A_ID;
+ uint16_t A_ADR;
+ uint8_t VccMin;
+ uint8_t VccMax;
+ uint8_t VppMin;
+ uint8_t VppMax;
+ uint8_t WordWriteTimeoutTyp;
+ uint8_t BufWriteTimeoutTyp;
+ uint8_t BlockEraseTimeoutTyp;
+ uint8_t ChipEraseTimeoutTyp;
+ uint8_t WordWriteTimeoutMax;
+ uint8_t BufWriteTimeoutMax;
+ uint8_t BlockEraseTimeoutMax;
+ uint8_t ChipEraseTimeoutMax;
+ uint8_t DevSize;
+ uint16_t InterfaceDesc;
+ uint16_t MaxBufWriteSize;
+ uint8_t NumEraseRegions;
+ uint32_t EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));
/* Extended Query Structure for both PRI and ALT */
struct cfi_extquery {
- __u8 pri[3];
- __u8 MajorVersion;
- __u8 MinorVersion;
+ uint8_t pri[3];
+ uint8_t MajorVersion;
+ uint8_t MinorVersion;
} __attribute__((packed));
/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
struct cfi_pri_intelext {
- __u8 pri[3];
- __u8 MajorVersion;
- __u8 MinorVersion;
- __u32 FeatureSupport;
- __u8 SuspendCmdSupport;
- __u16 BlkStatusRegMask;
- __u8 VccOptimal;
- __u8 VppOptimal;
+ uint8_t pri[3];
+ uint8_t MajorVersion;
+ uint8_t MinorVersion;
+ uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
+ block follows - FIXME - not currently supported */
+ uint8_t SuspendCmdSupport;
+ uint16_t BlkStatusRegMask;
+ uint8_t VccOptimal;
+ uint8_t VppOptimal;
+ uint8_t NumProtectionFields;
+ uint16_t ProtRegAddr;
+ uint8_t FactProtRegSize;
+ uint8_t UserProtRegSize;
+ uint8_t extra[0];
+} __attribute__((packed));
+
+struct cfi_intelext_otpinfo {
+ uint32_t ProtRegAddr;
+ uint16_t FactGroups;
+ uint8_t FactProtRegSize;
+ uint16_t UserGroups;
+ uint8_t UserProtRegSize;
+} __attribute__((packed));
+
+struct cfi_intelext_blockinfo {
+ uint16_t NumIdentBlocks;
+ uint16_t BlockSize;
+ uint16_t MinBlockEraseCycles;
+ uint8_t BitsPerCell;
+ uint8_t BlockCap;
+} __attribute__((packed));
+
+struct cfi_intelext_regioninfo {
+ uint16_t NumIdentPartitions;
+ uint8_t NumOpAllowed;
+ uint8_t NumOpAllowedSimProgMode;
+ uint8_t NumOpAllowedSimEraMode;
+ uint8_t NumBlockTypes;
+ struct cfi_intelext_blockinfo BlockTypes[1];
+} __attribute__((packed));
+
+struct cfi_intelext_programming_regioninfo {
+ uint8_t ProgRegShift;
+ uint8_t Reserved1;
+ uint8_t ControlValid;
+ uint8_t Reserved2;
+ uint8_t ControlInvalid;
+ uint8_t Reserved3;
+} __attribute__((packed));
+
+/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
+
+struct cfi_pri_amdstd {
+ uint8_t pri[3];
+ uint8_t MajorVersion;
+ uint8_t MinorVersion;
+ uint8_t SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
+ uint8_t EraseSuspend;
+ uint8_t BlkProt;
+ uint8_t TmpBlkUnprotect;
+ uint8_t BlkProtUnprot;
+ uint8_t SimultaneousOps;
+ uint8_t BurstMode;
+ uint8_t PageMode;
+ uint8_t VppMin;
+ uint8_t VppMax;
+ uint8_t TopBottom;
} __attribute__((packed));
struct cfi_pri_query {
- __u8 NumFields;
- __u32 ProtField[1]; /* Not host ordered */
+ uint8_t NumFields;
+ uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));
struct cfi_bri_query {
- __u8 PageModeReadCap;
- __u8 NumFields;
- __u32 ConfField[1]; /* Not host ordered */
+ uint8_t PageModeReadCap;
+ uint8_t NumFields;
+ uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));
-#define P_ID_NONE 0
-#define P_ID_INTEL_EXT 1
-#define P_ID_AMD_STD 2
-#define P_ID_INTEL_STD 3
-#define P_ID_AMD_EXT 4
-#define P_ID_MITSUBISHI_STD 256
-#define P_ID_MITSUBISHI_EXT 257
-#define P_ID_RESERVED 65535
+#define P_ID_NONE 0x0000
+#define P_ID_INTEL_EXT 0x0001
+#define P_ID_AMD_STD 0x0002
+#define P_ID_INTEL_STD 0x0003
+#define P_ID_AMD_EXT 0x0004
+#define P_ID_WINBOND 0x0006
+#define P_ID_ST_ADV 0x0020
+#define P_ID_MITSUBISHI_STD 0x0100
+#define P_ID_MITSUBISHI_EXT 0x0101
+#define P_ID_SST_PAGE 0x0102
+#define P_ID_INTEL_PERFORMANCE 0x0200
+#define P_ID_INTEL_DATA 0x0210
+#define P_ID_RESERVED 0xffff
-#define CFI_MODE_CFI 0
-#define CFI_MODE_JEDEC 1
+#define CFI_MODE_CFI 1
+#define CFI_MODE_JEDEC 0
struct cfi_private {
- __u16 cmdset;
+ uint16_t cmdset;
void *cmdset_priv;
int interleave;
int device_type;
int cfi_mode; /* Are we a JEDEC device pretending to be CFI? */
int addr_unlock1;
int addr_unlock2;
- int fast_prog;
struct mtd_info *(*cmdset_setup)(struct map_info *);
struct cfi_ident *cfiq; /* For now only one. We insist that all devs
must be of the same type. */
@@ -251,83 +247,145 @@ struct cfi_private {
struct flchip chips[0]; /* per-chip data structure for each chip */
};
-#define MAX_CFI_CHIPS 8 /* Entirely arbitrary to avoid realloc() */
-
/*
* Returns the command address according to the given geometry.
*/
-static inline __u32 cfi_build_cmd_addr(__u32 cmd_ofs, int interleave, int type)
+static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
{
return (cmd_ofs * type) * interleave;
}
/*
- * Transforms the CFI command for the given geometry (bus width & interleave.
+ * Transforms the CFI command for the given geometry (bus width & interleave).
+ * It looks too long to be inline, but in the common case it should almost all
+ * get optimised away.
*/
-static inline __u32 cfi_build_cmd(u_char cmd, struct map_info *map, struct cfi_private *cfi)
+static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
- __u32 val = 0;
-
- if (cfi_buswidth_is_1()) {
- /* 1 x8 device */
- val = cmd;
- } else if (cfi_buswidth_is_2()) {
- if (cfi_interleave_is_1()) {
- /* 1 x16 device in x16 mode */
- val = cpu_to_cfi16(cmd);
- } else if (cfi_interleave_is_2()) {
- /* 2 (x8, x16 or x32) devices in x8 mode */
- val = cpu_to_cfi16((cmd << 8) | cmd);
- }
- } else if (cfi_buswidth_is_4()) {
- if (cfi_interleave_is_1()) {
- /* 1 x32 device in x32 mode */
- val = cpu_to_cfi32(cmd);
- } else if (cfi_interleave_is_2()) {
- /* 2 x16 device in x16 mode */
- val = cpu_to_cfi32((cmd << 16) | cmd);
- } else if (cfi_interleave_is_4()) {
- /* 4 (x8, x16 or x32) devices in x8 mode */
- val = (cmd << 16) | cmd;
- val = cpu_to_cfi32((val << 8) | val);
- }
+ map_word val = { {0} };
+ int wordwidth, words_per_bus, chip_mode, chips_per_word;
+ unsigned long onecmd;
+ int i;
+
+ /* We do it this way to give the compiler a fighting chance
+ of optimising away all the crap for 'bankwidth' larger than
+ an unsigned long, in the common case where that support is
+ disabled */
+ if (map_bankwidth_is_large(map)) {
+ wordwidth = sizeof(unsigned long);
+ words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
+ } else {
+ wordwidth = map_bankwidth(map);
+ words_per_bus = 1;
+ }
+
+ chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
+ chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
+
+ /* First, determine what the bit-pattern should be for a single
+ device, according to chip mode and endianness... */
+ switch (chip_mode) {
+ default: BUG();
+ case 1:
+ onecmd = cmd;
+ break;
+ case 2:
+ onecmd = cpu_to_cfi16(cmd);
+ break;
+ case 4:
+ onecmd = cpu_to_cfi32(cmd);
+ break;
+ }
+
+ /* Now replicate it across the size of an unsigned long, or
+ just to the bus width as appropriate */
+ switch (chips_per_word) {
+ default: BUG();
+#if BITS_PER_LONG >= 64
+ case 8:
+ onecmd |= (onecmd << (chip_mode * 32));
+#endif
+ case 4:
+ onecmd |= (onecmd << (chip_mode * 16));
+ case 2:
+ onecmd |= (onecmd << (chip_mode * 8));
+ case 1:
+ ;
}
+
+ /* And finally, for the multi-word case, replicate it
+ in all words in the structure */
+ for (i=0; i < words_per_bus; i++) {
+ val.x[i] = onecmd;
+ }
+
return val;
}
#define CMD(x) cfi_build_cmd((x), map, cfi)
-/*
- * Read a value according to the bus width.
- */
-static inline __u32 cfi_read(struct map_info *map, __u32 addr)
+static inline unsigned long cfi_merge_status(map_word val, struct map_info *map,
+ struct cfi_private *cfi)
{
- if (cfi_buswidth_is_1()) {
- return map->read8(map, addr);
- } else if (cfi_buswidth_is_2()) {
- return map->read16(map, addr);
- } else if (cfi_buswidth_is_4()) {
- return map->read32(map, addr);
+ int wordwidth, words_per_bus, chip_mode, chips_per_word;
+ unsigned long onestat, res = 0;
+ int i;
+
+ /* We do it this way to give the compiler a fighting chance
+ of optimising away all the crap for 'bankwidth' larger than
+ an unsigned long, in the common case where that support is
+ disabled */
+ if (map_bankwidth_is_large(map)) {
+ wordwidth = sizeof(unsigned long);
+ words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
} else {
- return 0;
+ wordwidth = map_bankwidth(map);
+ words_per_bus = 1;
}
-}
-/*
- * Write a value according to the bus width.
- */
+ chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
+ chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);
-static inline void cfi_write(struct map_info *map, __u32 val, __u32 addr)
-{
- if (cfi_buswidth_is_1()) {
- map->write8(map, val, addr);
- } else if (cfi_buswidth_is_2()) {
- map->write16(map, val, addr);
- } else if (cfi_buswidth_is_4()) {
- map->write32(map, val, addr);
+ onestat = val.x[0];
+ /* Or all status words together */
+ for (i=1; i < words_per_bus; i++) {
+ onestat |= val.x[i];
+ }
+
+ res = onestat;
+ switch(chips_per_word) {
+ default: BUG();
+#if BITS_PER_LONG >= 64
+ case 8:
+ res |= (onestat >> (chip_mode * 32));
+#endif
+ case 4:
+ res |= (onestat >> (chip_mode * 16));
+ case 2:
+ res |= (onestat >> (chip_mode * 8));
+ case 1:
+ ;
+ }
+
+ /* Last, determine what the bit-pattern should be for a single
+ device, according to chip mode and endianness... */
+ switch (chip_mode) {
+ case 1:
+ break;
+ case 2:
+ res = cfi16_to_cpu(res);
+ break;
+ case 4:
+ res = cfi32_to_cpu(res);
+ break;
+ default: BUG();
}
+ return res;
}
+#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
+
+
/*
* Sends a CFI command to a bank of flash for the given geometry.
*
@@ -335,59 +393,87 @@ static inline void cfi_write(struct map_info *map, __u32 val, __u32 addr)
* If prev_val is non-null, it will be set to the value at the command address,
* before the command was written.
*/
-static inline __u32 cfi_send_gen_cmd(u_char cmd, __u32 cmd_addr, __u32 base,
+static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
struct map_info *map, struct cfi_private *cfi,
- int type, __u32 *prev_val)
+ int type, map_word *prev_val)
{
- __u32 val;
- __u32 addr = base + cfi_build_cmd_addr(cmd_addr, CFIDEV_INTERLEAVE, type);
+ map_word val;
+ uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);
val = cfi_build_cmd(cmd, map, cfi);
if (prev_val)
- *prev_val = cfi_read(map, addr);
+ *prev_val = map_read(map, addr);
- cfi_write(map, val, addr);
+ map_write(map, val, addr);
return addr - base;
}
-static inline __u8 cfi_read_query(struct map_info *map, __u32 addr)
+static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
- if (cfi_buswidth_is_1()) {
- return map->read8(map, addr);
- } else if (cfi_buswidth_is_2()) {
- return cfi16_to_cpu(map->read16(map, addr));
- } else if (cfi_buswidth_is_4()) {
- return cfi32_to_cpu(map->read32(map, addr));
+ map_word val = map_read(map, addr);
+
+ if (map_bankwidth_is_1(map)) {
+ return val.x[0];
+ } else if (map_bankwidth_is_2(map)) {
+ return cfi16_to_cpu(val.x[0]);
} else {
- return 0;
+ /* No point in a 64-bit byteswap since that would just be
+ swapping the responses from different chips, and we are
+ only interested in one chip (a representative sample) */
+ return cfi32_to_cpu(val.x[0]);
}
}
-static inline void cfi_udelay(int us)
+static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
- if (current->need_resched) {
- unsigned long t = us * HZ / 1000000;
- if (t < 1)
- t = 1;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(t);
+ map_word val = map_read(map, addr);
+
+ if (map_bankwidth_is_1(map)) {
+ return val.x[0] & 0xff;
+ } else if (map_bankwidth_is_2(map)) {
+ return cfi16_to_cpu(val.x[0]);
+ } else {
+ /* No point in a 64-bit byteswap since that would just be
+ swapping the responses from different chips, and we are
+ only interested in one chip (a representative sample) */
+ return cfi32_to_cpu(val.x[0]);
}
- else
-#endif
- udelay(us);
-}
-static inline void cfi_spin_lock(spinlock_t *mutex)
-{
- spin_lock_bh(mutex);
}
-static inline void cfi_spin_unlock(spinlock_t *mutex)
+static inline void cfi_udelay(int us)
{
- spin_unlock_bh(mutex);
+ if (us >= 1000) {
+ msleep((us+999)/1000);
+ } else {
+ udelay(us);
+ cond_resched();
+ }
}
+struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
+ const char* name);
+struct cfi_fixup {
+ uint16_t mfr;
+ uint16_t id;
+ void (*fixup)(struct mtd_info *mtd, void* param);
+ void* param;
+};
+
+#define CFI_MFR_ANY 0xffff
+#define CFI_ID_ANY 0xffff
+
+#define CFI_MFR_AMD 0x0001
+#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+
+void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
+
+typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
+ unsigned long adr, int len, void *thunk);
+
+int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
+ loff_t ofs, size_t len, void *thunk);
+
#endif /* __MTD_CFI_H__ */
diff --git a/linux-2.4.x/include/linux/mtd/cfi_endian.h b/linux-2.4.x/include/linux/mtd/cfi_endian.h
index 9f3b041..25724f7 100644
--- a/linux-2.4.x/include/linux/mtd/cfi_endian.h
+++ b/linux-2.4.x/include/linux/mtd/cfi_endian.h
@@ -1,5 +1,5 @@
/*
- * $Id: cfi_endian.h,v 1.10 2001/06/18 11:00:46 abz Exp $
+ * $Id: cfi_endian.h,v 1.11 2002/01/30 23:20:48 awozniak Exp $
*
*/
@@ -30,22 +30,28 @@
#define cfi8_to_cpu(x) (x)
#define cpu_to_cfi16(x) cpu_to_le16(x)
#define cpu_to_cfi32(x) cpu_to_le32(x)
+#define cpu_to_cfi64(x) cpu_to_le64(x)
#define cfi16_to_cpu(x) le16_to_cpu(x)
#define cfi32_to_cpu(x) le32_to_cpu(x)
+#define cfi64_to_cpu(x) le64_to_cpu(x)
#elif defined (CFI_BIG_ENDIAN)
#define cpu_to_cfi8(x) (x)
#define cfi8_to_cpu(x) (x)
#define cpu_to_cfi16(x) cpu_to_be16(x)
#define cpu_to_cfi32(x) cpu_to_be32(x)
+#define cpu_to_cfi64(x) cpu_to_be64(x)
#define cfi16_to_cpu(x) be16_to_cpu(x)
#define cfi32_to_cpu(x) be32_to_cpu(x)
+#define cfi64_to_cpu(x) be64_to_cpu(x)
#elif defined (CFI_HOST_ENDIAN)
#define cpu_to_cfi8(x) (x)
#define cfi8_to_cpu(x) (x)
#define cpu_to_cfi16(x) (x)
#define cpu_to_cfi32(x) (x)
+#define cpu_to_cfi64(x) (x)
#define cfi16_to_cpu(x) (x)
#define cfi32_to_cpu(x) (x)
+#define cfi64_to_cpu(x) (x)
#else
#error No CFI endianness defined
#endif
diff --git a/linux-2.4.x/include/linux/mtd/compatmac.h b/linux-2.4.x/include/linux/mtd/compatmac.h
index 9c9a227..5433d1f 100644
--- a/linux-2.4.x/include/linux/mtd/compatmac.h
+++ b/linux-2.4.x/include/linux/mtd/compatmac.h
@@ -1,202 +1,249 @@
-
/*
- * mtd/include/compatmac.h
- *
- * $Id: compatmac.h,v 1.4 2000/07/03 10:01:38 dwmw2 Exp $
+ * $Id: compatmac.h,v 1.80 2005/11/07 11:14:54 gleixner Exp $
*
* Extensions and omissions from the normal 'linux/compatmac.h'
- * files. hopefully this will end up empty as the 'real' one
+ * files. Hopefully this will end up empty as the 'real' one
* becomes fully-featured.
*/
-
-/* First, include the parts which the kernel is good enough to provide
- * to us
- */
-
#ifndef __LINUX_MTD_COMPATMAC_H__
#define __LINUX_MTD_COMPATMAC_H__
-#include <linux/compatmac.h>
-#include <linux/types.h> /* used later in this header */
-#include <linux/module.h>
-#ifndef LINUX_VERSION_CODE
#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10)
+#error "This kernel is too old: not supported by this file"
#endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,0)
-#include <linux/vmalloc.h>
+ /* O(1) scheduler stuff. */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,5) && !defined(__rh_config_h__)
+#include <linux/sched.h>
+static inline void __recalc_sigpending(void)
+{
+ recalc_sigpending(current);
+}
+#undef recalc_sigpending
+#define recalc_sigpending() __recalc_sigpending ()
+
+#define set_user_nice(tsk, n) do { (tsk)->nice = n; } while(0)
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,0,0)
-# error "This kernel is too old: not supported by this file"
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20)
+
+#ifndef yield
+#define yield() do { set_current_state(TASK_RUNNING); schedule(); } while(0)
#endif
-/* Modularization issues */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,18)
-# define __USE_OLD_SYMTAB__
-# define EXPORT_NO_SYMBOLS register_symtab(NULL);
-# define REGISTER_SYMTAB(tab) register_symtab(tab)
-#else
-# define REGISTER_SYMTAB(tab) /* nothing */
+#ifndef minor
+#define major(d) (MAJOR(to_kdev_t(d)))
+#define minor(d) (MINOR(to_kdev_t(d)))
#endif
-#ifdef __USE_OLD_SYMTAB__
-# define __MODULE_STRING(s) /* nothing */
-# define MODULE_PARM(v,t) /* nothing */
-# define MODULE_PARM_DESC(v,t) /* nothing */
-# define MODULE_AUTHOR(n) /* nothing */
-# define MODULE_DESCRIPTION(d) /* nothing */
-# define MODULE_SUPPORTED_DEVICE(n) /* nothing */
+#ifndef mk_kdev
+#define mk_kdev(ma,mi) MKDEV(ma,mi)
+#define kdev_t_to_nr(x) (x)
#endif
-/*
- * "select" changed in 2.1.23. The implementation is twin, but this
- * header is new
- */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,22)
-# include <linux/poll.h>
-#else
-# define __USE_OLD_SELECT__
+#define need_resched() (current->need_resched)
+#define cond_resched() do { if need_resched() { yield(); } } while(0)
+
+#endif /* < 2.4.20 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,73)
+#define iminor(i) minor((i)->i_rdev)
+#define imajor(i) major((i)->i_rdev)
+#define old_encode_dev(d) ( (major(d)<<8) | minor(d) )
+#define old_decode_dev(rdev) (kdev_t_to_nr(mk_kdev((rdev)>>8, (rdev)&0xff)))
+#define old_valid_dev(d) (1)
#endif
-/* Other change in the fops are solved using pseudo-types */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,0)
-# define lseek_t long long
-# define lseek_off_t long long
-#else
-# define lseek_t int
-# define lseek_off_t off_t
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,61)
+
+#include <linux/sched.h>
+
+#ifdef __rh_config_h__
+#define sigmask_lock sighand->siglock
+#define sig sighand
#endif
-/* changed the prototype of read/write */
+static inline void __daemonize_modvers(void)
+{
+ daemonize();
+
+ spin_lock_irq(&current->sigmask_lock);
+ sigfillset(&current->blocked);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sigmask_lock);
+}
+#undef daemonize
+#define daemonize(fmt, ...) do { \
+ snprintf(current->comm, sizeof(current->comm), fmt ,##__VA_ARGS__); \
+ __daemonize_modvers(); \
+ } while(0)
+
+static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+{
+ unsigned long flags;
+ unsigned long ret;
+
+ spin_lock_irqsave(&current->sigmask_lock, flags);
+ ret = dequeue_signal(mask, info);
+ spin_unlock_irqrestore(&current->sigmask_lock, flags);
+
+ return ret;
+}
+
+static inline int allow_signal(int sig)
+{
+ if (sig < 1 || sig > _NSIG)
+ return -EINVAL;
+
+ spin_lock_irq(&current->sigmask_lock);
+ sigdelset(&current->blocked, sig);
+ recalc_sigpending();
+ /* Make sure the kernel neither eats it nor converts it to SIGKILL */
+ current->sig->action[sig-1].sa.sa_handler = (void *)2;
+ spin_unlock_irq(&current->sigmask_lock);
+ return 0;
+}
+static inline int disallow_signal(int sig)
+{
+ if (sig < 1 || sig > _NSIG)
+ return -EINVAL;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,0) || defined(__alpha__)
-# define count_t unsigned long
-# define read_write_t long
-#else
-# define count_t int
-# define read_write_t int
+ spin_lock_irq(&current->sigmask_lock);
+ sigaddset(&current->blocked, sig);
+ recalc_sigpending();
+
+ current->sig->action[sig-1].sa.sa_handler = SIG_DFL;
+ spin_unlock_irq(&current->sigmask_lock);
+ return 0;
+}
+
+#define PF_FREEZE 0
+#define refrigerator(x) do { ; } while(0)
#endif
+ /* Module bits */
+
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,1,31)
-# define release_t void
-# define release_return(x) return
-#else
-# define release_t int
-# define release_return(x) return (x)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,60)
+#define try_module_get(m) try_inc_mod_count(m)
+#define __module_get(m) do { if (!try_inc_mod_count(m)) BUG(); } while(0)
+#define module_put(m) do { if (m) __MOD_DEC_USE_COUNT((struct module *)(m)); } while(0)
+#define set_module_owner(x) do { x->owner = THIS_MODULE; } while(0)
#endif
-#if LINUX_VERSION_CODE < 0x20300
-#define __exit
+
+ /* Random filesystem stuff, only for JFFS2 really */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,5)
+#define parent_ino(d) ((d)->d_parent->d_inode->i_ino)
#endif
-#if LINUX_VERSION_CODE < 0x20200
-#define __init
-#else
-#include <linux/init.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,12)
+#define PageUptodate(x) Page_Uptodate(x)
#endif
-#if LINUX_VERSION_CODE < 0x20300
-#define init_MUTEX(x) do {*(x) = MUTEX;} while (0)
-#define RQFUNC_ARG void
-#define blkdev_dequeue_request(req) do {CURRENT = req->next;} while (0)
-#else
-#define RQFUNC_ARG request_queue_t *q
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,48)
+#define get_seconds() CURRENT_TIME
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0)
-#define __MOD_INC_USE_COUNT(mod) \
- (atomic_inc(&(mod)->uc.usecount), (mod)->flags |= MOD_VISITED|MOD_USED_ONCE)
-#define __MOD_DEC_USE_COUNT(mod) \
- (atomic_dec(&(mod)->uc.usecount), (mod)->flags |= MOD_VISITED)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,53)
+#define generic_file_readonly_mmap generic_file_mmap
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,70)
+
+#include <linux/kmod.h>
+#include <linux/string.h>
+
+static inline char *strlcpy(char *dest, const char *src, int len)
+{
+ dest[len-1] = 0;
+ return strncpy(dest, src, len-1);
+}
+
+static inline int do_old_request_module(const char *mod)
+{
+ return request_module(mod);
+}
+#undef request_module
+#define request_module(fmt, ...) \
+ ({ char modname[32]; snprintf(modname, 31, fmt ,##__VA_ARGS__); do_old_request_module(modname); })
+
+#endif /* 2.5.70 */
+#ifndef container_of
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
+#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
+#define kvec iovec
+#define __user
+#endif
-#define DECLARE_WAIT_QUEUE_HEAD(x) struct wait_queue *x = NULL
-#define init_waitqueue_head init_waitqueue
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,28)
+#define msleep(x) do { \
+ set_current_state(TASK_UNINTERRUPTIBLE); \
+ schedule_timeout((x)*(HZ/1000)); } while (0)
+#endif
-static inline int try_inc_mod_count(struct module *mod)
+#ifndef __iomem
+#define __iomem
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
+#define CURRENT_TIME_SEC ((struct timespec) { xtime.tv_sec, 0 })
+#include <linux/sched.h>
+#include <linux/suspend.h>
+static inline int try_to_freeze(unsigned long refrigerator_flags)
{
- if (mod)
- __MOD_INC_USE_COUNT(mod);
- return 1;
+ if (unlikely(current->flags & PF_FREEZE)) {
+ refrigerator(refrigerator_flags);
+ return 1;
+ } else
+ return 0;
}
#endif
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)
+#define try_to_freeze() try_to_freeze(0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
+typedef struct class_simple class;
+#define class_device_create(cs, dev, device, fmt, ...) class_simple_device_add((struct class_simple *)cs, dev, device, fmt, __VA_ARGS__)
+#define class_device_destroy(cs, dev) class_simple_device_remove(dev)
+#define class_create(owner, name) (struct class *)class_simple_create(owner, name)
+#define class_destroy(cs) class_simple_destroy((struct class_simple *)cs)
+#endif
-/* Yes, I'm aware that it's a fairly ugly hack.
- Until the __constant_* macros appear in Linus' own kernels, this is
- the way it has to be done.
- DW 19/1/00
+#ifndef list_for_each_entry_safe
+/**
+ * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
*/
+#define list_for_each_entry_safe(pos, n, head, member) \
+ for (pos = list_entry((head)->next, typeof(*pos), member), \
+ n = list_entry(pos->member.next, typeof(*pos), member); \
+ &pos->member != (head); \
+ pos = n, n = list_entry(n->member.next, typeof(*n), member))
-#include <asm/byteorder.h>
-
-#ifndef __constant_cpu_to_le16
-
-#ifdef __BIG_ENDIAN
-#define __constant_cpu_to_le64(x) ___swab64((x))
-#define __constant_le64_to_cpu(x) ___swab64((x))
-#define __constant_cpu_to_le32(x) ___swab32((x))
-#define __constant_le32_to_cpu(x) ___swab32((x))
-#define __constant_cpu_to_le16(x) ___swab16((x))
-#define __constant_le16_to_cpu(x) ___swab16((x))
-#define __constant_cpu_to_be64(x) ((__u64)(x))
-#define __constant_be64_to_cpu(x) ((__u64)(x))
-#define __constant_cpu_to_be32(x) ((__u32)(x))
-#define __constant_be32_to_cpu(x) ((__u32)(x))
-#define __constant_cpu_to_be16(x) ((__u16)(x))
-#define __constant_be16_to_cpu(x) ((__u16)(x))
-#else
-#ifdef __LITTLE_ENDIAN
-#define __constant_cpu_to_le64(x) ((__u64)(x))
-#define __constant_le64_to_cpu(x) ((__u64)(x))
-#define __constant_cpu_to_le32(x) ((__u32)(x))
-#define __constant_le32_to_cpu(x) ((__u32)(x))
-#define __constant_cpu_to_le16(x) ((__u16)(x))
-#define __constant_le16_to_cpu(x) ((__u16)(x))
-#define __constant_cpu_to_be64(x) ___swab64((x))
-#define __constant_be64_to_cpu(x) ___swab64((x))
-#define __constant_cpu_to_be32(x) ___swab32((x))
-#define __constant_be32_to_cpu(x) ___swab32((x))
-#define __constant_cpu_to_be16(x) ___swab16((x))
-#define __constant_be16_to_cpu(x) ___swab16((x))
-#else
-#error No (recognised) endianness defined (unless it,s PDP)
-#endif /* __LITTLE_ENDIAN */
-#endif /* __BIG_ENDIAN */
-
-#endif /* ifndef __constant_cpu_to_le16 */
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
- #define mod_init_t int __init
- #define mod_exit_t void
-#else
- #define mod_init_t static int __init
- #define mod_exit_t static void __exit
-#endif
-
-#ifndef THIS_MODULE
-#ifdef MODULE
-#define THIS_MODULE (&__this_module)
-#else
-#define THIS_MODULE (NULL)
-#endif
-#endif
-
-#if LINUX_VERSION_CODE < 0x20300
-#include <linux/interrupt.h>
-#define spin_lock_bh(lock) do {start_bh_atomic();spin_lock(lock);} while(0)
-#define spin_unlock_bh(lock) do {spin_unlock(lock);end_bh_atomic();} while(0)
-#else
-#include <asm/softirq.h>
-#include <linux/spinlock.h>
#endif
-#endif /* __LINUX_MTD_COMPATMAC_H__ */
-
+#ifndef DEFINE_SPINLOCK
+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
+#endif
+#ifndef DEFINE_RWLOCK
+#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
+#endif
+#endif /* __LINUX_MTD_COMPATMAC_H__ */
diff --git a/linux-2.4.x/include/linux/mtd/doc2000.h b/linux-2.4.x/include/linux/mtd/doc2000.h
index 6161f2c..244954e 100644
--- a/linux-2.4.x/include/linux/mtd/doc2000.h
+++ b/linux-2.4.x/include/linux/mtd/doc2000.h
@@ -1,13 +1,21 @@
-
-/* Linux driver for Disk-On-Chip 2000 */
-/* (c) 1999 Machine Vision Holdings, Inc. */
-/* Author: David Woodhouse <dwmw2@mvhi.com> */
-/* $Id: doc2000.h,v 1.15 2001/09/19 00:22:15 dwmw2 Exp $ */
+/*
+ * Linux driver for Disk-On-Chip devices
+ *
+ * Copyright (C) 1999 Machine Vision Holdings, Inc.
+ * Copyright (C) 2001-2003 David Woodhouse <dwmw2@infradead.org>
+ * Copyright (C) 2002-2003 Greg Ungerer <gerg@snapgear.com>
+ * Copyright (C) 2002-2003 SnapGear Inc
+ *
+ * $Id: doc2000.h,v 1.26 2006/03/29 08:26:28 dwmw2 Exp $
+ *
+ * Released under GPL
+ */
#ifndef __MTD_DOC2000_H__
#define __MTD_DOC2000_H__
#include <linux/mtd/mtd.h>
+#include <linux/mutex.h>
#define DoC_Sig1 0
#define DoC_Sig2 1
@@ -38,22 +46,51 @@
#define DoC_Mil_CDSN_IO 0x0800
#define DoC_2k_CDSN_IO 0x1800
-/* How to access the device?
- * On ARM, it'll be mmap'd directly with 32-bit wide accesses.
+#define DoC_Mplus_NOP 0x1002
+#define DoC_Mplus_AliasResolution 0x1004
+#define DoC_Mplus_DOCControl 0x1006
+#define DoC_Mplus_AccessStatus 0x1008
+#define DoC_Mplus_DeviceSelect 0x1008
+#define DoC_Mplus_Configuration 0x100a
+#define DoC_Mplus_OutputControl 0x100c
+#define DoC_Mplus_FlashControl 0x1020
+#define DoC_Mplus_FlashSelect 0x1022
+#define DoC_Mplus_FlashCmd 0x1024
+#define DoC_Mplus_FlashAddress 0x1026
+#define DoC_Mplus_FlashData0 0x1028
+#define DoC_Mplus_FlashData1 0x1029
+#define DoC_Mplus_ReadPipeInit 0x102a
+#define DoC_Mplus_LastDataRead 0x102c
+#define DoC_Mplus_LastDataRead1 0x102d
+#define DoC_Mplus_WritePipeTerm 0x102e
+#define DoC_Mplus_ECCSyndrome0 0x1040
+#define DoC_Mplus_ECCSyndrome1 0x1041
+#define DoC_Mplus_ECCSyndrome2 0x1042
+#define DoC_Mplus_ECCSyndrome3 0x1043
+#define DoC_Mplus_ECCSyndrome4 0x1044
+#define DoC_Mplus_ECCSyndrome5 0x1045
+#define DoC_Mplus_ECCConf 0x1046
+#define DoC_Mplus_Toggle 0x1046
+#define DoC_Mplus_DownloadStatus 0x1074
+#define DoC_Mplus_CtrlConfirm 0x1076
+#define DoC_Mplus_Power 0x1fff
+
+/* How to access the device?
+ * On ARM, it'll be mmap'd directly with 32-bit wide accesses.
* On PPC, it's mmap'd and 16-bit wide.
- * Others use readb/writeb
+ * Others use readb/writeb
*/
#if defined(__arm__)
-#define ReadDOC_(adr, reg) ((unsigned char)(*(__u32 *)(((unsigned long)adr)+((reg)<<2))))
-#define WriteDOC_(d, adr, reg) do{ *(__u32 *)(((unsigned long)adr)+((reg)<<2)) = (__u32)d; wmb();} while(0)
+#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u32 *)(((unsigned long)adr)+((reg)<<2))))
+#define WriteDOC_(d, adr, reg) do{ *(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)) = (__u32)d; wmb();} while(0)
#define DOC_IOREMAP_LEN 0x8000
#elif defined(__ppc__)
-#define ReadDOC_(adr, reg) ((unsigned char)(*(__u16 *)(((unsigned long)adr)+((reg)<<1))))
-#define WriteDOC_(d, adr, reg) do{ *(__u16 *)(((unsigned long)adr)+((reg)<<1)) = (__u16)d; wmb();} while(0)
+#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u16 *)(((unsigned long)adr)+((reg)<<1))))
+#define WriteDOC_(d, adr, reg) do{ *(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)) = (__u16)d; wmb();} while(0)
#define DOC_IOREMAP_LEN 0x4000
#else
-#define ReadDOC_(adr, reg) readb(((unsigned long)adr) + (reg))
-#define WriteDOC_(d, adr, reg) writeb(d, ((unsigned long)adr) + (reg))
+#define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg))
+#define WriteDOC_(d, adr, reg) writeb(d, (void __iomem *)(adr) + (reg))
#define DOC_IOREMAP_LEN 0x2000
#endif
@@ -71,13 +108,21 @@
#define DOC_MODE_RESERVED1 2
#define DOC_MODE_RESERVED2 3
-#define DOC_MODE_MDWREN 4
#define DOC_MODE_CLR_ERR 0x80
+#define DOC_MODE_RST_LAT 0x10
+#define DOC_MODE_BDECT 0x08
+#define DOC_MODE_MDWREN 0x04
#define DOC_ChipID_Doc2k 0x20
+#define DOC_ChipID_Doc2kTSOP 0x21 /* internal number for MTD */
#define DOC_ChipID_DocMil 0x30
+#define DOC_ChipID_DocMilPlus32 0x40
+#define DOC_ChipID_DocMilPlus16 0x41
#define CDSN_CTRL_FR_B 0x80
+#define CDSN_CTRL_FR_B0 0x40
+#define CDSN_CTRL_FR_B1 0x80
+
#define CDSN_CTRL_ECC_IO 0x20
#define CDSN_CTRL_FLASH_IO 0x10
#define CDSN_CTRL_WP 0x08
@@ -93,6 +138,10 @@
#define DOC_ECC_RESV 0x02
#define DOC_ECC_IGNORE 0x01
+#define DOC_FLASH_CE 0x80
+#define DOC_FLASH_WP 0x40
+#define DOC_FLASH_BANK 0x02
+
/* We have to also set the reserved bit 1 for enable */
#define DOC_ECC_EN (DOC_ECC__EN | DOC_ECC_RESV)
#define DOC_ECC_DIS (DOC_ECC_RESV)
@@ -107,34 +156,38 @@ struct Nand {
#define MAX_FLOORS 4
#define MAX_CHIPS 4
-#define MAX_FLOORS_MIL 4
+#define MAX_FLOORS_MIL 1
#define MAX_CHIPS_MIL 1
+#define MAX_FLOORS_MPLUS 2
+#define MAX_CHIPS_MPLUS 1
+
#define ADDR_COLUMN 1
#define ADDR_PAGE 2
#define ADDR_COLUMN_PAGE 3
struct DiskOnChip {
unsigned long physadr;
- unsigned long virtadr;
+ void __iomem *virtadr;
unsigned long totlen;
- char ChipID; /* Type of DiskOnChip */
+ unsigned char ChipID; /* Type of DiskOnChip */
int ioreg;
-
+
unsigned long mfr; /* Flash IDs - only one type of flash per device */
unsigned long id;
int chipshift;
char page256;
char pageadrlen;
+ char interleave; /* Internal interleaving - Millennium Plus style */
unsigned long erasesize;
-
+
int curfloor;
int curchip;
-
+
int numchips;
struct Nand *chips;
struct mtd_info *nextdoc;
- struct semaphore lock;
+ struct mutex lock;
};
int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]);
diff --git a/linux-2.4.x/include/linux/mtd/flashchip.h b/linux-2.4.x/include/linux/mtd/flashchip.h
index 4cdccad..a293a3b 100644
--- a/linux-2.4.x/include/linux/mtd/flashchip.h
+++ b/linux-2.4.x/include/linux/mtd/flashchip.h
@@ -1,12 +1,12 @@
-/*
+/*
* struct flchip definition
- *
- * Contains information about the location and state of a given flash device
+ *
+ * Contains information about the location and state of a given flash device
*
* (C) 2000 Red Hat. GPLd.
*
- * $Id: flashchip.h,v 1.7 2001/01/18 03:52:36 nico Exp $
+ * $Id: flashchip.h,v 1.18 2005/11/07 11:14:54 gleixner Exp $
*
*/
@@ -15,11 +15,11 @@
/* For spinlocks. sched.h includes spinlock.h from whichever directory it
* happens to be in - so we don't have to care whether we're on 2.2, which
- * has asm/spinlock.h, or 2.4, which has linux/spinlock.h
+ * has asm/spinlock.h, or 2.4, which has linux/spinlock.h
*/
#include <linux/sched.h>
-typedef enum {
+typedef enum {
FL_READY,
FL_STATUS,
FL_CFI_QUERY,
@@ -29,6 +29,7 @@ typedef enum {
FL_ERASE_SUSPENDED,
FL_WRITING,
FL_WRITING_TO_BUFFER,
+ FL_OTP_WRITE,
FL_WRITE_SUSPENDING,
FL_WRITE_SUSPENDED,
FL_PM_SUSPENDED,
@@ -36,13 +37,17 @@ typedef enum {
FL_UNLOADING,
FL_LOCKING,
FL_UNLOCKING,
+ FL_POINT,
+ FL_XIP_WHILE_ERASING,
+ FL_XIP_WHILE_WRITING,
FL_UNKNOWN
} flstate_t;
-/* NOTE: confusingly, this can be used to refer to more than one chip at a time,
- if they're interleaved. */
+/* NOTE: confusingly, this can be used to refer to more than one chip at a time,
+ if they're interleaved. This can even refer to individual partitions on
+ the same physical chip when present. */
struct flchip {
unsigned long start; /* Offset within the map */
@@ -54,8 +59,14 @@ struct flchip {
a given offset, and we'll want to add the per-chip length field
back in.
*/
+ int ref_point_counter;
flstate_t state;
flstate_t oldstate;
+
+ unsigned int write_suspended:1;
+ unsigned int erase_suspended:1;
+ unsigned long in_progress_block_addr;
+
spinlock_t *mutex;
spinlock_t _spinlock; /* We do it like this because sometimes they'll be shared. */
wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
@@ -63,8 +74,17 @@ struct flchip {
int word_write_time;
int buffer_write_time;
int erase_time;
+
+ void *priv;
};
+/* This is used to handle contention on write/erase operations
+ between partitions of the same physical chip. */
+struct flchip_shared {
+ spinlock_t lock;
+ struct flchip *writing;
+ struct flchip *erasing;
+};
#endif /* __MTD_FLASHCHIP_H__ */
diff --git a/linux-2.4.x/include/linux/mtd/ftl.h b/linux-2.4.x/include/linux/mtd/ftl.h
index ae017ef..d996091 100644
--- a/linux-2.4.x/include/linux/mtd/ftl.h
+++ b/linux-2.4.x/include/linux/mtd/ftl.h
@@ -1,6 +1,6 @@
/*
- * $Id: ftl.h,v 1.5 2001/06/02 20:35:51 dwmw2 Exp $
- *
+ * $Id: ftl.h,v 1.7 2005/11/07 11:14:54 gleixner Exp $
+ *
* Derived from (and probably identical to):
* ftl.h 1.7 1999/10/25 20:23:17
*
@@ -12,7 +12,7 @@
* Software distributed under the License is distributed on an "AS IS"
* basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
* the License for the specific language governing rights and
- * limitations under the License.
+ * limitations under the License.
*
* The initial developer of the original code is David A. Hinds
* <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
diff --git a/linux-2.4.x/include/linux/mtd/gen_probe.h b/linux-2.4.x/include/linux/mtd/gen_probe.h
index 43c3a08..256e734 100644
--- a/linux-2.4.x/include/linux/mtd/gen_probe.h
+++ b/linux-2.4.x/include/linux/mtd/gen_probe.h
@@ -1,21 +1,21 @@
/*
* (C) 2001, 2001 Red Hat, Inc.
* GPL'd
- * $Id: gen_probe.h,v 1.1 2001/09/02 18:50:13 dwmw2 Exp $
+ * $Id: gen_probe.h,v 1.4 2005/11/07 11:14:54 gleixner Exp $
*/
#ifndef __LINUX_MTD_GEN_PROBE_H__
#define __LINUX_MTD_GEN_PROBE_H__
#include <linux/mtd/flashchip.h>
-#include <linux/mtd/map.h>
+#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
+#include <linux/bitops.h>
struct chip_probe {
char *name;
int (*probe_chip)(struct map_info *map, __u32 base,
- struct flchip *chips, struct cfi_private *cfi);
-
+ unsigned long *chip_map, struct cfi_private *cfi);
};
struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp);
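probe_chip() now reports hits through a chip_map bitmap rather than filling a flchip array directly. A sketch of how a probe module plugs into mtd_do_chip_probe() follows; my_probe_chip and the probe name are stand-ins for the real detection logic:

#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/gen_probe.h>

/* Illustrative: detect a chip at 'base' and mark it in the chip_map bitmap. */
static int my_probe_chip(struct map_info *map, __u32 base,
			 unsigned long *chip_map, struct cfi_private *cfi)
{
	/* ... issue ident/query cycles with map_write()/map_read() ... */
	return 0;	/* non-zero once a chip has been recognised */
}

static struct chip_probe my_chip_probe = {
	.name       = "MY_PROBE",
	.probe_chip = my_probe_chip,
};

static struct mtd_info *my_probe(struct map_info *map)
{
	return mtd_do_chip_probe(map, &my_chip_probe);
}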
diff --git a/linux-2.4.x/include/linux/mtd/inftl.h b/linux-2.4.x/include/linux/mtd/inftl.h
new file mode 100644
index 0000000..9dbf209
--- /dev/null
+++ b/linux-2.4.x/include/linux/mtd/inftl.h
@@ -0,0 +1,62 @@
+/*
+ * inftl.h -- defines to support the Inverse NAND Flash Translation Layer
+ *
+ * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ *
+ * $Id: inftl.h,v 1.8 2006/03/29 08:27:08 dwmw2 Exp $
+ */
+
+#ifndef __MTD_INFTL_H__
+#define __MTD_INFTL_H__
+
+#ifndef __KERNEL__
+#error This is a kernel header. Perhaps include nftl-user.h instead?
+#endif
+
+#include <linux/mtd/blktrans.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nftl.h>
+
+#include <mtd/inftl-user.h>
+
+#ifndef INFTL_MAJOR
+#define INFTL_MAJOR 96
+#endif
+#define INFTL_PARTN_BITS 4
+
+#ifdef __KERNEL__
+
+struct INFTLrecord {
+ struct mtd_blktrans_dev mbd;
+ __u16 MediaUnit;
+ __u32 EraseSize;
+ struct INFTLMediaHeader MediaHdr;
+ int usecount;
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ __u16 numvunits;
+ __u16 firstEUN;
+ __u16 lastEUN;
+ __u16 numfreeEUNs;
+ __u16 LastFreeEUN; /* To speed up finding a free EUN */
+ int head,sect,cyl;
+ __u16 *PUtable; /* Physical Unit Table */
+ __u16 *VUtable; /* Virtual Unit Table */
+ unsigned int nb_blocks; /* number of physical blocks */
+ unsigned int nb_boot_blocks; /* number of blocks used by the bios */
+ struct erase_info instr;
+ struct nand_oobinfo oobinfo;
+};
+
+int INFTL_mount(struct INFTLrecord *s);
+int INFTL_formatblock(struct INFTLrecord *s, int block);
+
+extern char inftlmountrev[];
+
+void INFTL_dumptables(struct INFTLrecord *s);
+void INFTL_dumpVUchains(struct INFTLrecord *s);
+
+#endif /* __KERNEL__ */
+
+#endif /* __MTD_INFTL_H__ */
diff --git a/linux-2.4.x/include/linux/mtd/jedec.h b/linux-2.4.x/include/linux/mtd/jedec.h
index 75271b8..9006feb 100644
--- a/linux-2.4.x/include/linux/mtd/jedec.h
+++ b/linux-2.4.x/include/linux/mtd/jedec.h
@@ -1,20 +1,19 @@
/* JEDEC Flash Interface.
- * This is an older type of interface for self programming flash. It is
+ * This is an older type of interface for self programming flash. It is
* commonly use in older AMD chips and is obsolete compared with CFI.
* It is called JEDEC because the JEDEC association distributes the ID codes
* for the chips.
*
* See the AMD flash databook for information on how to operate the interface.
*
- * $Id: jedec.h,v 1.2 2001/11/06 14:37:36 dwmw2 Exp $
+ * $Id: jedec.h,v 1.4 2005/11/07 11:14:54 gleixner Exp $
*/
#ifndef __LINUX_MTD_JEDEC_H__
#define __LINUX_MTD_JEDEC_H__
#include <linux/types.h>
-#include <linux/mtd/map.h>
#define MAX_JEDEC_CHIPS 16
@@ -34,16 +33,16 @@ struct jedec_flash_chip
__u16 jedec;
unsigned long size;
unsigned long sectorsize;
-
+
// *(__u8*)(base + (adder << addrshift)) = data << datashift
// Address size = size << addrshift
unsigned long base; // Byte 0 of the flash, will be unaligned
unsigned int datashift; // Useful for 32bit/16bit accesses
unsigned int addrshift;
unsigned long offset; // linerized start. base==offset for unbanked, uninterleaved flash
-
+
__u32 capabilities;
-
+
// These markers are filled in by the flash_chip_scan function
unsigned long start;
unsigned long length;
@@ -52,16 +51,16 @@ struct jedec_flash_chip
struct jedec_private
{
unsigned long size; // Total size of all the devices
-
+
/* Bank handling. If sum(bank_fill) == size then this is linear flash.
Otherwise the mapping has holes in it. bank_fill may be used to
- find the holes, but in the common symetric case
- bank_fill[0] == bank_fill[*], thus addresses may be computed
+ find the holes, but in the common symetric case
+ bank_fill[0] == bank_fill[*], thus addresses may be computed
mathmatically. bank_fill must be powers of two */
unsigned is_banked;
unsigned long bank_fill[MAX_JEDEC_CHIPS];
-
- struct jedec_flash_chip chips[MAX_JEDEC_CHIPS];
+
+ struct jedec_flash_chip chips[MAX_JEDEC_CHIPS];
};
#endif
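The comment inside struct jedec_flash_chip gives the addressing rule: a byte lands at base + (addr << addrshift) with the data shifted by datashift, and the chip covers size << addrshift bytes of bus space. A small worked sketch of that rule is below, hedged because real accesses normally go through the map layer rather than raw pointers:

#include <linux/types.h>
#include <linux/mtd/jedec.h>

/* Illustrative: apply the addressing rule from the comment above. */
static inline void jedec_write8(struct jedec_flash_chip *chip,
				unsigned long addr, __u8 data)
{
	*(volatile __u8 *)(chip->base + (addr << chip->addrshift)) =
		data << chip->datashift;
}

/* Number of bytes of bus space the chip occupies. */
static inline unsigned long jedec_span(struct jedec_flash_chip *chip)
{
	return chip->size << chip->addrshift;
}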
diff --git a/linux-2.4.x/include/linux/mtd/map.h b/linux-2.4.x/include/linux/mtd/map.h
index 94ffed2..fedfbc8 100644
--- a/linux-2.4.x/include/linux/mtd/map.h
+++ b/linux-2.4.x/include/linux/mtd/map.h
@@ -1,27 +1,183 @@
/* Overhauled routines for dealing with different mmap regions of flash */
-/* $Id: map.h,v 1.25 2001/09/09 15:04:17 dwmw2 Exp $ */
+/* $Id: map.h,v 1.54 2005/11/07 11:14:54 gleixner Exp $ */
#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__
#include <linux/config.h>
#include <linux/types.h>
-#include <linux/mtd/mtd.h>
-#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/string.h>
+
+#include <linux/mtd/compatmac.h>
+
+#include <asm/unaligned.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/bug.h>
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
+#define map_bankwidth(map) 1
+#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
+#define map_bankwidth_is_large(map) (0)
+#define map_words(map) (1)
+#define MAX_MAP_BANKWIDTH 1
+#else
+#define map_bankwidth_is_1(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# else
+# define map_bankwidth(map) 2
+# define map_bankwidth_is_large(map) (0)
+# define map_words(map) (1)
+# endif
+#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 2
+#else
+#define map_bankwidth_is_2(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# else
+# define map_bankwidth(map) 4
+# define map_bankwidth_is_large(map) (0)
+# define map_words(map) (1)
+# endif
+#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 4
+#else
+#define map_bankwidth_is_4(map) (0)
+#endif
+
+/* ensure we never evaluate anything shorter than an unsigned long
+ * to zero, and ensure we'll never miss the end of a comparison (bjd) */
+
+#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1))/ sizeof(unsigned long))
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# if BITS_PER_LONG < 64
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
+# endif
+# else
+# define map_bankwidth(map) 8
+# define map_bankwidth_is_large(map) (BITS_PER_LONG < 64)
+# define map_words(map) map_calc_words(map)
+# endif
+#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 8
+#else
+#define map_bankwidth_is_8(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
+# else
+# define map_bankwidth(map) 16
+# define map_bankwidth_is_large(map) (1)
+# define map_words(map) map_calc_words(map)
+# endif
+#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 16
+#else
+#define map_bankwidth_is_16(map) (0)
+#endif
+
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
+# ifdef map_bankwidth
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
+# else
+# define map_bankwidth(map) 32
+# define map_bankwidth_is_large(map) (1)
+# define map_words(map) map_calc_words(map)
+# endif
+#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
+#undef MAX_MAP_BANKWIDTH
+#define MAX_MAP_BANKWIDTH 32
+#else
+#define map_bankwidth_is_32(map) (0)
+#endif
+
+#ifndef map_bankwidth
+#error "No bus width supported. What's the point?"
+#endif
+
+static inline int map_bankwidth_supported(int w)
+{
+ switch (w) {
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
+ case 1:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
+ case 2:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
+ case 4:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
+ case 8:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
+ case 16:
+#endif
+#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
+ case 32:
+#endif
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+#define MAX_MAP_LONGS ( ((MAX_MAP_BANKWIDTH*8) + BITS_PER_LONG - 1) / BITS_PER_LONG )
+
+typedef union {
+ unsigned long x[MAX_MAP_LONGS];
+} map_word;
/* The map stuff is very simple. You fill in your struct map_info with
a handful of routines for accessing the device, making sure they handle
paging etc. correctly if your device needs it. Then you pass it off
- to a chip driver which deals with a mapped device - generally either
- do_cfi_probe() or do_ram_probe(), either of which will return a
- struct mtd_info if they liked what they saw. At which point, you
- fill in the mtd->module with your own module address, and register
- it.
-
+ to a chip probe routine -- either JEDEC or CFI probe or both -- via
+ do_map_probe(). If a chip is recognised, the probe code will invoke the
+ appropriate chip driver (if present) and return a struct mtd_info.
+ At which point, you fill in the mtd->module with your own module
+ address, and register it with the MTD core code. Or you could partition
+ it and register the partitions instead, or keep it for your own private
+ use; whatever.
+
The mtd->priv field will point to the struct map_info, and any further
- private data required by the chip driver is linked from the
- mtd->priv->fldrv_priv field. This allows the map driver to get at
+ private data required by the chip driver is linked from the
+ mtd->priv->fldrv_priv field. This allows the map driver to get at
the destructor function map->fldrv_destroy() when it's tired
of living.
*/
@@ -29,34 +185,45 @@
struct map_info {
char *name;
unsigned long size;
- int buswidth; /* in octets */
- __u8 (*read8)(struct map_info *, unsigned long);
- __u16 (*read16)(struct map_info *, unsigned long);
- __u32 (*read32)(struct map_info *, unsigned long);
- /* If it returned a 'long' I'd call it readl.
- * It doesn't.
- * I won't.
- * dwmw2 */
-
+ unsigned long phys;
+#define NO_XIP (-1UL)
+
+ void __iomem *virt;
+ void *cached;
+
+ int bankwidth; /* in octets. This isn't necessarily the width
+ of actual bus cycles -- it's the repeat interval
+ in bytes, before you are talking to the first chip again.
+ */
+
+#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
+ map_word (*read)(struct map_info *, unsigned long);
void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t);
- void (*write8)(struct map_info *, __u8, unsigned long);
- void (*write16)(struct map_info *, __u16, unsigned long);
- void (*write32)(struct map_info *, __u32, unsigned long);
+
+ void (*write)(struct map_info *, const map_word, unsigned long);
void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t);
+ /* We can perhaps put in 'point' and 'unpoint' methods, if we really
+ want to enable XIP for non-linear mappings. Not yet though. */
+#endif
+ /* It's possible for the map driver to use cached memory in its
+ copy_from implementation (and _only_ with copy_from). However,
+ when the chip driver knows some flash area has changed contents,
+ it will signal it to the map driver through this routine to let
+ the map driver invalidate the corresponding cache as needed.
+ If there is no cache to care about this can be set to NULL. */
+ void (*inval_cache)(struct map_info *, unsigned long, ssize_t);
+
+ /* set_vpp() must handle being reentered -- enable, enable, disable
+ must leave it enabled. */
void (*set_vpp)(struct map_info *, int);
- /* We put these two here rather than a single void *map_priv,
- because we want mappers to be able to have quickly-accessible
- cache for the 'currently-mapped page' without the _extra_
- redirection that would be necessary. If you need more than
- two longs, turn the second into a pointer. dwmw2 */
+
unsigned long map_priv_1;
unsigned long map_priv_2;
void *fldrv_priv;
struct mtd_chip_driver *fldrv;
};
-
struct mtd_chip_driver {
struct mtd_info *(*probe)(struct map_info *map);
void (*destroy)(struct mtd_info *);
@@ -68,27 +235,204 @@ struct mtd_chip_driver {
void register_mtd_chip_driver(struct mtd_chip_driver *);
void unregister_mtd_chip_driver(struct mtd_chip_driver *);
-struct mtd_info *do_map_probe(char *name, struct map_info *map);
+struct mtd_info *do_map_probe(const char *name, struct map_info *map);
+void map_destroy(struct mtd_info *mtd);
+
+#define ENABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 1); } while(0)
+#define DISABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 0); } while(0)
+
+#define INVALIDATE_CACHED_RANGE(map, from, size) \
+ do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
+
+
+static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
+{
+ int i;
+ for (i=0; i<map_words(map); i++) {
+ if (val1.x[i] != val2.x[i])
+ return 0;
+ }
+ return 1;
+}
+
+static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
+{
+ map_word r;
+ int i;
+
+ for (i=0; i<map_words(map); i++) {
+ r.x[i] = val1.x[i] & val2.x[i];
+ }
+ return r;
+}
+static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
+{
+ map_word r;
+ int i;
-/*
- * Destroy an MTD device which was created for a map device.
- * Make sure the MTD device is already unregistered before calling this
- */
-static inline void map_destroy(struct mtd_info *mtd)
+ for (i=0; i<map_words(map); i++) {
+ r.x[i] = val1.x[i] & ~val2.x[i];
+ }
+ return r;
+}
+
+static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
{
- struct map_info *map = mtd->priv;
+ map_word r;
+ int i;
+
+ for (i=0; i<map_words(map); i++) {
+ r.x[i] = val1.x[i] | val2.x[i];
+ }
+ return r;
+}
- if (map->fldrv->destroy)
- map->fldrv->destroy(mtd);
-#ifdef CONFIG_MODULES
- if (map->fldrv->module)
- __MOD_DEC_USE_COUNT(map->fldrv->module);
+#define map_word_andequal(m, a, b, z) map_word_equal(m, z, map_word_and(m, a, b))
+
+static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
+{
+ int i;
+
+ for (i=0; i<map_words(map); i++) {
+ if (val1.x[i] & val2.x[i])
+ return 1;
+ }
+ return 0;
+}
+
+static inline map_word map_word_load(struct map_info *map, const void *ptr)
+{
+ map_word r;
+
+ if (map_bankwidth_is_1(map))
+ r.x[0] = *(unsigned char *)ptr;
+ else if (map_bankwidth_is_2(map))
+ r.x[0] = get_unaligned((uint16_t *)ptr);
+ else if (map_bankwidth_is_4(map))
+ r.x[0] = get_unaligned((uint32_t *)ptr);
+#if BITS_PER_LONG >= 64
+ else if (map_bankwidth_is_8(map))
+ r.x[0] = get_unaligned((uint64_t *)ptr);
#endif
- kfree(mtd);
+ else if (map_bankwidth_is_large(map))
+ memcpy(r.x, ptr, map->bankwidth);
+
+ return r;
}
-#define ENABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 1); } while(0)
-#define DISABLE_VPP(map) do { if(map->set_vpp) map->set_vpp(map, 0); } while(0)
+static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len)
+{
+ int i;
+
+ if (map_bankwidth_is_large(map)) {
+ char *dest = (char *)&orig;
+ memcpy(dest+start, buf, len);
+ } else {
+ for (i=start; i < start+len; i++) {
+ int bitpos;
+#ifdef __LITTLE_ENDIAN
+ bitpos = i*8;
+#else /* __BIG_ENDIAN */
+ bitpos = (map_bankwidth(map)-1-i)*8;
+#endif
+ orig.x[0] &= ~(0xff << bitpos);
+ orig.x[0] |= buf[i-start] << bitpos;
+ }
+ }
+ return orig;
+}
+
+#if BITS_PER_LONG < 64
+#define MAP_FF_LIMIT 4
+#else
+#define MAP_FF_LIMIT 8
+#endif
+
+static inline map_word map_word_ff(struct map_info *map)
+{
+ map_word r;
+ int i;
+
+ if (map_bankwidth(map) < MAP_FF_LIMIT) {
+ int bw = 8 * map_bankwidth(map);
+ r.x[0] = (1 << bw) - 1;
+ } else {
+ for (i=0; i<map_words(map); i++)
+ r.x[i] = ~0UL;
+ }
+ return r;
+}
+
+static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
+{
+ map_word r;
+
+ if (map_bankwidth_is_1(map))
+ r.x[0] = __raw_readb(map->virt + ofs);
+ else if (map_bankwidth_is_2(map))
+ r.x[0] = __raw_readw(map->virt + ofs);
+ else if (map_bankwidth_is_4(map))
+ r.x[0] = __raw_readl(map->virt + ofs);
+#if BITS_PER_LONG >= 64
+ else if (map_bankwidth_is_8(map))
+ r.x[0] = __raw_readq(map->virt + ofs);
+#endif
+ else if (map_bankwidth_is_large(map))
+ memcpy_fromio(r.x, map->virt+ofs, map->bankwidth);
+
+ return r;
+}
+
+static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
+{
+ if (map_bankwidth_is_1(map))
+ __raw_writeb(datum.x[0], map->virt + ofs);
+ else if (map_bankwidth_is_2(map))
+ __raw_writew(datum.x[0], map->virt + ofs);
+ else if (map_bankwidth_is_4(map))
+ __raw_writel(datum.x[0], map->virt + ofs);
+#if BITS_PER_LONG >= 64
+ else if (map_bankwidth_is_8(map))
+ __raw_writeq(datum.x[0], map->virt + ofs);
+#endif
+ else if (map_bankwidth_is_large(map))
+ memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
+ mb();
+}
+
+static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
+{
+ if (map->cached)
+ memcpy(to, (char *)map->cached + from, len);
+ else
+ memcpy_fromio(to, map->virt + from, len);
+}
+
+static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
+{
+ memcpy_toio(map->virt + to, from, len);
+}
+
+#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
+#define map_read(map, ofs) (map)->read(map, ofs)
+#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
+#define map_write(map, datum, ofs) (map)->write(map, datum, ofs)
+#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)
+
+extern void simple_map_init(struct map_info *);
+#define map_is_linear(map) (map->phys != NO_XIP)
+
+#else
+#define map_read(map, ofs) inline_map_read(map, ofs)
+#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
+#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs)
+#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)
+
+
+#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
+#define map_is_linear(map) ({ (void)(map); 1; })
+
+#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */
#endif /* __LINUX_MTD_MAP_H__ */
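The old read8/read16/read32 hooks are gone and the header's own comment now spells out the flow: fill in a struct map_info, hand it to do_map_probe(), and register the struct mtd_info that comes back. A minimal board map driver following that flow, in the style of the physmap driver, is sketched below; the base address, size and bankwidth are invented board parameters and error handling is trimmed:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <asm/io.h>

#define BOARD_FLASH_PHYS	0x08000000UL	/* hypothetical board values */
#define BOARD_FLASH_SIZE	0x00800000UL
#define BOARD_FLASH_BANKWIDTH	2

static struct map_info board_map = {
	.name      = "board-flash",
	.size      = BOARD_FLASH_SIZE,
	.phys      = BOARD_FLASH_PHYS,
	.bankwidth = BOARD_FLASH_BANKWIDTH,
};

static struct mtd_info *board_mtd;

static int __init board_flash_init(void)
{
	board_map.virt = ioremap(board_map.phys, board_map.size);
	if (!board_map.virt)
		return -EIO;

	simple_map_init(&board_map);	/* use the default accessors for a directly mapped chip */

	board_mtd = do_map_probe("cfi_probe", &board_map);
	if (!board_mtd) {
		iounmap(board_map.virt);
		return -ENXIO;
	}
	board_mtd->owner = THIS_MODULE;
	return add_mtd_device(board_mtd);
}

static void __exit board_flash_exit(void)
{
	del_mtd_device(board_mtd);
	map_destroy(board_mtd);
	iounmap(board_map.virt);
}

module_init(board_flash_init);
module_exit(board_flash_exit);
MODULE_LICENSE("GPL");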
diff --git a/linux-2.4.x/include/linux/mtd/mtd.h b/linux-2.4.x/include/linux/mtd/mtd.h
index b186e6c..fbc075b 100644
--- a/linux-2.4.x/include/linux/mtd/mtd.h
+++ b/linux-2.4.x/include/linux/mtd/mtd.h
@@ -1,123 +1,45 @@
-
-/* $Id: mtd.h,v 1.33 2001/06/09 00:08:59 dwmw2 Exp $ */
+/*
+ * $Id: mtd.h,v 1.62 2005/11/29 20:01:30 gleixner Exp $
+ *
+ * Copyright (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> et al.
+ *
+ * Released under GPL
+ */
#ifndef __MTD_MTD_H__
#define __MTD_MTD_H__
-#ifdef __KERNEL__
+#ifndef __KERNEL__
+#error This is a kernel header. Perhaps include mtd-user.h instead?
+#endif
#include <linux/config.h>
-#include <linux/version.h>
#include <linux/types.h>
-#include <linux/mtd/compatmac.h>
#include <linux/module.h>
#include <linux/uio.h>
+#include <linux/notifier.h>
-#endif /* __KERNEL__ */
-
-struct erase_info_user {
- u_int32_t start;
- u_int32_t length;
-};
-
-struct mtd_oob_buf {
- u_int32_t start;
- u_int32_t length;
- unsigned char *ptr;
-};
-
+#include <linux/mtd/compatmac.h>
+#include <mtd/mtd-abi.h>
#define MTD_CHAR_MAJOR 90
#define MTD_BLOCK_MAJOR 31
#define MAX_MTD_DEVICES 16
-
-
-#define MTD_ABSENT 0
-#define MTD_RAM 1
-#define MTD_ROM 2
-#define MTD_NORFLASH 3
-#define MTD_NANDFLASH 4
-#define MTD_PEROM 5
-#define MTD_OTHER 14
-#define MTD_UNKNOWN 15
-
-
-
-#define MTD_CLEAR_BITS 1 // Bits can be cleared (flash)
-#define MTD_SET_BITS 2 // Bits can be set
-#define MTD_ERASEABLE 4 // Has an erase function
-#define MTD_WRITEB_WRITEABLE 8 // Direct IO is possible
-#define MTD_VOLATILE 16 // Set for RAMs
-#define MTD_XIP 32 // eXecute-In-Place possible
-#define MTD_OOB 64 // Out-of-band data (NAND flash)
-#define MTD_ECC 128 // Device capable of automatic ECC
-
-// Some common devices / combinations of capabilities
-#define MTD_CAP_ROM 0
-#define MTD_CAP_RAM (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEB_WRITEABLE)
-#define MTD_CAP_NORFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE)
-#define MTD_CAP_NANDFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE|MTD_OOB)
-#define MTD_WRITEABLE (MTD_CLEAR_BITS|MTD_SET_BITS)
-
-
-// Types of automatic ECC/Checksum available
-#define MTD_ECC_NONE 0 // No automatic ECC available
-#define MTD_ECC_RS_DiskOnChip 1 // Automatic ECC on DiskOnChip
-#define MTD_ECC_SW 2 // SW ECC for Toshiba & Samsung devices
-
-struct mtd_info_user {
- u_char type;
- u_int32_t flags;
- u_int32_t size; // Total size of the MTD
- u_int32_t erasesize;
- u_int32_t oobblock; // Size of OOB blocks (e.g. 512)
- u_int32_t oobsize; // Amount of OOB data per block (e.g. 16)
- u_int32_t ecctype;
- u_int32_t eccsize;
-};
-
-struct region_info_user {
- u_int32_t offset; /* At which this region starts,
- * from the beginning of the MTD */
- u_int32_t erasesize; /* For this region */
- u_int32_t numblocks; /* Number of blocks in this region */
- u_int32_t regionindex;
-};
-
-#define MEMGETINFO _IOR('M', 1, struct mtd_info_user)
-#define MEMERASE _IOW('M', 2, struct erase_info_user)
-#define MEMWRITEOOB _IOWR('M', 3, struct mtd_oob_buf)
-#define MEMREADOOB _IOWR('M', 4, struct mtd_oob_buf)
-#define MEMLOCK _IOW('M', 5, struct erase_info_user)
-#define MEMUNLOCK _IOW('M', 6, struct erase_info_user)
-#define MEMGETREGIONCOUNT _IOR('M', 7, int)
-#define MEMGETREGIONINFO _IOWR('M', 8, struct region_info_user)
-#define MEMREADDATA _IOWR('M', 9, struct mtd_oob_buf)
-#define MEMWRITEDATA _IOWR('M', 10, struct mtd_oob_buf)
-
-#ifndef __KERNEL__
-
-typedef struct mtd_info_user mtd_info_t;
-typedef struct erase_info_user erase_info_t;
-typedef struct region_info_user region_info_t;
-
- /* User-space ioctl definitions */
-
-
-#else /* __KERNEL__ */
-
-
#define MTD_ERASE_PENDING 0x01
#define MTD_ERASING 0x02
#define MTD_ERASE_SUSPEND 0x04
#define MTD_ERASE_DONE 0x08
#define MTD_ERASE_FAILED 0x10
+/* If the erase fails, fail_addr might indicate exactly which block failed. If
+ fail_addr = 0xffffffff, the failure was not at the device level or was not
+ specific to any particular block. */
struct erase_info {
struct mtd_info *mtd;
u_int32_t addr;
u_int32_t len;
+ u_int32_t fail_addr;
u_long time;
u_long retries;
u_int dev;
@@ -150,45 +72,75 @@ struct mtd_info {
u_int32_t ecctype;
u_int32_t eccsize;
+ /*
+ * Reuse some of the above unused fields in the case of NOR flash
+ * with configurable programming regions to avoid modifying the
+ * user visible structure layout/size. Only valid when the
+ * MTD_PROGRAM_REGIONS flag is set.
+ * (Maybe we should have a union for those?)
+ */
+#define MTD_PROGREGION_SIZE(mtd) (mtd)->oobblock
+#define MTD_PROGREGION_CTRLMODE_VALID(mtd) (mtd)->oobsize
+#define MTD_PROGREGION_CTRLMODE_INVALID(mtd) (mtd)->ecctype
+
// Kernel-only stuff starts here.
char *name;
int index;
+ // oobinfo is a nand_oobinfo structure, which can be set by ioctl (MEMSETOOBINFO)
+ struct nand_oobinfo oobinfo;
+ u_int32_t oobavail; // Number of bytes in OOB area available for fs
+
/* Data for variable erase regions. If numeraseregions is zero,
- * it means that the whole device has erasesize as given above.
+ * it means that the whole device has erasesize as given above.
*/
int numeraseregions;
- struct mtd_erase_region_info *eraseregions;
+ struct mtd_erase_region_info *eraseregions;
/* This really shouldn't be here. It can go away in 2.5 */
u_int32_t bank_size;
- struct module *module;
int (*erase) (struct mtd_info *mtd, struct erase_info *instr);
/* This stuff for eXecute-In-Place */
int (*point) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf);
/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
- void (*unpoint) (struct mtd_info *mtd, u_char * addr);
+ void (*unpoint) (struct mtd_info *mtd, u_char * addr, loff_t from, size_t len);
int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
- int (*read_ecc) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, u_char *eccbuf);
- int (*write_ecc) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf, u_char *eccbuf);
+ int (*read_ecc) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
+ int (*write_ecc) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
int (*read_oob) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
int (*write_oob) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
- /* iovec-based read/write methods. We need these especially for NAND flash,
+ /*
+ * Methods to access the protection register area, present in some
+ * flash devices. The user data is one time programmable but the
+ * factory data is read only.
+ */
+ int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
+ int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
+ int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, size_t len);
+ int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
+ int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
+ int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len);
+
+ /* kvec-based read/write methods. We need these especially for NAND flash,
with its limited number of write cycles per erase.
- NB: The 'count' parameter is the number of _vectors_, each of
+ NB: The 'count' parameter is the number of _vectors_, each of
which contains an (ofs, len) tuple.
*/
- int (*readv) (struct mtd_info *mtd, struct iovec *vecs, unsigned long count, loff_t from, size_t *retlen);
- int (*writev) (struct mtd_info *mtd, const struct iovec *vecs, unsigned long count, loff_t to, size_t *retlen);
+ int (*readv) (struct mtd_info *mtd, struct kvec *vecs, unsigned long count, loff_t from, size_t *retlen);
+ int (*readv_ecc) (struct mtd_info *mtd, struct kvec *vecs, unsigned long count, loff_t from,
+ size_t *retlen, u_char *eccbuf, struct nand_oobinfo *oobsel);
+ int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen);
+ int (*writev_ecc) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to,
+ size_t *retlen, u_char *eccbuf, struct nand_oobinfo *oobsel);
/* Sync */
void (*sync) (struct mtd_info *mtd);
@@ -201,7 +153,16 @@ struct mtd_info {
int (*suspend) (struct mtd_info *mtd);
void (*resume) (struct mtd_info *mtd);
+ /* Bad block management functions */
+ int (*block_isbad) (struct mtd_info *mtd, loff_t ofs);
+ int (*block_markbad) (struct mtd_info *mtd, loff_t ofs);
+
+ struct notifier_block reboot_notifier; /* default mode before reboot */
+
void *priv;
+
+ struct module *owner;
+ int usecount;
};
@@ -210,39 +171,27 @@ struct mtd_info {
extern int add_mtd_device(struct mtd_info *mtd);
extern int del_mtd_device (struct mtd_info *mtd);
-extern struct mtd_info *__get_mtd_device(struct mtd_info *mtd, int num);
-
-static inline struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
-{
- struct mtd_info *ret;
-
- ret = __get_mtd_device(mtd, num);
-
- if (ret && ret->module && !try_inc_mod_count(ret->module))
- return NULL;
+extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
- return ret;
-}
-
-static inline void put_mtd_device(struct mtd_info *mtd)
-{
- if (mtd->module)
- __MOD_DEC_USE_COUNT(mtd->module);
-}
+extern void put_mtd_device(struct mtd_info *mtd);
struct mtd_notifier {
void (*add)(struct mtd_info *mtd);
void (*remove)(struct mtd_info *mtd);
- struct mtd_notifier *next;
+ struct list_head list;
};
extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
+int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen);
+
+int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
+ unsigned long count, loff_t from, size_t *retlen);
-#ifndef MTDC
#define MTD_ERASE(mtd, args...) (*(mtd->erase))(mtd, args)
#define MTD_POINT(mtd, a,b,c,d) (*(mtd->point))(mtd, a,b,c, (u_char **)(d))
#define MTD_UNPOINT(mtd, arg) (*(mtd->unpoint))(mtd, (u_char *)arg)
@@ -254,8 +203,18 @@ extern int unregister_mtd_user (struct mtd_notifier *old);
#define MTD_WRITEECC(mtd, args...) (*(mtd->write_ecc))(mtd, args)
#define MTD_READOOB(mtd, args...) (*(mtd->read_oob))(mtd, args)
#define MTD_WRITEOOB(mtd, args...) (*(mtd->write_oob))(mtd, args)
-#define MTD_SYNC(mtd) do { if (mtd->sync) (*(mtd->sync))(mtd); } while (0)
-#endif /* MTDC */
+#define MTD_SYNC(mtd) do { if (mtd->sync) (*(mtd->sync))(mtd); } while (0)
+
+
+#ifdef CONFIG_MTD_PARTITIONS
+void mtd_erase_callback(struct erase_info *instr);
+#else
+static inline void mtd_erase_callback(struct erase_info *instr)
+{
+ if (instr->callback)
+ instr->callback(instr);
+}
+#endif
/*
* Debugging macro and defines
@@ -266,14 +225,14 @@ extern int unregister_mtd_user (struct mtd_notifier *old);
#define MTD_DEBUG_LEVEL3 (3) /* Noisy */
#ifdef CONFIG_MTD_DEBUG
-#define DEBUG(n, args...) \
- if (n <= CONFIG_MTD_DEBUG_VERBOSE) { \
- printk(KERN_INFO args); \
- }
+#define DEBUG(n, args...) \
+ do { \
+ if (n <= CONFIG_MTD_DEBUG_VERBOSE) \
+ printk(KERN_INFO args); \
+ } while(0)
#else /* CONFIG_MTD_DEBUG */
-#define DEBUG(n, args...)
-#endif /* CONFIG_MTD_DEBUG */
+#define DEBUG(n, args...) do { } while(0)
-#endif /* __KERNEL__ */
+#endif /* CONFIG_MTD_DEBUG */
#endif /* __MTD_MTD_H__ */
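get_mtd_device()/put_mtd_device() are now real exported functions and erase_info gained fail_addr. The sketch below shows a kernel-side user erasing one block and reading it back; it relies on the callback and state members of struct erase_info that sit outside the hunk shown here, and device number 0 is an arbitrary choice:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/mtd/mtd.h>

static DECLARE_WAIT_QUEUE_HEAD(erase_wq);

static void erase_done(struct erase_info *instr)
{
	wake_up(&erase_wq);
}

/* Illustrative: erase one block on MTD device 0, then read it back. */
static int erase_and_read(loff_t ofs, u_char *buf, size_t len)
{
	struct mtd_info *mtd = get_mtd_device(NULL, 0);
	struct erase_info ei;
	size_t retlen;
	int ret;

	if (!mtd)
		return -ENODEV;

	memset(&ei, 0, sizeof(ei));
	ei.mtd      = mtd;
	ei.addr     = ofs;
	ei.len      = mtd->erasesize;
	ei.callback = erase_done;

	ret = mtd->erase(mtd, &ei);
	if (!ret) {
		wait_event(erase_wq, ei.state == MTD_ERASE_DONE ||
				     ei.state == MTD_ERASE_FAILED);
		if (ei.state == MTD_ERASE_FAILED)
			ret = -EIO;	/* ei.fail_addr may point at the bad block */
	}
	if (!ret)
		ret = mtd->read(mtd, ofs, len, &retlen, buf);

	put_mtd_device(mtd);
	return ret;
}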
diff --git a/linux-2.4.x/include/linux/mtd/nand.h b/linux-2.4.x/include/linux/mtd/nand.h
index 8c678ab..5127728 100644
--- a/linux-2.4.x/include/linux/mtd/nand.h
+++ b/linux-2.4.x/include/linux/mtd/nand.h
@@ -2,9 +2,10 @@
* linux/include/linux/mtd/nand.h
*
* Copyright (c) 2000 David Woodhouse <dwmw2@mvhi.com>
- * Steven J. Hill <sjhill@cotw.com>
+ * Steven J. Hill <sjhill@realitydiluted.com>
+ * Thomas Gleixner <tglx@linutronix.de>
*
- * $Id: nand.h,v 1.8 2000/10/30 17:16:17 sjhill Exp $
+ * $Id: nand.h,v 1.77 2005/12/16 15:41:33 vwool Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -23,17 +24,83 @@
* bat later if I did something naughty.
* 10-11-2000 SJH Added private NAND flash structure for driver
* 10-24-2000 SJH Added prototype for 'nand_scan' function
+ * 10-29-2001 TG changed nand_chip structure to support
+ * hardwarespecific function for accessing control lines
+ * 02-21-2002 TG added support for different read/write address and
+ * ready/busy line access function
+ * 02-26-2002 TG added chip_delay to nand_chip structure to optimize
+ * command delay times for different chips
+ * 04-28-2002 TG OOB config defines moved from nand.c to avoid duplicate
+ * defines in jffs2/wbuf.c
+ * 08-07-2002 TG forced bad block location to byte 5 of OOB, even if
+ * CONFIG_MTD_NAND_ECC_JFFS2 is not set
+ * 08-10-2002 TG extensions to nand_chip structure to support HW-ECC
+ *
+ * 08-29-2002 tglx nand_chip structure: data_poi for selecting
+ * internal / fs-driver buffer
+ * support for 6byte/512byte hardware ECC
+ * read_ecc, write_ecc extended for different oob-layout
+ * oob layout selections: NAND_NONE_OOB, NAND_JFFS2_OOB,
+ * NAND_YAFFS_OOB
+ * 11-25-2002 tglx Added Manufacturer code FUJITSU, NATIONAL
+ * Split manufacturer and device ID structures
+ *
+ * 02-08-2004 tglx added option field to nand structure for chip anomalies
+ * 05-25-2004 tglx added bad block table support, ST-MICRO manufacturer id
+ * update of nand_chip structure description
+ * 01-17-2005 dmarlin added extended commands for AG-AND device and added option
+ * for BBT_AUTO_REFRESH.
+ * 01-20-2005 dmarlin added optional pointer to hardware specific callback for
+ * extra error status checks.
+ *
+ * 11-01-2005 vwool NAND page layouts introduces for HW ECC handling
*/
#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H
#include <linux/config.h>
-#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/mtd/mtd.h>
-/*
- * Searches for a NAND device
+struct mtd_info;
+/* Scan and identify a NAND device */
+extern int nand_scan (struct mtd_info *mtd, int max_chips);
+/* Free resources held by the NAND device */
+extern void nand_release (struct mtd_info *mtd);
+
+/* Read raw data from the device without ECC */
+extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_t len, size_t ooblen);
+
+
+/* The maximum number of NAND chips in an array */
+#define NAND_MAX_CHIPS 8
+
+/* This constant declares the max. oobsize / page, which
+ * is supported now. If you add a chip with bigger oobsize/page
+ * adjust this accordingly.
*/
-extern int nand_scan (struct mtd_info *mtd);
+#define NAND_MAX_OOBSIZE 64
+
+/*
+ * Constants for hardware specific CLE/ALE/NCE function
+ */
+/* Select the chip by setting nCE to low */
+#define NAND_CTL_SETNCE 1
+/* Deselect the chip by setting nCE to high */
+#define NAND_CTL_CLRNCE 2
+/* Select the command latch by setting CLE to high */
+#define NAND_CTL_SETCLE 3
+/* Deselect the command latch by setting CLE to low */
+#define NAND_CTL_CLRCLE 4
+/* Select the address latch by setting ALE to high */
+#define NAND_CTL_SETALE 5
+/* Deselect the address latch by setting ALE to low */
+#define NAND_CTL_CLRALE 6
+/* Set write protection by setting WP to high. Not used! */
+#define NAND_CTL_SETWP 7
+/* Clear write protection by setting WP to low. Not used! */
+#define NAND_CTL_CLRWP 8
/*
* Standard NAND flash commands
@@ -44,12 +111,148 @@ extern int nand_scan (struct mtd_info *mtd);
#define NAND_CMD_READOOB 0x50
#define NAND_CMD_ERASE1 0x60
#define NAND_CMD_STATUS 0x70
+#define NAND_CMD_STATUS_MULTI 0x71
#define NAND_CMD_SEQIN 0x80
#define NAND_CMD_READID 0x90
#define NAND_CMD_ERASE2 0xd0
#define NAND_CMD_RESET 0xff
+/* Extended commands for large page devices */
+#define NAND_CMD_READSTART 0x30
+#define NAND_CMD_CACHEDPROG 0x15
+
+/* Extended commands for AG-AND device */
+/*
+ * Note: the command for NAND_CMD_DEPLETE1 is really 0x00 but
+ * there is no way to distinguish that from NAND_CMD_READ0
+ * until the remaining sequence of commands has been completed
+ * so add a high order bit and mask it off in the command.
+ */
+#define NAND_CMD_DEPLETE1 0x100
+#define NAND_CMD_DEPLETE2 0x38
+#define NAND_CMD_STATUS_MULTI 0x71
+#define NAND_CMD_STATUS_ERROR 0x72
+/* multi-bank error status (banks 0-3) */
+#define NAND_CMD_STATUS_ERROR0 0x73
+#define NAND_CMD_STATUS_ERROR1 0x74
+#define NAND_CMD_STATUS_ERROR2 0x75
+#define NAND_CMD_STATUS_ERROR3 0x76
+#define NAND_CMD_STATUS_RESET 0x7f
+#define NAND_CMD_STATUS_CLEAR 0xff
+
+/* Status bits */
+#define NAND_STATUS_FAIL 0x01
+#define NAND_STATUS_FAIL_N1 0x02
+#define NAND_STATUS_TRUE_READY 0x20
+#define NAND_STATUS_READY 0x40
+#define NAND_STATUS_WP 0x80
+
/*
+ * Constants for ECC_MODES
+ */
+
+/* No ECC. Usage is not recommended ! */
+#define NAND_ECC_NONE 0
+/* Software ECC 3 byte ECC per 256 Byte data */
+#define NAND_ECC_SOFT 1
+/* Hardware ECC 3 byte ECC per 256 Byte data */
+#define NAND_ECC_HW3_256 2
+/* Hardware ECC 3 byte ECC per 512 Byte data */
+#define NAND_ECC_HW3_512 3
+/* Hardware ECC 6 byte ECC per 512 Byte data */
+#define NAND_ECC_HW6_512 4
+/* Hardware ECC 8 byte ECC per 512 Byte data */
+#define NAND_ECC_HW8_512 6
+/* Hardware ECC 12 byte ECC per 2048 Byte data */
+#define NAND_ECC_HW12_2048 7
+
+struct page_layout_item {
+ int length;
+ enum {
+ ITEM_TYPE_DATA,
+ ITEM_TYPE_OOB,
+ ITEM_TYPE_ECC,
+ } type;
+};
+
+/*
+ * Constants for Hardware ECC
+ */
+/* Reset Hardware ECC for read */
+#define NAND_ECC_READ 0
+/* Reset Hardware ECC for write */
+#define NAND_ECC_WRITE 1
+/* Enable Hardware ECC before syndrome is read back from flash */
+#define NAND_ECC_READSYN 2
+#define NAND_ECC_WRITESYN 3
+#define NAND_ECC_READOOB 4
+#define NAND_ECC_WRITEOOB 5
+
+/* Bit mask for flags passed to do_nand_read_ecc */
+#define NAND_GET_DEVICE 0x80
+
+
+/* Option constants for bizarre dysfunctionality and real
+ * features
+ */
+/* Chip can not auto increment pages */
+#define NAND_NO_AUTOINCR 0x00000001
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16 0x00000002
+/* Device supports partial programming without padding */
+#define NAND_NO_PADDING 0x00000004
+/* Chip has cache program function */
+#define NAND_CACHEPRG 0x00000008
+/* Chip has copy back function */
+#define NAND_COPYBACK 0x00000010
+/* AND Chip which has 4 banks and a confusing page / block
+ * assignment. See Renesas datasheet for further information */
+#define NAND_IS_AND 0x00000020
+/* Chip has a array of 4 pages which can be read without
+ * additional ready /busy waits */
+#define NAND_4PAGE_ARRAY 0x00000040
+/* Chip requires that BBT is periodically rewritten to prevent
+ * bits from adjacent blocks from 'leaking' in and altering data.
+ * This happens with the Renesas AG-AND chips, possibly others. */
+#define BBT_AUTO_REFRESH 0x00000080
+
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS \
+ (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
+
+/* Macros to identify the above */
+#define NAND_CANAUTOINCR(chip) (!(chip->options & NAND_NO_AUTOINCR))
+#define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
+#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
+#define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
+
+/* Mask to zero out the chip options, which come from the id table */
+#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR)
+
+/* Non chip related options */
+/* Use a flash based bad block table. This option is passed to the
+ * default bad block table function. */
+#define NAND_USE_FLASH_BBT 0x00010000
+/* The hw ecc generator provides a syndrome instead of an ecc value on read
+ * This can only work if we have the ecc bytes directly behind the
+ * data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */
+#define NAND_HWECC_SYNDROME 0x00020000
+/* This option skips the bbt scan during initialization. */
+#define NAND_SKIP_BBTSCAN 0x00040000
+/* This option specifies that a whole NAND page is to be written in
+ * nand_write_oob. This is needed for some HW ECC generators that need a
+ * whole page to be written to generate ECC properly */
+#define NAND_COMPLEX_OOB_WRITE 0x00080000
+
+/* Options set by nand scan */
+/* Nand scan has allocated oob_buf */
+#define NAND_OOBBUF_ALLOC 0x40000000
+/* Nand scan has allocated data_buf */
+#define NAND_DATABUF_ALLOC 0x80000000
+
+
+/*
+ * nand_state_t - chip states
* Enumeration for NAND flash chip state
*/
typedef enum {
@@ -57,55 +260,143 @@ typedef enum {
FL_READING,
FL_WRITING,
FL_ERASING,
- FL_SYNCING
+ FL_SYNCING,
+ FL_CACHEDPRG,
+ FL_PM_SUSPENDED,
} nand_state_t;
-/*
- * NAND Private Flash Chip Data
- *
- * Structure overview:
- *
- * IO_ADDR - address to access the 8 I/O lines to the flash device
- *
- * CTRL_ADDR - address where ALE, CLE and CE control bits are accessed
- *
- * CLE - location in control word for Command Latch Enable bit
- *
- * ALE - location in control word for Address Latch Enable bit
- *
- * NCE - location in control word for nChip Enable bit
- *
- * chip_lock - spinlock used to protect access to this structure
- *
- * wq - wait queue to sleep on if a NAND operation is in progress
- *
- * state - give the current state of the NAND device
- *
- * page_shift - number of address bits in a page (column address bits)
- *
- * data_buf - data buffer passed to/from MTD user modules
- *
- * ecc_code_buf - used only for holding calculated or read ECCs for
- * a page read or written when ECC is in use
- *
- * reserved - padding to make structure fall on word boundary if
- * when ECC is in use
+/* Keep gcc happy */
+struct nand_chip;
+
+/**
+ * struct nand_hw_control - Control structure for hardware controller (e.g. ECC generator) shared among independent devices
+ * @lock: protection lock
+ * @active: the mtd device which holds the controller currently
+ * @wq: wait queue to sleep on if a NAND operation is in progress
+ * used instead of the per chip wait queue when a hw controller is available
+ */
+struct nand_hw_control {
+ spinlock_t lock;
+ struct nand_chip *active;
+ wait_queue_head_t wq;
+};
+
+/**
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the flash device
+ * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the flash device
+ * @read_byte: [REPLACEABLE] read one byte from the chip
+ * @write_byte: [REPLACEABLE] write one byte to the chip
+ * @read_word: [REPLACEABLE] read one word from the chip
+ * @write_word: [REPLACEABLE] write one word to the chip
+ * @write_buf: [REPLACEABLE] write data from the buffer to the chip
+ * @read_buf: [REPLACEABLE] read data from the chip into the buffer
+ * @verify_buf: [REPLACEABLE] verify buffer contents against the chip data
+ * @select_chip: [REPLACEABLE] select chip nr
+ * @block_bad: [REPLACEABLE] check, if the block is bad
+ * @block_markbad: [REPLACEABLE] mark the block bad
+ * @hwcontrol: [BOARDSPECIFIC] hardwarespecific function for accessing control-lines
+ * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing device ready/busy line
+ * If set to NULL no access to ready/busy is available and the ready/busy information
+ * is read from the chip status register
+ * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing commands to the chip
+ * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on ready
+ * @calculate_ecc: [REPLACEABLE] function for ecc calculation or readback from ecc hardware
+ * @correct_data: [REPLACEABLE] function for ecc correction, matching to ecc generator (sw/hw)
+ * @enable_hwecc: [BOARDSPECIFIC] function to enable (reset) hardware ecc generator. Must only
+ * be provided if a hardware ECC is available
+ * @erase_cmd: [INTERN] erase command write function, selectable due to AND support
+ * @scan_bbt: [REPLACEABLE] function to scan bad block table
+ * @eccmode: [BOARDSPECIFIC] mode of ecc, see defines
+ * @eccsize: [INTERN] databytes used per ecc-calculation
+ * @eccbytes: [INTERN] number of ecc bytes per ecc-calculation step
+ * @eccsteps: [INTERN] number of ecc calculation steps per page
+ * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring data from array to read regs (tR)
+ * @chip_lock: [INTERN] spinlock used to protect access to this structure and the chip
+ * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress
+ * @state: [INTERN] the current state of the NAND device
+ * @page_shift: [INTERN] number of address bits in a page (column address bits)
+ * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
+ * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
+ * @chip_shift: [INTERN] number of address bits in one chip
+ * @data_buf: [INTERN] internal buffer for one page + oob
+ * @oob_buf: [INTERN] oob buffer for one eraseblock
+ * @oobdirty: [INTERN] indicates that oob_buf must be reinitialized
+ * @data_poi: [INTERN] pointer to a data buffer
+ * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about
+ * special functionality. See the defines for further explanation
+ * @badblockpos: [INTERN] position of the bad block marker in the oob area
+ * @numchips: [INTERN] number of physical chips
+ * @chipsize: [INTERN] the size of one chip for multichip arrays
+ * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
+ * @pagebuf: [INTERN] holds the pagenumber which is currently in data_buf
+ * @autooob: [REPLACEABLE] the default (auto)placement scheme
+ * @bbt: [INTERN] bad block table pointer
+ * @bbt_td: [REPLACEABLE] bad block table descriptor for flash lookup
+ * @bbt_md: [REPLACEABLE] bad block table mirror descriptor
+ * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial bad block scan
+ * @controller: [OPTIONAL] a pointer to a hardware controller structure which is shared among multiple independent devices
+ * @priv: [OPTIONAL] pointer to private chip data
+ * @errstat: [OPTIONAL] hardware specific function to perform additional error status checks
+ * (determine if errors are correctable)
*/
+
struct nand_chip {
- unsigned long IO_ADDR;
- unsigned long CTRL_ADDR;
- unsigned int CLE;
- unsigned int ALE;
- unsigned int NCE;
- spinlock_t chip_lock;
+ void __iomem *IO_ADDR_R;
+ void __iomem *IO_ADDR_W;
+
+ u_char (*read_byte)(struct mtd_info *mtd);
+ void (*write_byte)(struct mtd_info *mtd, u_char byte);
+ u16 (*read_word)(struct mtd_info *mtd);
+ void (*write_word)(struct mtd_info *mtd, u16 word);
+
+ void (*write_buf)(struct mtd_info *mtd, const u_char *buf, int len);
+ void (*read_buf)(struct mtd_info *mtd, u_char *buf, int len);
+ int (*verify_buf)(struct mtd_info *mtd, const u_char *buf, int len);
+ void (*select_chip)(struct mtd_info *mtd, int chip);
+ int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
+ int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
+ void (*hwcontrol)(struct mtd_info *mtd, int cmd);
+ int (*dev_ready)(struct mtd_info *mtd);
+ void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, int page_addr);
+ int (*waitfunc)(struct mtd_info *mtd, struct nand_chip *this, int state);
+ int (*calculate_ecc)(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
+ int (*correct_data)(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
+ void (*enable_hwecc)(struct mtd_info *mtd, int mode);
+ void (*erase_cmd)(struct mtd_info *mtd, int page);
+ int (*scan_bbt)(struct mtd_info *mtd);
+ int eccmode;
+ int eccsize;
+ int eccbytes;
+ int eccsteps;
+ int chip_delay;
+ spinlock_t chip_lock;
wait_queue_head_t wq;
- nand_state_t state;
- int page_shift;
- u_char *data_buf;
-#ifdef CONFIG_MTD_NAND_ECC
- u_char ecc_code_buf[6];
- u_char reserved[2];
-#endif
+ nand_state_t state;
+ int page_shift;
+ int phys_erase_shift;
+ int bbt_erase_shift;
+ int chip_shift;
+ u_char *data_buf;
+ u_char *oob_buf;
+ int oobdirty;
+ u_char *data_poi;
+ unsigned int options;
+ int badblockpos;
+ int numchips;
+ unsigned long chipsize;
+ int pagemask;
+ int pagebuf;
+ struct nand_oobinfo *autooob;
+ struct page_layout_item *layout;
+ int layout_allocated;
+ uint8_t *bbt;
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+ struct nand_bbt_descr *badblock_pattern;
+ struct nand_hw_control *controller;
+ void *priv;
+ int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, int status, int page);
};
/*
@@ -113,42 +404,129 @@ struct nand_chip {
*/
#define NAND_MFR_TOSHIBA 0x98
#define NAND_MFR_SAMSUNG 0xec
+#define NAND_MFR_FUJITSU 0x04
+#define NAND_MFR_NATIONAL 0x8f
+#define NAND_MFR_RENESAS 0x07
+#define NAND_MFR_STMICRO 0x20
+#define NAND_MFR_HYNIX 0xad
-/*
- * NAND Flash Device ID Structure
- *
- * Structure overview:
- *
- * name - Complete name of device
- *
- * manufacture_id - manufacturer ID code of device.
+/**
+ * struct nand_flash_dev - NAND Flash Device ID Structure
*
- * model_id - model ID code of device.
- *
- * chipshift - total number of address bits for the device which
- * is used to calculate address offsets and the total
- * number of bytes the device is capable of.
- *
- * page256 - denotes if flash device has 256 byte pages or not.
- *
- * pageadrlen - number of bytes minus one needed to hold the
- * complete address into the flash array. Keep in
- * mind that when a read or write is done to a
- * specific address, the address is input serially
- * 8 bits at a time. This structure member is used
- * by the read/write routines as a loop index for
- * shifting the address out 8 bits at a time.
- *
- * erasesize - size of an erase block in the flash device.
+ * @name: Identify the device type
+ * @id: device ID code
+ * @pagesize: Pagesize in bytes. Either 256 or 512 or 0
+ * If the pagesize is 0, then the real pagesize
+ * and the erasesize are determined from the
+ * extended id bytes in the chip
+ * @erasesize: Size of an erase block in the flash device.
+ * @chipsize: Total chipsize in Mega Bytes
+ * @options: Bitfield to store chip relevant options
*/
struct nand_flash_dev {
- char * name;
- int manufacture_id;
- int model_id;
- int chipshift;
- char page256;
- char pageadrlen;
+ char *name;
+ int id;
+ unsigned long pagesize;
+ unsigned long chipsize;
unsigned long erasesize;
+ unsigned long options;
};
+/**
+ * struct nand_manufacturers - NAND Flash Manufacturer ID Structure
+ * @name: Manufacturer name
+ * @id: manufacturer ID code of device.
+ */
+struct nand_manufacturers {
+ int id;
+ char * name;
+};
+
+extern struct nand_flash_dev nand_flash_ids[];
+extern struct nand_manufacturers nand_manuf_ids[];
+
+/**
+ * struct nand_bbt_descr - bad block table descriptor
+ * @options: options for this descriptor
+ * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
+ * when the bbt is searched, the pages of the bbts found are stored here.
+ * It's an array and supports up to 8 chips now
+ * @offs: offset of the pattern in the oob area of the page
+ * @veroffs: offset of the bbt version counter in the oob area of the page
+ * @version: version read from the bbt page during scan
+ * @len: length of the pattern, if 0 no pattern check is performed
+ * @maxblocks: maximum number of blocks to search for a bbt. This number of
+ * blocks is reserved at the end of the device where the tables are
+ * written.
+ * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
+ * bad) block in the stored bbt
+ * @pattern: pattern to identify bad block table or factory marked good /
+ * bad blocks, can be NULL, if len = 0
+ *
+ * Descriptor for the bad block table marker and the descriptor for the
+ * pattern which identifies good and bad blocks. The assumption is made
+ * that the pattern and the version count are always located in the oob area
+ * of the first block.
+ */
+struct nand_bbt_descr {
+ int options;
+ int pages[NAND_MAX_CHIPS];
+ int offs;
+ int veroffs;
+ uint8_t version[NAND_MAX_CHIPS];
+ int len;
+ int maxblocks;
+ int reserved_block_code;
+ uint8_t *pattern;
+};
+
+/* Options for the bad block table descriptors */
+
+/* The number of bits used per block in the bbt on the device */
+#define NAND_BBT_NRBITS_MSK 0x0000000F
+#define NAND_BBT_1BIT 0x00000001
+#define NAND_BBT_2BIT 0x00000002
+#define NAND_BBT_4BIT 0x00000004
+#define NAND_BBT_8BIT 0x00000008
+/* The bad block table is in the last good block of the device */
+#define NAND_BBT_LASTBLOCK 0x00000010
+/* The bbt is at the given page, else we must scan for the bbt */
+#define NAND_BBT_ABSPAGE 0x00000020
+/* The bbt is at the given page, else we must scan for the bbt */
+#define NAND_BBT_SEARCH 0x00000040
+/* bbt is stored per chip on multichip devices */
+#define NAND_BBT_PERCHIP 0x00000080
+/* bbt has a version counter at offset veroffs */
+#define NAND_BBT_VERSION 0x00000100
+/* Create a bbt if none exists */
+#define NAND_BBT_CREATE 0x00000200
+/* Search good / bad pattern through all pages of a block */
+#define NAND_BBT_SCANALLPAGES 0x00000400
+/* Scan block empty during good / bad block scan */
+#define NAND_BBT_SCANEMPTY 0x00000800
+/* Write bbt if necessary */
+#define NAND_BBT_WRITE 0x00001000
+/* Read and write back block contents when writing bbt */
+#define NAND_BBT_SAVECONTENT 0x00002000
+/* Search good / bad pattern on the first and the second page */
+#define NAND_BBT_SCAN2NDPAGE 0x00004000
+
+/* The maximum number of blocks to scan for a bbt */
+#define NAND_BBT_SCAN_MAXBLOCKS 4
+
+extern int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd);
+extern int nand_update_bbt (struct mtd_info *mtd, loff_t offs);
+extern int nand_default_bbt (struct mtd_info *mtd);
+extern int nand_isbad_bbt (struct mtd_info *mtd, loff_t offs, int allowbbt);
+extern int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbbt);
+extern int nand_do_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf, u_char * oob_buf,
+ struct nand_oobinfo *oobsel, int flags);
+
+/*
+ * Constants for oob configuration
+ */
+#define NAND_SMALL_BADBLOCK_POS 5
+#define NAND_LARGE_BADBLOCK_POS 0
+
#endif /* __LINUX_MTD_NAND_H */
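nand_scan() now takes a chip count and the board driver supplies I/O addresses plus a hwcontrol() hook instead of the old IO_ADDR/CTRL_ADDR pair. A minimal board driver in that shape is sketched below; the register addresses, latch bits and timing are invented board details and error unwinding is trimmed:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <asm/io.h>

#define BOARD_NAND_IO	0xd0000000UL	/* hypothetical data window */
#define BOARD_NAND_CTL	0xd0000004UL	/* hypothetical latch register */
#define CTL_CLE		0x01
#define CTL_ALE		0x02
#define CTL_NCE		0x04

static struct mtd_info *board_mtd;
static void __iomem *io_base, *ctl_base;

/* Drive CLE/ALE/nCE through the (made-up) board latch register. */
static void board_hwcontrol(struct mtd_info *mtd, int cmd)
{
	unsigned char bits = readb(ctl_base);

	switch (cmd) {
	case NAND_CTL_SETCLE: bits |=  CTL_CLE; break;
	case NAND_CTL_CLRCLE: bits &= ~CTL_CLE; break;
	case NAND_CTL_SETALE: bits |=  CTL_ALE; break;
	case NAND_CTL_CLRALE: bits &= ~CTL_ALE; break;
	case NAND_CTL_SETNCE: bits &= ~CTL_NCE; break;	/* nCE is active low */
	case NAND_CTL_CLRNCE: bits |=  CTL_NCE; break;
	}
	writeb(bits, ctl_base);
}

static int __init board_nand_init(void)
{
	struct nand_chip *this;

	board_mtd = kmalloc(sizeof(*board_mtd) + sizeof(*this), GFP_KERNEL);
	if (!board_mtd)
		return -ENOMEM;
	memset(board_mtd, 0, sizeof(*board_mtd) + sizeof(*this));
	this = (struct nand_chip *)(board_mtd + 1);
	board_mtd->priv = this;

	io_base  = ioremap(BOARD_NAND_IO, 4);
	ctl_base = ioremap(BOARD_NAND_CTL, 4);

	this->IO_ADDR_R  = io_base;
	this->IO_ADDR_W  = io_base;
	this->hwcontrol  = board_hwcontrol;
	this->dev_ready  = NULL;	/* no R/B# line: fall back to the status register */
	this->chip_delay = 25;		/* worst-case tR in microseconds */
	this->eccmode    = NAND_ECC_SOFT;

	if (nand_scan(board_mtd, 1)) {
		kfree(board_mtd);
		return -ENXIO;
	}
	return add_mtd_device(board_mtd);
}

module_init(board_nand_init);
MODULE_LICENSE("GPL");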
diff --git a/linux-2.4.x/include/linux/mtd/nand_ecc.h b/linux-2.4.x/include/linux/mtd/nand_ecc.h
index f3f1d7e..12c5bc3 100644
--- a/linux-2.4.x/include/linux/mtd/nand_ecc.h
+++ b/linux-2.4.x/include/linux/mtd/nand_ecc.h
@@ -1,9 +1,9 @@
/*
* drivers/mtd/nand_ecc.h
*
- * Copyright (C) 2000 Steven J. Hill (sjhill@cotw.com)
+ * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
*
- * $Id: nand_ecc.h,v 1.1 2000/10/12 00:57:15 sjhill Exp $
+ * $Id: nand_ecc.h,v 1.4 2004/06/17 02:35:02 dbrown Exp $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -12,17 +12,19 @@
* This file is the header for the ECC algorithm.
*/
-/*
- * Creates non-inverted ECC code from line parity
- */
-void nand_trans_result(u_char reg2, u_char reg3, u_char *ecc_code);
+#ifndef __MTD_NAND_ECC_H__
+#define __MTD_NAND_ECC_H__
+
+struct mtd_info;
/*
* Calculate 3 byte ECC code for 256 byte block
*/
-void nand_calculate_ecc (const u_char *dat, u_char *ecc_code);
+int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
/*
* Detect and correct a 1 bit error for 256 byte block
*/
-int nand_correct_data (u_char *dat, u_char *read_ecc, u_char *calc_ecc);
+int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
+
+#endif /* __MTD_NAND_ECC_H__ */
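Both helpers now take the mtd pointer and report a status. A sketch of the round trip on one 256-byte block follows; the exact positive return codes of nand_correct_data() are left to the implementation in nand_ecc.c:

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_ecc.h>

/* Illustrative: verify one 256-byte block against its stored 3-byte ECC. */
static int check_block(struct mtd_info *mtd, u_char *data, u_char *stored_ecc)
{
	u_char calc_ecc[3];

	nand_calculate_ecc(mtd, data, calc_ecc);
	/* Negative return: uncorrectable; otherwise 'data' is now usable. */
	return nand_correct_data(mtd, data, stored_ecc, calc_ecc);
}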
diff --git a/linux-2.4.x/include/linux/mtd/nftl.h b/linux-2.4.x/include/linux/mtd/nftl.h
index 3483a8c..d35d2c2 100644
--- a/linux-2.4.x/include/linux/mtd/nftl.h
+++ b/linux-2.4.x/include/linux/mtd/nftl.h
@@ -1,81 +1,16 @@
-
-/* Defines for NAND Flash Translation Layer */
-/* (c) 1999 Machine Vision Holdings, Inc. */
-/* Author: David Woodhouse <dwmw2@mvhi.com> */
-/* $Id: nftl.h,v 1.10 2000/12/29 00:25:38 dwmw2 Exp $ */
+/*
+ * $Id: nftl.h,v 1.16 2004/06/30 14:49:00 dbrown Exp $
+ *
+ * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
+ */
#ifndef __MTD_NFTL_H__
#define __MTD_NFTL_H__
-#ifndef __BOOT__
#include <linux/mtd/mtd.h>
-#endif
-
-/* Block Control Information */
-
-struct nftl_bci {
- unsigned char ECCSig[6];
- __u8 Status;
- __u8 Status1;
-}__attribute__((packed));
-
-/* Unit Control Information */
+#include <linux/mtd/blktrans.h>
-struct nftl_uci0 {
- __u16 VirtUnitNum;
- __u16 ReplUnitNum;
- __u16 SpareVirtUnitNum;
- __u16 SpareReplUnitNum;
-} __attribute__((packed));
-
-struct nftl_uci1 {
- __u32 WearInfo;
- __u16 EraseMark;
- __u16 EraseMark1;
-} __attribute__((packed));
-
-struct nftl_uci2 {
- __u16 FoldMark;
- __u16 FoldMark1;
- __u32 unused;
-} __attribute__((packed));
-
-union nftl_uci {
- struct nftl_uci0 a;
- struct nftl_uci1 b;
- struct nftl_uci2 c;
-};
-
-struct nftl_oob {
- struct nftl_bci b;
- union nftl_uci u;
-};
-
-/* NFTL Media Header */
-
-struct NFTLMediaHeader {
- char DataOrgID[6];
- __u16 NumEraseUnits;
- __u16 FirstPhysicalEUN;
- __u32 FormattedSize;
- unsigned char UnitSizeFactor;
-} __attribute__((packed));
-
-#define MAX_ERASE_ZONES (8192 - 512)
-
-#define ERASE_MARK 0x3c69
-#define SECTOR_FREE 0xff
-#define SECTOR_USED 0x55
-#define SECTOR_IGNORE 0x11
-#define SECTOR_DELETED 0x00
-
-#define FOLD_MARK_IN_PROGRESS 0x5555
-
-#define ZONE_GOOD 0xff
-#define ZONE_BAD_ORIGINAL 0
-#define ZONE_BAD_MARKED 7
-
-#ifdef __KERNEL__
+#include <mtd/nftl-user.h>
/* these info are used in ReplUnitTable */
#define BLOCK_NIL 0xffff /* last block of a chain */
@@ -84,8 +19,7 @@ struct NFTLMediaHeader {
#define BLOCK_RESERVED 0xfffc /* bios block or bad block */
struct NFTLrecord {
- struct mtd_info *mtd;
- struct semaphore mutex;
+ struct mtd_blktrans_dev mbd;
__u16 MediaUnit, SpareMediaUnit;
__u32 EraseSize;
struct NFTLMediaHeader MediaHdr;
@@ -97,13 +31,13 @@ struct NFTLrecord {
__u16 lastEUN; /* should be suppressed */
__u16 numfreeEUNs;
__u16 LastFreeEUN; /* To speed up finding a free EUN */
- __u32 long nr_sects;
int head,sect,cyl;
__u16 *EUNtable; /* [numvunits]: First EUN for each virtual unit */
__u16 *ReplUnitTable; /* [numEUNs]: ReplUnitNumber for each */
unsigned int nb_blocks; /* number of physical blocks */
unsigned int nb_boot_blocks; /* number of blocks used by the bios */
struct erase_info instr;
+ struct nand_oobinfo oobinfo;
};
int NFTL_mount(struct NFTLrecord *s);
@@ -114,9 +48,7 @@ int NFTL_formatblock(struct NFTLrecord *s, int block);
#endif
#define MAX_NFTLS 16
-#define MAX_SECTORS_PER_UNIT 32
+#define MAX_SECTORS_PER_UNIT 64
#define NFTL_PARTN_BITS 4
-#endif /* __KERNEL__ */
-
#endif /* __MTD_NFTL_H__ */
diff --git a/linux-2.4.x/include/linux/mtd/onenand.h b/linux-2.4.x/include/linux/mtd/onenand.h
new file mode 100644
index 0000000..d259e0a
--- /dev/null
+++ b/linux-2.4.x/include/linux/mtd/onenand.h
@@ -0,0 +1,161 @@
+/*
+ * linux/include/linux/mtd/onenand.h
+ *
+ * Copyright (C) 2005 Samsung Electronics
+ * Kyungmin Park <kyungmin.park@samsung.com>
+ *
+ * $Id: onenand.h,v 1.13 2006/02/22 00:26:38 kmpark Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MTD_ONENAND_H
+#define __LINUX_MTD_ONENAND_H
+
+#include <linux/spinlock.h>
+#include <linux/mtd/onenand_regs.h>
+#include <linux/mtd/bbm.h>
+
+#define MAX_BUFFERRAM 2
+
+/* Scan and identify a OneNAND device */
+extern int onenand_scan(struct mtd_info *mtd, int max_chips);
+/* Free resources held by the OneNAND device */
+extern void onenand_release(struct mtd_info *mtd);
+
+/**
+ * onenand_state_t - chip states
+ * Enumeration for OneNAND flash chip state
+ */
+typedef enum {
+ FL_READY,
+ FL_READING,
+ FL_WRITING,
+ FL_ERASING,
+ FL_SYNCING,
+ FL_UNLOCKING,
+ FL_LOCKING,
+ FL_RESETING,
+ FL_OTPING,
+ FL_PM_SUSPENDED,
+} onenand_state_t;
+
+/**
+ * struct onenand_bufferram - OneNAND BufferRAM Data
+ * @param block block address in BufferRAM
+ * @param page page address in BufferRAM
+ * @param valid valid flag
+ */
+struct onenand_bufferram {
+ int block;
+ int page;
+ int valid;
+};
+
+/**
+ * struct onenand_chip - OneNAND Private Flash Chip Data
+ * @param base [BOARDSPECIFIC] address to access OneNAND
+ * @param chipsize [INTERN] the size of one chip for multichip arrays
+ * @param device_id [INTERN] device ID
+ * @param version_id [INTERN] version ID
+ * @param options [BOARDSPECIFIC] various chip options. They can partly be set to inform onenand_scan about special functionality
+ * @param erase_shift [INTERN] number of address bits in a block
+ * @param page_shift [INTERN] number of address bits in a page
+ * @param ppb_shift [INTERN] number of address bits in a pages per block
+ * @param page_mask [INTERN] a page per block mask
+ * @param bufferram_index [INTERN] BufferRAM index
+ * @param bufferram [INTERN] BufferRAM info
+ * @param readw [REPLACEABLE] hardware specific function for read short
+ * @param writew [REPLACEABLE] hardware specific function for write short
+ * @param command [REPLACEABLE] hardware specific function for writing commands to the chip
+ * @param wait [REPLACEABLE] hardware specific function for wait on ready
+ * @param read_bufferram [REPLACEABLE] hardware specific function for reading from the BufferRAM area
+ * @param write_bufferram [REPLACEABLE] hardware specific function for writing to the BufferRAM area
+ * @param read_word [REPLACEABLE] hardware specific function for reading a OneNAND register
+ * @param write_word [REPLACEABLE] hardware specific function for writing a OneNAND register
+ * @param scan_bbt [REPLACEABLE] hardware specific function for scanning the Bad Block Table
+ * @param chip_lock [INTERN] spinlock used to protect access to this structure and the chip
+ * @param wq [INTERN] wait queue to sleep on if a OneNAND operation is in progress
+ * @param state [INTERN] the current state of the OneNAND device
+ * @param autooob [REPLACEABLE] the default (auto)placement scheme
+ * @param bbm [REPLACEABLE] pointer to Bad Block Management
+ * @param priv [OPTIONAL] pointer to private chip data
+ */
+struct onenand_chip {
+ void __iomem *base;
+ unsigned int chipsize;
+ unsigned int device_id;
+ unsigned int density_mask;
+ unsigned int options;
+
+ unsigned int erase_shift;
+ unsigned int page_shift;
+ unsigned int ppb_shift; /* Pages per block shift */
+ unsigned int page_mask;
+
+ unsigned int bufferram_index;
+ struct onenand_bufferram bufferram[MAX_BUFFERRAM];
+
+ int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len);
+ int (*wait)(struct mtd_info *mtd, int state);
+ int (*read_bufferram)(struct mtd_info *mtd, int area,
+ unsigned char *buffer, int offset, size_t count);
+ int (*write_bufferram)(struct mtd_info *mtd, int area,
+ const unsigned char *buffer, int offset, size_t count);
+ unsigned short (*read_word)(void __iomem *addr);
+ void (*write_word)(unsigned short value, void __iomem *addr);
+ void (*mmcontrol)(struct mtd_info *mtd, int sync_read);
+ int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
+ int (*scan_bbt)(struct mtd_info *mtd);
+
+ spinlock_t chip_lock;
+ wait_queue_head_t wq;
+ onenand_state_t state;
+ unsigned char *page_buf;
+
+ struct nand_oobinfo *autooob;
+
+ void *bbm;
+
+ void *priv;
+};
+
+/*
+ * Helper macros
+ */
+#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index)
+#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1)
+#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1)
+
+#define ONENAND_GET_SYS_CFG1(this) \
+ (this->read_word(this->base + ONENAND_REG_SYS_CFG1))
+#define ONENAND_SET_SYS_CFG1(v, this) \
+ (this->write_word(v, this->base + ONENAND_REG_SYS_CFG1))
+
+/* Check byte access in OneNAND */
+#define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1)
+
+/*
+ * Options bits
+ */
+#define ONENAND_CONT_LOCK (0x0001)
+#define ONENAND_PAGEBUF_ALLOC (0x1000)
+
+/*
+ * OneNAND Flash Manufacturer ID Codes
+ */
+#define ONENAND_MFR_SAMSUNG 0xec
+
+/**
+ * struct onenand_manufacturers - OneNAND Flash Manufacturer ID Structure
+ * @param name: Manufacturer name
+ * @param id: manufacturer ID code of device.
+ */
+struct onenand_manufacturers {
+ int id;
+ char *name;
+};
+
+#endif /* __LINUX_MTD_ONENAND_H */
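
A board file only has to point the chip at its memory window and let onenand_scan() probe it. The sketch below assumes the common MTD convention that mtd->priv carries the struct onenand_chip (that is how the core this header was lifted from locates its private data); the register base is a placeholder:

    #include <linux/init.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/onenand.h>

    static struct onenand_chip board_onenand;
    static struct mtd_info board_mtd;

    static int __init board_onenand_init(void __iomem *regs)
    {
            board_onenand.base = regs;        /* memory-mapped OneNAND window */
            board_mtd.priv = &board_onenand;  /* assumed core convention */

            /* Probe at most one chip; onenand_release() undoes this on exit. */
            return onenand_scan(&board_mtd, 1);
    }
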
diff --git a/linux-2.4.x/include/linux/mtd/onenand_regs.h b/linux-2.4.x/include/linux/mtd/onenand_regs.h
new file mode 100644
index 0000000..32dacb4
--- /dev/null
+++ b/linux-2.4.x/include/linux/mtd/onenand_regs.h
@@ -0,0 +1,190 @@
+/*
+ * linux/include/linux/mtd/onenand_regs.h
+ *
+ * OneNAND Register header file
+ *
+ * Copyright (C) 2005 Samsung Electronics
+ *
+ * $Id: onenand_regs.h,v 1.4 2006/02/22 00:26:38 kmpark Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ONENAND_REG_H
+#define __ONENAND_REG_H
+
+/* Memory Address Map Translation (Word order) */
+#define ONENAND_MEMORY_MAP(x) ((x) << 1)
+
+/*
+ * External BufferRAM area
+ */
+#define ONENAND_BOOTRAM ONENAND_MEMORY_MAP(0x0000)
+#define ONENAND_DATARAM ONENAND_MEMORY_MAP(0x0200)
+#define ONENAND_SPARERAM ONENAND_MEMORY_MAP(0x8010)
+
+/*
+ * OneNAND Registers
+ */
+#define ONENAND_REG_MANUFACTURER_ID ONENAND_MEMORY_MAP(0xF000)
+#define ONENAND_REG_DEVICE_ID ONENAND_MEMORY_MAP(0xF001)
+#define ONENAND_REG_VERSION_ID ONENAND_MEMORY_MAP(0xF002)
+#define ONENAND_REG_DATA_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF003)
+#define ONENAND_REG_BOOT_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF004)
+#define ONENAND_REG_NUM_BUFFERS ONENAND_MEMORY_MAP(0xF005)
+#define ONENAND_REG_TECHNOLOGY ONENAND_MEMORY_MAP(0xF006)
+
+#define ONENAND_REG_START_ADDRESS1 ONENAND_MEMORY_MAP(0xF100)
+#define ONENAND_REG_START_ADDRESS2 ONENAND_MEMORY_MAP(0xF101)
+#define ONENAND_REG_START_ADDRESS3 ONENAND_MEMORY_MAP(0xF102)
+#define ONENAND_REG_START_ADDRESS4 ONENAND_MEMORY_MAP(0xF103)
+#define ONENAND_REG_START_ADDRESS5 ONENAND_MEMORY_MAP(0xF104)
+#define ONENAND_REG_START_ADDRESS6 ONENAND_MEMORY_MAP(0xF105)
+#define ONENAND_REG_START_ADDRESS7 ONENAND_MEMORY_MAP(0xF106)
+#define ONENAND_REG_START_ADDRESS8 ONENAND_MEMORY_MAP(0xF107)
+
+#define ONENAND_REG_START_BUFFER ONENAND_MEMORY_MAP(0xF200)
+#define ONENAND_REG_COMMAND ONENAND_MEMORY_MAP(0xF220)
+#define ONENAND_REG_SYS_CFG1 ONENAND_MEMORY_MAP(0xF221)
+#define ONENAND_REG_SYS_CFG2 ONENAND_MEMORY_MAP(0xF222)
+#define ONENAND_REG_CTRL_STATUS ONENAND_MEMORY_MAP(0xF240)
+#define ONENAND_REG_INTERRUPT ONENAND_MEMORY_MAP(0xF241)
+#define ONENAND_REG_START_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24C)
+#define ONENAND_REG_END_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24D)
+#define ONENAND_REG_WP_STATUS ONENAND_MEMORY_MAP(0xF24E)
+
+#define ONENAND_REG_ECC_STATUS ONENAND_MEMORY_MAP(0xFF00)
+#define ONENAND_REG_ECC_M0 ONENAND_MEMORY_MAP(0xFF01)
+#define ONENAND_REG_ECC_S0 ONENAND_MEMORY_MAP(0xFF02)
+#define ONENAND_REG_ECC_M1 ONENAND_MEMORY_MAP(0xFF03)
+#define ONENAND_REG_ECC_S1 ONENAND_MEMORY_MAP(0xFF04)
+#define ONENAND_REG_ECC_M2 ONENAND_MEMORY_MAP(0xFF05)
+#define ONENAND_REG_ECC_S2 ONENAND_MEMORY_MAP(0xFF06)
+#define ONENAND_REG_ECC_M3 ONENAND_MEMORY_MAP(0xFF07)
+#define ONENAND_REG_ECC_S3 ONENAND_MEMORY_MAP(0xFF08)
+
+/*
+ * Device ID Register F001h (R)
+ */
+#define ONENAND_DEVICE_DENSITY_SHIFT (4)
+#define ONENAND_DEVICE_IS_DDP (1 << 3)
+#define ONENAND_DEVICE_IS_DEMUX (1 << 2)
+#define ONENAND_DEVICE_VCC_MASK (0x3)
+
+#define ONENAND_DEVICE_DENSITY_512Mb (0x002)
+
+/*
+ * Version ID Register F002h (R)
+ */
+#define ONENAND_VERSION_PROCESS_SHIFT (8)
+
+/*
+ * Start Address 1 F100h (R/W)
+ */
+#define ONENAND_DDP_SHIFT (15)
+
+/*
+ * Start Address 8 F107h (R/W)
+ */
+#define ONENAND_FPA_MASK (0x3f)
+#define ONENAND_FPA_SHIFT (2)
+#define ONENAND_FSA_MASK (0x03)
+
+/*
+ * Start Buffer Register F200h (R/W)
+ */
+#define ONENAND_BSA_MASK (0x03)
+#define ONENAND_BSA_SHIFT (8)
+#define ONENAND_BSA_BOOTRAM (0 << 2)
+#define ONENAND_BSA_DATARAM0 (2 << 2)
+#define ONENAND_BSA_DATARAM1 (3 << 2)
+#define ONENAND_BSC_MASK (0x03)
+
+/*
+ * Command Register F220h (R/W)
+ */
+#define ONENAND_CMD_READ (0x00)
+#define ONENAND_CMD_READOOB (0x13)
+#define ONENAND_CMD_PROG (0x80)
+#define ONENAND_CMD_PROGOOB (0x1A)
+#define ONENAND_CMD_UNLOCK (0x23)
+#define ONENAND_CMD_LOCK (0x2A)
+#define ONENAND_CMD_LOCK_TIGHT (0x2C)
+#define ONENAND_CMD_ERASE (0x94)
+#define ONENAND_CMD_RESET (0xF0)
+#define ONENAND_CMD_OTP_ACCESS (0x65)
+#define ONENAND_CMD_READID (0x90)
+
+/* NOTE: Those are not *REAL* commands */
+#define ONENAND_CMD_BUFFERRAM (0x1978)
+
+/*
+ * System Configuration 1 Register F221h (R, R/W)
+ */
+#define ONENAND_SYS_CFG1_SYNC_READ (1 << 15)
+#define ONENAND_SYS_CFG1_BRL_7 (7 << 12)
+#define ONENAND_SYS_CFG1_BRL_6 (6 << 12)
+#define ONENAND_SYS_CFG1_BRL_5 (5 << 12)
+#define ONENAND_SYS_CFG1_BRL_4 (4 << 12)
+#define ONENAND_SYS_CFG1_BRL_3 (3 << 12)
+#define ONENAND_SYS_CFG1_BRL_10 (2 << 12)
+#define ONENAND_SYS_CFG1_BRL_9 (1 << 12)
+#define ONENAND_SYS_CFG1_BRL_8 (0 << 12)
+#define ONENAND_SYS_CFG1_BRL_SHIFT (12)
+#define ONENAND_SYS_CFG1_BL_32 (4 << 9)
+#define ONENAND_SYS_CFG1_BL_16 (3 << 9)
+#define ONENAND_SYS_CFG1_BL_8 (2 << 9)
+#define ONENAND_SYS_CFG1_BL_4 (1 << 9)
+#define ONENAND_SYS_CFG1_BL_CONT (0 << 9)
+#define ONENAND_SYS_CFG1_BL_SHIFT (9)
+#define ONENAND_SYS_CFG1_NO_ECC (1 << 8)
+#define ONENAND_SYS_CFG1_RDY (1 << 7)
+#define ONENAND_SYS_CFG1_INT (1 << 6)
+#define ONENAND_SYS_CFG1_IOBE (1 << 5)
+#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4)
+
+/*
+ * Controller Status Register F240h (R)
+ */
+#define ONENAND_CTRL_ONGO (1 << 15)
+#define ONENAND_CTRL_LOCK (1 << 14)
+#define ONENAND_CTRL_LOAD (1 << 13)
+#define ONENAND_CTRL_PROGRAM (1 << 12)
+#define ONENAND_CTRL_ERASE (1 << 11)
+#define ONENAND_CTRL_ERROR (1 << 10)
+#define ONENAND_CTRL_RSTB (1 << 7)
+#define ONENAND_CTRL_OTP_L (1 << 6)
+#define ONENAND_CTRL_OTP_BL (1 << 5)
+
+/*
+ * Interrupt Status Register F241h (R)
+ */
+#define ONENAND_INT_MASTER (1 << 15)
+#define ONENAND_INT_READ (1 << 7)
+#define ONENAND_INT_WRITE (1 << 6)
+#define ONENAND_INT_ERASE (1 << 5)
+#define ONENAND_INT_RESET (1 << 4)
+#define ONENAND_INT_CLEAR (0 << 0)
+
+/*
+ * NAND Flash Write Protection Status Register F24Eh (R)
+ */
+#define ONENAND_WP_US (1 << 2)
+#define ONENAND_WP_LS (1 << 1)
+#define ONENAND_WP_LTS (1 << 0)
+
+/*
+ * ECC Status Register FF00h (R)
+ */
+#define ONENAND_ECC_1BIT (1 << 0)
+#define ONENAND_ECC_2BIT (1 << 1)
+#define ONENAND_ECC_2BIT_ALL (0xAAAA)
+
+/*
+ * One-Time Programmable (OTP)
+ */
+#define ONENAND_OTP_LOCK_OFFSET (14)
+
+#endif /* __ONENAND_REG_H */
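
The register map is meant to be accessed through the chip's read_word()/write_word() hooks, exactly as the ONENAND_GET_SYS_CFG1() macro in onenand.h does. A small sketch of dumping the ID registers; the density decode is only illustrative:

    #include <linux/kernel.h>
    #include <linux/mtd/onenand.h>
    #include <linux/mtd/onenand_regs.h>

    static void onenand_print_ids(struct onenand_chip *this)
    {
            unsigned short man = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
            unsigned short dev = this->read_word(this->base + ONENAND_REG_DEVICE_ID);

            printk(KERN_INFO "OneNAND: maker 0x%02x%s, device 0x%02x, density code %d\n",
                   man, man == ONENAND_MFR_SAMSUNG ? " (Samsung)" : "",
                   dev, dev >> ONENAND_DEVICE_DENSITY_SHIFT);
    }
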
diff --git a/linux-2.4.x/include/linux/mtd/partitions.h b/linux-2.4.x/include/linux/mtd/partitions.h
index 7c55241..b03f512 100644
--- a/linux-2.4.x/include/linux/mtd/partitions.h
+++ b/linux-2.4.x/include/linux/mtd/partitions.h
@@ -5,7 +5,7 @@
*
* This code is GPL
*
- * $Id: partitions.h,v 1.8 2002/03/08 16:34:36 rkaiser Exp $
+ * $Id: partitions.h,v 1.17 2005/11/07 11:14:55 gleixner Exp $
*/
#ifndef MTD_PARTITIONS_H
@@ -16,32 +16,33 @@
/*
* Partition definition structure:
- *
+ *
* An array of struct partition is passed along with a MTD object to
* add_mtd_partitions() to create them.
*
* For each partition, these fields are available:
* name: string that will be used to label the partition's MTD device.
- * size: the partition size; if defined as MTDPART_SIZ_FULL, the partition
+ * size: the partition size; if defined as MTDPART_SIZ_FULL, the partition
* will extend to the end of the master MTD device.
- * offset: absolute starting position within the master MTD device; if
- * defined as MTDPART_OFS_APPEND, the partition will start where the
+ * offset: absolute starting position within the master MTD device; if
+ * defined as MTDPART_OFS_APPEND, the partition will start where the
* previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block.
- * mask_flags: contains flags that have to be masked (removed) from the
+ * mask_flags: contains flags that have to be masked (removed) from the
* master MTD flag set for the corresponding MTD partition.
- * For example, to force a read-only partition, simply adding
+ * For example, to force a read-only partition, simply adding
* MTD_WRITEABLE to the mask_flags will do the trick.
*
- * Note: writeable partitions require their size and offset be
+ * Note: writeable partitions require their size and offset be
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
- */
+ */
struct mtd_partition {
- char *name; /* identifier string */
- u_int32_t size; /* partition size */
+ char *name; /* identifier string */
+ u_int32_t size; /* partition size */
u_int32_t offset; /* offset within the master MTD space */
- u_int32_t mask_flags; /* master MTD flags to mask out for this partition */
- struct mtd_info **mtdp; /* pointer to store the MTD object */
+ u_int32_t mask_flags; /* master MTD flags to mask out for this partition */
+ struct nand_oobinfo *oobsel; /* out of band layout for this partition (NAND only)*/
+ struct mtd_info **mtdp; /* pointer to store the MTD object */
};
#define MTDPART_OFS_NXTBLK (-2)
@@ -49,8 +50,26 @@ struct mtd_partition {
#define MTDPART_SIZ_FULL (0)
-int add_mtd_partitions(struct mtd_info *, struct mtd_partition *, int);
+int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
+/*
+ * Functions dealing with the various ways of partitioning the space
+ */
+
+struct mtd_part_parser {
+ struct list_head list;
+ struct module *owner;
+ const char *name;
+ int (*parse_fn)(struct mtd_info *, struct mtd_partition **, unsigned long);
+};
+
+extern int register_mtd_parser(struct mtd_part_parser *parser);
+extern int deregister_mtd_parser(struct mtd_part_parser *parser);
+extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
+ struct mtd_partition **pparts, unsigned long origin);
+
+#define put_partition_parser(p) do { module_put((p)->owner); } while(0)
+
#endif
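
The extended struct mtd_partition is still filled in the same way as before; the new oobsel member may simply be left NULL to keep the master's OOB layout. A sketch with made-up names and sizes:

    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    static struct mtd_partition board_parts[] = {
            {
                    .name       = "bootloader",
                    .offset     = 0,
                    .size       = 0x40000,
                    .mask_flags = MTD_WRITEABLE,   /* mask out writes: read-only */
            }, {
                    .name       = "rootfs",
                    .offset     = MTDPART_OFS_APPEND,  /* right after the previous one */
                    .size       = MTDPART_SIZ_FULL,    /* up to the end of the master  */
            },
    };

    /* Typically called by the map driver once the master MTD has been probed. */
    static int board_add_parts(struct mtd_info *master)
    {
            return add_mtd_partitions(master, board_parts, 2);
    }
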
diff --git a/linux-2.4.x/include/linux/mtd/physmap.h b/linux-2.4.x/include/linux/mtd/physmap.h
new file mode 100644
index 0000000..c7b8bcd
--- /dev/null
+++ b/linux-2.4.x/include/linux/mtd/physmap.h
@@ -0,0 +1,61 @@
+/*
+ * For boards with physically mapped flash and using
+ * drivers/mtd/maps/physmap.c mapping driver.
+ *
+ * $Id: physmap.h,v 1.4 2005/11/07 11:14:55 gleixner Exp $
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MTD_PHYSMAP__
+
+#include <linux/config.h>
+
+#if defined(CONFIG_MTD_PHYSMAP)
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+/*
+ * The map_info for physmap. Boards can override size, buswidth, phys,
+ * (*set_vpp)(), etc. in their initial setup routine.
+ */
+extern struct map_info physmap_map;
+
+/*
+ * Boards need to specify the exact mapping during their setup.
+ */
+static inline void physmap_configure(unsigned long addr, unsigned long size, int bankwidth, void (*set_vpp)(struct map_info *, int) )
+{
+ physmap_map.phys = addr;
+ physmap_map.size = size;
+ physmap_map.bankwidth = bankwidth;
+ physmap_map.set_vpp = set_vpp;
+}
+
+#if defined(CONFIG_MTD_PARTITIONS)
+
+/*
+ * Machines that wish to partition their flash may want to call this function
+ * in their setup routine.
+ *
+ * physmap_set_partitions(mypartitions, num_parts);
+ *
+ * Note that one can always override these hard-coded partitions with
+ * command-line partitions (you need to enable CONFIG_MTD_CMDLINE_PARTS).
+ */
+void physmap_set_partitions(struct mtd_partition *parts, int num_parts);
+
+#endif /* defined(CONFIG_MTD_PARTITIONS) */
+#endif /* defined(CONFIG_MTD) */
+
+#endif /* __LINUX_MTD_PHYSMAP__ */
+
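
With CONFIG_MTD_PHYSMAP enabled, a board's setup code only needs one call to describe its flash window; the address, size and bank width below are placeholders:

    #include <linux/init.h>
    #include <linux/mtd/physmap.h>

    #define BOARD_FLASH_BASE 0x10000000UL   /* illustrative */
    #define BOARD_FLASH_SIZE 0x00800000UL   /* 8 MiB, illustrative */

    static void __init board_map_flash(void)
    {
            /* 16-bit bank width, no VPP switching on this board. */
            physmap_configure(BOARD_FLASH_BASE, BOARD_FLASH_SIZE, 2, NULL);
    }
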
diff --git a/linux-2.4.x/include/linux/mtd/plat-ram.h b/linux-2.4.x/include/linux/mtd/plat-ram.h
new file mode 100644
index 0000000..2332eda
--- /dev/null
+++ b/linux-2.4.x/include/linux/mtd/plat-ram.h
@@ -0,0 +1,35 @@
+/* linux/include/mtd/plat-ram.h
+ *
+ * (c) 2004 Simtec Electronics
+ * http://www.simtec.co.uk/products/SWLINUX/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Generic platform device based RAM map
+ *
+ * $Id: plat-ram.h,v 1.2 2005/01/24 00:37:40 bjd Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LINUX_MTD_PLATRAM_H
+#define __LINUX_MTD_PLATRAM_H __FILE__
+
+#define PLATRAM_RO (0)
+#define PLATRAM_RW (1)
+
+struct platdata_mtd_ram {
+ char *mapname;
+ char **probes;
+ struct mtd_partition *partitions;
+ int nr_partitions;
+ int bankwidth;
+
+ /* control callbacks */
+
+ void (*set_rw)(struct device *dev, int to);
+};
+
+#endif /* __LINUX_MTD_PLATRAM_H */
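
The platform data is just a static description handed to the mapping driver. A sketch; the map name and the use of the command-line partition parser are assumptions, and set_rw is left NULL because the imagined board has no write-protect latch:

    #include <linux/mtd/plat-ram.h>

    static char *board_sram_probes[] = { "cmdlinepart", NULL };

    static struct platdata_mtd_ram board_sram_pdata = {
            .mapname       = "board-sram",
            .probes        = board_sram_probes,   /* partition parsers to try */
            .partitions    = NULL,                /* no hard-coded table      */
            .nr_partitions = 0,
            .bankwidth     = 2,
            .set_rw        = NULL,
    };
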
diff --git a/linux-2.4.x/include/linux/mtd/pmc551.h b/linux-2.4.x/include/linux/mtd/pmc551.h
index 640d3eb..a7f6d20 100644
--- a/linux-2.4.x/include/linux/mtd/pmc551.h
+++ b/linux-2.4.x/include/linux/mtd/pmc551.h
@@ -1,5 +1,5 @@
/*
- * $Id: pmc551.h,v 1.4 2001/06/12 16:19:38 major Exp $
+ * $Id: pmc551.h,v 1.6 2005/11/07 11:14:55 gleixner Exp $
*
* PMC551 PCI Mezzanine Ram Device
*
@@ -7,7 +7,7 @@
* Mark Ferrell
* Copyright 1999,2000 Nortel Networks
*
- * License:
+ * License:
* As part of this driver was derrived from the slram.c driver it falls
* under the same license, which is GNU General Public License v2
*/
@@ -17,7 +17,7 @@
#include <linux/mtd/mtd.h>
-#define PMC551_VERSION "$Id: pmc551.h,v 1.4 2001/06/12 16:19:38 major Exp $\n"\
+#define PMC551_VERSION "$Id: pmc551.h,v 1.6 2005/11/07 11:14:55 gleixner Exp $\n"\
"Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n"
/*
@@ -30,16 +30,16 @@ struct mypriv {
u32 curr_map0;
u32 asize;
struct mtd_info *nextpmc551;
-};
+};
/*
* Function Prototypes
*/
static int pmc551_erase(struct mtd_info *, struct erase_info *);
-static void pmc551_unpoint(struct mtd_info *, u_char *);
+static void pmc551_unpoint(struct mtd_info *, u_char *, loff_t, size_t);
static int pmc551_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf);
static int pmc551_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
-static int pmc551_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
+static int pmc551_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
/*
@@ -50,7 +50,7 @@ static int pmc551_write(struct mtd_info *, loff_t, size_t, size_t *, const u_cha
#endif
#ifndef PCI_DEVICE_ID_V3_SEMI_V370PDC
-#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
+#define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200
#endif
diff --git a/linux-2.4.x/include/linux/mtd/xip.h b/linux-2.4.x/include/linux/mtd/xip.h
new file mode 100644
index 0000000..220d50b
--- /dev/null
+++ b/linux-2.4.x/include/linux/mtd/xip.h
@@ -0,0 +1,102 @@
+/*
+ * MTD primitives for XIP support
+ *
+ * Author: Nicolas Pitre
+ * Created: Nov 2, 2004
+ * Copyright: (C) 2004 MontaVista Software, Inc.
+ *
+ * This XIP support for MTD has been loosely inspired
+ * by an earlier patch authored by David Woodhouse.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * $Id: xip.h,v 1.5 2005/11/07 11:14:55 gleixner Exp $
+ */
+
+#ifndef __LINUX_MTD_XIP_H__
+#define __LINUX_MTD_XIP_H__
+
+#include <linux/config.h>
+
+#ifdef CONFIG_MTD_XIP
+
+/*
+ * We really don't want gcc to guess anything.
+ * We absolutely _need_ proper inlining.
+ */
+#include <linux/compiler.h>
+
+/*
+ * Functions that modify the flash state away from array mode must
+ * obviously not run from flash themselves. The __xipram marker therefore
+ * tags those functions so they get relocated to RAM.
+ */
+#define __xipram noinline __attribute__ ((__section__ (".data")))
+
+/*
+ * Each architecture has to provide the following macros. They must access
+ * the hardware directly and not rely on any other (XIP) functions since they
+ * won't be available when used (flash not in array mode).
+ *
+ * xip_irqpending()
+ *
+ * return non-zero when any hardware interrupt is pending.
+ *
+ * xip_currtime()
+ *
+ * return a platform specific time reference to be used with
+ * xip_elapsed_since().
+ *
+ * xip_elapsed_since(x)
+ *
+ * return in usecs the elapsed time between now and the reference x as
+ * returned by xip_currtime().
+ *
+ * note 1: conversion to usec can be approximated, as long as the
+ * returned value is <= the real elapsed time.
+ * note 2: this should be able to cope with a few seconds without
+ * overflowing.
+ *
+ * xip_iprefetch()
+ *
+ * Macro to fill instruction prefetch
+ * e.g. a series of nops: asm volatile (".rep 8; nop; .endr");
+ */
+
+#include <asm/mtd-xip.h>
+
+#ifndef xip_irqpending
+
+#warning "missing IRQ and timer primitives for XIP MTD support"
+#warning "some of the XIP MTD support code will be disabled"
+#warning "your system will therefore be unresponsive when writing or erasing flash"
+
+#define xip_irqpending() (0)
+#define xip_currtime() (0)
+#define xip_elapsed_since(x) (0)
+
+#endif
+
+#ifndef xip_iprefetch
+#define xip_iprefetch() do { } while (0)
+#endif
+
+/*
+ * xip_cpu_idle() is used when waiting for a delay equal or larger than
+ * the system timer tick period. This should put the CPU into idle mode
+ * to save power and to be woken up only when some interrupts are pending.
+ * This should not rely upon standard kernel code.
+ */
+#ifndef xip_cpu_idle
+#define xip_cpu_idle() do { } while (0)
+#endif
+
+#else
+
+#define __xipram
+
+#endif /* CONFIG_MTD_XIP */
+
+#endif /* __LINUX_MTD_XIP_H__ */
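
Putting the primitives together (assuming CONFIG_MTD_XIP is enabled): a status-polling loop has to live in RAM and should yield the CPU for long waits. The ready() callback below is a stand-in for a chip-specific status check:

    #include <linux/errno.h>
    #include <linux/mtd/map.h>
    #include <linux/mtd/xip.h>

    static int __xipram wait_for_chip(struct map_info *map,
                                      int (*ready)(struct map_info *),
                                      unsigned long timeout_usec)
    {
            unsigned long start = xip_currtime();

            while (!ready(map)) {
                    if (xip_elapsed_since(start) > timeout_usec)
                            return -ETIME;
                    xip_cpu_idle();   /* doze until the next interrupt */
            }
            return 0;
    }
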
diff --git a/linux-2.4.x/include/linux/rbtree-24.h b/linux-2.4.x/include/linux/rbtree-24.h
new file mode 100644
index 0000000..f007490
--- /dev/null
+++ b/linux-2.4.x/include/linux/rbtree-24.h
@@ -0,0 +1,133 @@
+/*
+ Red Black Trees
+ (C) 1999 Andrea Arcangeli <andrea@suse.de>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ linux/include/linux/rbtree.h
+
+ To use rbtrees you'll have to implement your own insert and search cores.
+ This avoids callbacks, which would drop performance dramatically; it is
+ not the cleanest way, but it is how you get both performance and
+ genericity in C (as opposed to C++)...
+
+ An example of insert and search follows. The search is a plain,
+ ordinary search over an ordered tree. The insert, however, must be done
+ in two steps: first the code must insert the element in order as a red
+ leaf in the tree, then the support library function rb_insert_color()
+ must be called. That function does the non-trivial work of rebalancing
+ the rbtree if necessary.
+
+-----------------------------------------------------------------------
+static inline struct page * rb_search_page_cache(struct inode * inode,
+ unsigned long offset)
+{
+ rb_node_t * n = inode->i_rb_page_cache.rb_node;
+ struct page * page;
+
+ while (n)
+ {
+ page = rb_entry(n, struct page, rb_page_cache);
+
+ if (offset < page->offset)
+ n = n->rb_left;
+ else if (offset > page->offset)
+ n = n->rb_right;
+ else
+ return page;
+ }
+ return NULL;
+}
+
+static inline struct page * __rb_insert_page_cache(struct inode * inode,
+ unsigned long offset,
+ rb_node_t * node)
+{
+ rb_node_t ** p = &inode->i_rb_page_cache.rb_node;
+ rb_node_t * parent = NULL;
+ struct page * page;
+
+ while (*p)
+ {
+ parent = *p;
+ page = rb_entry(parent, struct page, rb_page_cache);
+
+ if (offset < page->offset)
+ p = &(*p)->rb_left;
+ else if (offset > page->offset)
+ p = &(*p)->rb_right;
+ else
+ return page;
+ }
+
+ rb_link_node(node, parent, p);
+
+ return NULL;
+}
+
+static inline struct page * rb_insert_page_cache(struct inode * inode,
+ unsigned long offset,
+ rb_node_t * node)
+{
+ struct page * ret;
+ if ((ret = __rb_insert_page_cache(inode, offset, node)))
+ goto out;
+ rb_insert_color(node, &inode->i_rb_page_cache);
+ out:
+ return ret;
+}
+-----------------------------------------------------------------------
+*/
+
+#ifndef _LINUX_RBTREE_H
+#define _LINUX_RBTREE_H
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+
+typedef struct rb_node_s
+{
+ struct rb_node_s * rb_parent;
+ int rb_color;
+#define RB_RED 0
+#define RB_BLACK 1
+ struct rb_node_s * rb_right;
+ struct rb_node_s * rb_left;
+}
+rb_node_t;
+
+typedef struct rb_root_s
+{
+ struct rb_node_s * rb_node;
+}
+rb_root_t;
+
+#define RB_ROOT (rb_root_t) { NULL, }
+#define rb_entry(ptr, type, member) \
+ ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
+
+extern void rb_insert_color(rb_node_t *, rb_root_t *);
+extern void rb_erase(rb_node_t *, rb_root_t *);
+
+static inline void rb_link_node(rb_node_t * node, rb_node_t * parent, rb_node_t ** rb_link)
+{
+ node->rb_parent = parent;
+ node->rb_color = RB_RED;
+ node->rb_left = node->rb_right = NULL;
+
+ *rb_link = node;
+}
+
+#endif /* _LINUX_RBTREE_H */
diff --git a/linux-2.4.x/include/linux/rbtree.h b/linux-2.4.x/include/linux/rbtree.h
index 96f20e1..62c407a 100644
--- a/linux-2.4.x/include/linux/rbtree.h
+++ b/linux-2.4.x/include/linux/rbtree.h
@@ -1,133 +1,25 @@
/*
- Red Black Trees
- (C) 1999 Andrea Arcangeli <andrea@suse.de>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ * 2.5 compatibility
+ * $Id: rbtree.h,v 1.3 2003/01/14 13:56:05 dwmw2 Exp $
+ */
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+#ifndef __MTD_COMPAT_RBTREE_H__
+#define __MTD_COMPAT_RBTREE_H__
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#include <linux/version.h>
- linux/include/linux/rbtree.h
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,40)
+#include_next <linux/rbtree.h>
+#else
+#define rb_node_s rb_node
+#define rb_root_s rb_root
- To use rbtrees you'll have to implement your own insert and search cores.
- This will avoid us to use callbacks and to drop drammatically performances.
- I know it's not the cleaner way, but in C (not in C++) to get
- performances and genericity...
+#include <linux/rbtree-24.h>
- Some example of insert and search follows here. The search is a plain
- normal search over an ordered tree. The insert instead must be implemented
- int two steps: as first thing the code must insert the element in
- order as a red leaf in the tree, then the support library function
- rb_insert_color() must be called. Such function will do the
- not trivial work to rebalance the rbtree if necessary.
+/* Find logical next and previous nodes in a tree */
+extern struct rb_node *rb_next(struct rb_node *);
+extern struct rb_node *rb_prev(struct rb_node *);
+extern struct rb_node *rb_first(struct rb_root *);
+#endif
------------------------------------------------------------------------
-static inline struct page * rb_search_page_cache(struct inode * inode,
- unsigned long offset)
-{
- rb_node_t * n = inode->i_rb_page_cache.rb_node;
- struct page * page;
-
- while (n)
- {
- page = rb_entry(n, struct page, rb_page_cache);
-
- if (offset < page->offset)
- n = n->rb_left;
- else if (offset > page->offset)
- n = n->rb_right;
- else
- return page;
- }
- return NULL;
-}
-
-static inline struct page * __rb_insert_page_cache(struct inode * inode,
- unsigned long offset,
- rb_node_t * node)
-{
- rb_node_t ** p = &inode->i_rb_page_cache.rb_node;
- rb_node_t * parent = NULL;
- struct page * page;
-
- while (*p)
- {
- parent = *p;
- page = rb_entry(parent, struct page, rb_page_cache);
-
- if (offset < page->offset)
- p = &(*p)->rb_left;
- else if (offset > page->offset)
- p = &(*p)->rb_right;
- else
- return page;
- }
-
- rb_link_node(node, parent, p);
-
- return NULL;
-}
-
-static inline struct page * rb_insert_page_cache(struct inode * inode,
- unsigned long offset,
- rb_node_t * node)
-{
- struct page * ret;
- if ((ret = __rb_insert_page_cache(inode, offset, node)))
- goto out;
- rb_insert_color(node, &inode->i_rb_page_cache);
- out:
- return ret;
-}
------------------------------------------------------------------------
-*/
-
-#ifndef _LINUX_RBTREE_H
-#define _LINUX_RBTREE_H
-
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-
-typedef struct rb_node_s
-{
- struct rb_node_s * rb_parent;
- int rb_color;
-#define RB_RED 0
-#define RB_BLACK 1
- struct rb_node_s * rb_right;
- struct rb_node_s * rb_left;
-}
-rb_node_t;
-
-typedef struct rb_root_s
-{
- struct rb_node_s * rb_node;
-}
-rb_root_t;
-
-#define RB_ROOT (rb_root_t) { NULL, }
-#define rb_entry(ptr, type, member) \
- ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
-
-extern void rb_insert_color(rb_node_t *, rb_root_t *);
-extern void rb_erase(rb_node_t *, rb_root_t *);
-
-static inline void rb_link_node(rb_node_t * node, rb_node_t * parent, rb_node_t ** rb_link)
-{
- node->rb_parent = parent;
- node->rb_color = RB_RED;
- node->rb_left = node->rb_right = NULL;
-
- *rb_link = node;
-}
-
-#endif /* _LINUX_RBTREE_H */
+#endif /* __MTD_COMPAT_RBTREE_H__ */
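
The compat header also gives 2.4 the rb_first()/rb_next() iterators that newer kernels provide natively, so tree walks can be written once. A sketch with an illustrative struct:

    #include <linux/rbtree.h>

    struct thing {
            unsigned long key;
            struct rb_node node;   /* rb_node_s on 2.4 via the compat define */
    };

    static void walk_things(struct rb_root *root)
    {
            struct rb_node *n;

            for (n = rb_first(root); n; n = rb_next(n)) {
                    struct thing *t = rb_entry(n, struct thing, node);
                    (void)t;   /* visit entries in ascending key order */
            }
    }
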
diff --git a/linux-2.4.x/include/linux/rslib.h b/linux-2.4.x/include/linux/rslib.h
new file mode 100644
index 0000000..ace25ac
--- /dev/null
+++ b/linux-2.4.x/include/linux/rslib.h
@@ -0,0 +1,105 @@
+/*
+ * include/linux/rslib.h
+ *
+ * Overview:
+ * Generic Reed Solomon encoder / decoder library
+ *
+ * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * RS code lifted from reed solomon library written by Phil Karn
+ * Copyright 2002 Phil Karn, KA9Q
+ *
+ * $Id: rslib.h,v 1.4 2005/11/07 11:14:52 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _RSLIB_H_
+#define _RSLIB_H_
+
+#include <linux/list.h>
+
+/**
+ * struct rs_control - rs control structure
+ *
+ * @mm: Bits per symbol
+ * @nn: Symbols per block (= (1<<mm)-1)
+ * @alpha_to: log lookup table
+ * @index_of: Antilog lookup table
+ * @genpoly: Generator polynomial
+ * @nroots: Number of generator roots = number of parity symbols
+ * @fcr: First consecutive root, index form
+ * @prim: Primitive element, index form
+ * @iprim: prim-th root of 1, index form
+ * @gfpoly: The primitive generator polynominal
+ * @users: Users of this structure
+ * @list: List entry for the rs control list
+*/
+struct rs_control {
+ int mm;
+ int nn;
+ uint16_t *alpha_to;
+ uint16_t *index_of;
+ uint16_t *genpoly;
+ int nroots;
+ int fcr;
+ int prim;
+ int iprim;
+ int gfpoly;
+ int users;
+ struct list_head list;
+};
+
+/* General purpose RS codec, 8-bit data width, symbol width 1-15 bit */
+#ifdef CONFIG_REED_SOLOMON_ENC8
+int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par,
+ uint16_t invmsk);
+#endif
+#ifdef CONFIG_REED_SOLOMON_DEC8
+int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len,
+ uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
+ uint16_t *corr);
+#endif
+
+/* General purpose RS codec, 16-bit data width, symbol width 1-15 bit */
+#ifdef CONFIG_REED_SOLOMON_ENC16
+int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par,
+ uint16_t invmsk);
+#endif
+#ifdef CONFIG_REED_SOLOMON_DEC16
+int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
+ uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
+ uint16_t *corr);
+#endif
+
+/* Create or get a matching rs control structure */
+struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
+ int nroots);
+
+/* Release a rs control structure */
+void free_rs(struct rs_control *rs);
+
+/** modulo replacement for Galois field arithmetic
+ *
+ * @rs: the rs control structure
+ * @x: the value to reduce
+ *
+ * where
+ * rs->mm = number of bits per symbol
+ * rs->nn = (2^rs->mm) - 1
+ *
+ * Simple arithmetic modulo would return a wrong result for values
+ * >= 3 * rs->nn
+*/
+static inline int rs_modnn(struct rs_control *rs, int x)
+{
+ while (x >= rs->nn) {
+ x -= rs->nn;
+ x = (x >> rs->mm) + (x & rs->nn);
+ }
+ return x;
+}
+
+#endif
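
A round trip through the 8-bit codec looks like the sketch below. The GF(256) polynomial 0x11d and the choice of four parity symbols are illustrative, and the matching CONFIG_REED_SOLOMON_ENC8/DEC8 options must be enabled:

    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/rslib.h>

    static int rs_roundtrip(uint8_t *buf, int len)
    {
            uint16_t par[4];
            int corrected;
            struct rs_control *rs = init_rs(8, 0x11d, 0, 1, 4);

            if (!rs)
                    return -ENOMEM;

            memset(par, 0, sizeof(par));        /* parity accumulates by XOR */
            encode_rs8(rs, buf, len, par, 0);   /* fills the 4 parity symbols */

            /* ... buf and par pass through the unreliable medium ... */

            corrected = decode_rs8(rs, buf, par, len, NULL, 0, NULL, 0, NULL);
            free_rs(rs);
            return corrected;   /* >= 0: symbols corrected in place, < 0: failure */
    }
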
diff --git a/linux-2.4.x/include/linux/suspend.h b/linux-2.4.x/include/linux/suspend.h
new file mode 100644
index 0000000..e9b228f
--- /dev/null
+++ b/linux-2.4.x/include/linux/suspend.h
@@ -0,0 +1,10 @@
+/* $Id: suspend.h,v 1.1 2003/10/13 20:56:47 dwmw2 Exp $ */
+
+#ifndef __MTD_COMPAT_VERSION_H__
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#include_next <linux/suspend.h>
+#endif
+
+#endif /* __MTD_COMPAT_VERSION_H__ */
diff --git a/linux-2.4.x/include/linux/workqueue.h b/linux-2.4.x/include/linux/workqueue.h
new file mode 100644
index 0000000..d93b64a
--- /dev/null
+++ b/linux-2.4.x/include/linux/workqueue.h
@@ -0,0 +1,21 @@
+/*
+ * 2.5 compatibility
+ * $Id: workqueue.h,v 1.2 2005/11/07 11:14:52 gleixner Exp $
+ */
+
+#ifndef __MTD_COMPAT_WORKQUEUE_H__
+#define __MTD_COMPAT_WORKQUEUE_H__
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,40)
+#include_next <linux/workqueue.h>
+#else
+#include <linux/tqueue.h>
+#define work_struct tq_struct
+#define schedule_work(x) schedule_task(x)
+#define flush_scheduled_work flush_scheduled_tasks
+#define INIT_WORK(x,y,z) INIT_TQUEUE(x,y,z)
+#endif
+
+#endif /* __MTD_COMPAT_WORKQUEUE_H__ */
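
Code that sticks to the three-argument INIT_WORK() form compiles unchanged on 2.4, where the macros above turn it into a task queue, and on later kernels. A minimal sketch:

    #include <linux/workqueue.h>

    static void my_worker(void *data)
    {
            /* deferred work runs here, in process context */
    }

    static struct work_struct my_work;

    static void kick_worker(void)
    {
            INIT_WORK(&my_work, my_worker, NULL);
            schedule_work(&my_work);
    }
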
diff --git a/linux-2.4.x/include/mtd/inftl-user.h b/linux-2.4.x/include/mtd/inftl-user.h
new file mode 100644
index 0000000..9b1e252
--- /dev/null
+++ b/linux-2.4.x/include/mtd/inftl-user.h
@@ -0,0 +1,91 @@
+/*
+ * $Id: inftl-user.h,v 1.2 2005/11/07 11:14:56 gleixner Exp $
+ *
+ * Parts of INFTL headers shared with userspace
+ *
+ */
+
+#ifndef __MTD_INFTL_USER_H__
+#define __MTD_INFTL_USER_H__
+
+#define OSAK_VERSION 0x5120
+#define PERCENTUSED 98
+
+#define SECTORSIZE 512
+
+/* Block Control Information */
+
+struct inftl_bci {
+ uint8_t ECCsig[6];
+ uint8_t Status;
+ uint8_t Status1;
+} __attribute__((packed));
+
+struct inftl_unithead1 {
+ uint16_t virtualUnitNo;
+ uint16_t prevUnitNo;
+ uint8_t ANAC;
+ uint8_t NACs;
+ uint8_t parityPerField;
+ uint8_t discarded;
+} __attribute__((packed));
+
+struct inftl_unithead2 {
+ uint8_t parityPerField;
+ uint8_t ANAC;
+ uint16_t prevUnitNo;
+ uint16_t virtualUnitNo;
+ uint8_t NACs;
+ uint8_t discarded;
+} __attribute__((packed));
+
+struct inftl_unittail {
+ uint8_t Reserved[4];
+ uint16_t EraseMark;
+ uint16_t EraseMark1;
+} __attribute__((packed));
+
+union inftl_uci {
+ struct inftl_unithead1 a;
+ struct inftl_unithead2 b;
+ struct inftl_unittail c;
+};
+
+struct inftl_oob {
+ struct inftl_bci b;
+ union inftl_uci u;
+};
+
+
+/* INFTL Media Header */
+
+struct INFTLPartition {
+ __u32 virtualUnits;
+ __u32 firstUnit;
+ __u32 lastUnit;
+ __u32 flags;
+ __u32 spareUnits;
+ __u32 Reserved0;
+ __u32 Reserved1;
+} __attribute__((packed));
+
+struct INFTLMediaHeader {
+ char bootRecordID[8];
+ __u32 NoOfBootImageBlocks;
+ __u32 NoOfBinaryPartitions;
+ __u32 NoOfBDTLPartitions;
+ __u32 BlockMultiplierBits;
+ __u32 FormatFlags;
+ __u32 OsakVersion;
+ __u32 PercentUsed;
+ struct INFTLPartition Partitions[4];
+} __attribute__((packed));
+
+/* Partition flag types */
+#define INFTL_BINARY 0x20000000
+#define INFTL_BDTL 0x40000000
+#define INFTL_LAST 0x80000000
+
+#endif /* __MTD_INFTL_USER_H__ */
+
+
diff --git a/linux-2.4.x/include/mtd/jffs2-user.h b/linux-2.4.x/include/mtd/jffs2-user.h
new file mode 100644
index 0000000..d508ef0
--- /dev/null
+++ b/linux-2.4.x/include/mtd/jffs2-user.h
@@ -0,0 +1,35 @@
+/*
+ * $Id: jffs2-user.h,v 1.1 2004/05/05 11:57:54 dwmw2 Exp $
+ *
+ * JFFS2 definitions for use in user space only
+ */
+
+#ifndef __JFFS2_USER_H__
+#define __JFFS2_USER_H__
+
+/* This file is blessed for inclusion by userspace */
+#include <linux/jffs2.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#undef cpu_to_je16
+#undef cpu_to_je32
+#undef cpu_to_jemode
+#undef je16_to_cpu
+#undef je32_to_cpu
+#undef jemode_to_cpu
+
+extern int target_endian;
+
+#define t16(x) ({ uint16_t __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_16(__b); })
+#define t32(x) ({ uint32_t __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_32(__b); })
+
+#define cpu_to_je16(x) ((jint16_t){t16(x)})
+#define cpu_to_je32(x) ((jint32_t){t32(x)})
+#define cpu_to_jemode(x) ((jmode_t){t32(x)})
+
+#define je16_to_cpu(x) (t16((x).v16))
+#define je32_to_cpu(x) (t32((x).v32))
+#define jemode_to_cpu(x) (t32((x).m))
+
+#endif /* __JFFS2_USER_H__ */
diff --git a/linux-2.4.x/include/mtd/mtd-abi.h b/linux-2.4.x/include/mtd/mtd-abi.h
new file mode 100644
index 0000000..00ff9db
--- /dev/null
+++ b/linux-2.4.x/include/mtd/mtd-abi.h
@@ -0,0 +1,122 @@
+/*
+ * $Id: mtd-abi.h,v 1.14 2006/02/09 16:12:39 dwmw2 Exp $
+ *
+ * Portions of MTD ABI definition which are shared by kernel and user space
+ */
+
+#ifndef __MTD_ABI_H__
+#define __MTD_ABI_H__
+
+#ifndef __KERNEL__ /* Urgh. The whole point of splitting this out into
+ separate files was to avoid #ifdef __KERNEL__ */
+#define __user
+#endif
+
+struct erase_info_user {
+ uint32_t start;
+ uint32_t length;
+};
+
+struct mtd_oob_buf {
+ uint32_t start;
+ uint32_t length;
+ unsigned char __user *ptr;
+};
+
+#define MTD_ABSENT 0
+#define MTD_RAM 1
+#define MTD_ROM 2
+#define MTD_NORFLASH 3
+#define MTD_NANDFLASH 4
+#define MTD_PEROM 5
+#define MTD_DATAFLASH 6
+#define MTD_BLOCK 7
+#define MTD_OTHER 14
+#define MTD_UNKNOWN 15
+
+#define MTD_CLEAR_BITS 1 // Bits can be cleared (flash)
+#define MTD_SET_BITS 2 // Bits can be set
+#define MTD_ERASEABLE 4 // Has an erase function
+#define MTD_WRITEB_WRITEABLE 8 // Direct IO is possible
+#define MTD_VOLATILE 16 // Set for RAMs
+#define MTD_XIP 32 // eXecute-In-Place possible
+#define MTD_OOB 64 // Out-of-band data (NAND flash)
+#define MTD_ECC 128 // Device capable of automatic ECC
+#define MTD_NO_VIRTBLOCKS 256 // Virtual blocks not allowed
+#define MTD_PROGRAM_REGIONS 512 // Configurable Programming Regions
+
+// Some common devices / combinations of capabilities
+#define MTD_CAP_ROM 0
+#define MTD_CAP_RAM (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEB_WRITEABLE)
+#define MTD_CAP_NORFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE)
+#define MTD_CAP_NANDFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE|MTD_OOB)
+#define MTD_WRITEABLE (MTD_CLEAR_BITS|MTD_SET_BITS)
+
+
+// Types of automatic ECC/Checksum available
+#define MTD_ECC_NONE 0 // No automatic ECC available
+#define MTD_ECC_RS_DiskOnChip 1 // Automatic ECC on DiskOnChip
+#define MTD_ECC_SW 2 // SW ECC for Toshiba & Samsung devices
+
+/* ECC byte placement */
+#define MTD_NANDECC_OFF 0 // Switch off ECC (Not recommended)
+#define MTD_NANDECC_PLACE 1 // Use the given placement in the structure (YAFFS1 legacy mode)
+#define MTD_NANDECC_AUTOPLACE 2 // Use the default placement scheme
+#define MTD_NANDECC_PLACEONLY 3 // Use the given placement in the structure (Do not store ecc result on read)
+#define MTD_NANDECC_AUTOPL_USR 4 // Use the given autoplacement scheme rather than using the default
+
+/* OTP mode selection */
+#define MTD_OTP_OFF 0
+#define MTD_OTP_FACTORY 1
+#define MTD_OTP_USER 2
+
+struct mtd_info_user {
+ uint8_t type;
+ uint32_t flags;
+ uint32_t size; // Total size of the MTD
+ uint32_t erasesize;
+ uint32_t oobblock; // Size of OOB blocks (e.g. 512)
+ uint32_t oobsize; // Amount of OOB data per block (e.g. 16)
+ uint32_t ecctype;
+ uint32_t eccsize;
+};
+
+struct region_info_user {
+ uint32_t offset; /* At which this region starts,
+ * from the beginning of the MTD */
+ uint32_t erasesize; /* For this region */
+ uint32_t numblocks; /* Number of blocks in this region */
+ uint32_t regionindex;
+};
+
+struct otp_info {
+ uint32_t start;
+ uint32_t length;
+ uint32_t locked;
+};
+
+#define MEMGETINFO _IOR('M', 1, struct mtd_info_user)
+#define MEMERASE _IOW('M', 2, struct erase_info_user)
+#define MEMWRITEOOB _IOWR('M', 3, struct mtd_oob_buf)
+#define MEMREADOOB _IOWR('M', 4, struct mtd_oob_buf)
+#define MEMLOCK _IOW('M', 5, struct erase_info_user)
+#define MEMUNLOCK _IOW('M', 6, struct erase_info_user)
+#define MEMGETREGIONCOUNT _IOR('M', 7, int)
+#define MEMGETREGIONINFO _IOWR('M', 8, struct region_info_user)
+#define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo)
+#define MEMGETOOBSEL _IOR('M', 10, struct nand_oobinfo)
+#define MEMGETBADBLOCK _IOW('M', 11, loff_t)
+#define MEMSETBADBLOCK _IOW('M', 12, loff_t)
+#define OTPSELECT _IOR('M', 13, int)
+#define OTPGETREGIONCOUNT _IOW('M', 14, int)
+#define OTPGETREGIONINFO _IOW('M', 15, struct otp_info)
+#define OTPLOCK _IOR('M', 16, struct otp_info)
+
+struct nand_oobinfo {
+ uint32_t useecc;
+ uint32_t eccbytes;
+ uint32_t oobfree[8][2];
+ uint32_t eccpos[32];
+};
+
+#endif /* __MTD_ABI_H__ */
diff --git a/linux-2.4.x/include/mtd/mtd-user.h b/linux-2.4.x/include/mtd/mtd-user.h
new file mode 100644
index 0000000..1c13fc7
--- /dev/null
+++ b/linux-2.4.x/include/mtd/mtd-user.h
@@ -0,0 +1,20 @@
+/*
+ * $Id: mtd-user.h,v 1.2 2004/05/05 14:44:57 dwmw2 Exp $
+ *
+ * MTD ABI header for use by user space only.
+ */
+
+#ifndef __MTD_USER_H__
+#define __MTD_USER_H__
+
+#include <stdint.h>
+
+/* This file is blessed for inclusion by userspace */
+#include <mtd/mtd-abi.h>
+
+typedef struct mtd_info_user mtd_info_t;
+typedef struct erase_info_user erase_info_t;
+typedef struct region_info_user region_info_t;
+typedef struct nand_oobinfo nand_oobinfo_t;
+
+#endif /* __MTD_USER_H__ */
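
From user space the typedefs pair up with the ioctls from mtd-abi.h. A sketch that queries the geometry and erases the first block; /dev/mtd0 is an illustrative device node:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <mtd/mtd-user.h>

    int main(void)
    {
            mtd_info_t info;
            erase_info_t erase;
            int fd = open("/dev/mtd0", O_RDWR);

            if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0) {
                    perror("mtd");
                    return 1;
            }
            printf("size %u, erasesize %u, oobblock %u\n",
                   (unsigned)info.size, (unsigned)info.erasesize, (unsigned)info.oobblock);

            erase.start = 0;
            erase.length = info.erasesize;
            if (ioctl(fd, MEMERASE, &erase) < 0)
                    perror("MEMERASE");

            close(fd);
            return 0;
    }
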
diff --git a/linux-2.4.x/include/mtd/nftl-user.h b/linux-2.4.x/include/mtd/nftl-user.h
new file mode 100644
index 0000000..b2bca18
--- /dev/null
+++ b/linux-2.4.x/include/mtd/nftl-user.h
@@ -0,0 +1,76 @@
+/*
+ * $Id: nftl-user.h,v 1.2 2005/11/07 11:14:56 gleixner Exp $
+ *
+ * Parts of NFTL headers shared with userspace
+ *
+ */
+
+#ifndef __MTD_NFTL_USER_H__
+#define __MTD_NFTL_USER_H__
+
+/* Block Control Information */
+
+struct nftl_bci {
+ unsigned char ECCSig[6];
+ uint8_t Status;
+ uint8_t Status1;
+}__attribute__((packed));
+
+/* Unit Control Information */
+
+struct nftl_uci0 {
+ uint16_t VirtUnitNum;
+ uint16_t ReplUnitNum;
+ uint16_t SpareVirtUnitNum;
+ uint16_t SpareReplUnitNum;
+} __attribute__((packed));
+
+struct nftl_uci1 {
+ uint32_t WearInfo;
+ uint16_t EraseMark;
+ uint16_t EraseMark1;
+} __attribute__((packed));
+
+struct nftl_uci2 {
+ uint16_t FoldMark;
+ uint16_t FoldMark1;
+ uint32_t unused;
+} __attribute__((packed));
+
+union nftl_uci {
+ struct nftl_uci0 a;
+ struct nftl_uci1 b;
+ struct nftl_uci2 c;
+};
+
+struct nftl_oob {
+ struct nftl_bci b;
+ union nftl_uci u;
+};
+
+/* NFTL Media Header */
+
+struct NFTLMediaHeader {
+ char DataOrgID[6];
+ uint16_t NumEraseUnits;
+ uint16_t FirstPhysicalEUN;
+ uint32_t FormattedSize;
+ unsigned char UnitSizeFactor;
+} __attribute__((packed));
+
+#define MAX_ERASE_ZONES (8192 - 512)
+
+#define ERASE_MARK 0x3c69
+#define SECTOR_FREE 0xff
+#define SECTOR_USED 0x55
+#define SECTOR_IGNORE 0x11
+#define SECTOR_DELETED 0x00
+
+#define FOLD_MARK_IN_PROGRESS 0x5555
+
+#define ZONE_GOOD 0xff
+#define ZONE_BAD_ORIGINAL 0
+#define ZONE_BAD_MARKED 7
+
+
+#endif /* __MTD_NFTL_USER_H__ */
diff --git a/linux-2.4.x/lib/Config.in b/linux-2.4.x/lib/Config.in
index 5edba1f..bf8cd09 100644
--- a/linux-2.4.x/lib/Config.in
+++ b/linux-2.4.x/lib/Config.in
@@ -4,17 +4,21 @@
mainmenu_option next_comment
comment 'Library routines'
+tristate 'CRC32 functions' CONFIG_CRC32
+
#
# Do we need the compression support?
#
if [ "$CONFIG_CRAMFS" = "y" -o \
"$CONFIG_PPP_DEFLATE" = "y" -o \
+ "$CONFIG_CRYPTO_DEFLATE" = "y" -o \
"$CONFIG_JFFS2_FS" = "y" -o \
"$CONFIG_ZISOFS_FS" = "y" ]; then
define_tristate CONFIG_ZLIB_INFLATE y
else
if [ "$CONFIG_CRAMFS" = "m" -o \
"$CONFIG_PPP_DEFLATE" = "m" -o \
+ "$CONFIG_CRYPTO_DEFLATE" = "m" -o \
"$CONFIG_JFFS2_FS" = "m" -o \
"$CONFIG_ZISOFS_FS" = "m" ]; then
define_tristate CONFIG_ZLIB_INFLATE m
@@ -24,10 +28,12 @@ else
fi
if [ "$CONFIG_PPP_DEFLATE" = "y" -o \
+ "$CONFIG_CRYPTO_DEFLATE" = "y" -o \
"$CONFIG_JFFS2_FS" = "y" ]; then
define_tristate CONFIG_ZLIB_DEFLATE y
else
if [ "$CONFIG_PPP_DEFLATE" = "m" -o \
+ "$CONFIG_CRYPTO_DEFLATE" = "m" -o \
"$CONFIG_JFFS2_FS" = "m" ]; then
define_tristate CONFIG_ZLIB_DEFLATE m
else
@@ -35,4 +41,24 @@ else
fi
fi
+if [ "$CONFIG_MTD_DOCPROBE" = "y" -o \
+ "$CONFIG_MTD_NAND_RTC_FROM4" = "y" -o \
+ "$CONFIG_MTD_NAND_DISKONCHIP" = "y" ]; then
+ define_tristate CONFIG_REED_SOLOMON y
+ define_tristate CONFIG_REED_SOLOMON_DEC16 y
+else
+ if [ "$CONFIG_MTD_DOCPROBE" = "m" -o \
+ "$CONFIG_MTD_NAND_RTC_FROM4" = "m" -o \
+ "$CONFIG_MTD_NAND_DISKONCHIP" = "m" ]; then
+ define_tristate CONFIG_REED_SOLOMON m
+ define_tristate CONFIG_REED_SOLOMON_DEC16 y
+ else
+ define_tristate CONFIG_REED_SOLOMON n
+ fi
+fi
+if [ "$CONFIG_EXPERIMENTAL" = "y" -a \
+ "$CONFIG_HOTPLUG" = "y" ]; then
+ tristate 'Hotplug firmware loading support (EXPERIMENTAL)' CONFIG_FW_LOADER
+fi
+
endmenu
diff --git a/linux-2.4.x/lib/Config.in.orig b/linux-2.4.x/lib/Config.in.orig
new file mode 100644
index 0000000..5edba1f
--- /dev/null
+++ b/linux-2.4.x/lib/Config.in.orig
@@ -0,0 +1,38 @@
+#
+# Library configuration
+#
+mainmenu_option next_comment
+comment 'Library routines'
+
+#
+# Do we need the compression support?
+#
+if [ "$CONFIG_CRAMFS" = "y" -o \
+ "$CONFIG_PPP_DEFLATE" = "y" -o \
+ "$CONFIG_JFFS2_FS" = "y" -o \
+ "$CONFIG_ZISOFS_FS" = "y" ]; then
+ define_tristate CONFIG_ZLIB_INFLATE y
+else
+ if [ "$CONFIG_CRAMFS" = "m" -o \
+ "$CONFIG_PPP_DEFLATE" = "m" -o \
+ "$CONFIG_JFFS2_FS" = "m" -o \
+ "$CONFIG_ZISOFS_FS" = "m" ]; then
+ define_tristate CONFIG_ZLIB_INFLATE m
+ else
+ tristate 'zlib decompression support' CONFIG_ZLIB_INFLATE
+ fi
+fi
+
+if [ "$CONFIG_PPP_DEFLATE" = "y" -o \
+ "$CONFIG_JFFS2_FS" = "y" ]; then
+ define_tristate CONFIG_ZLIB_DEFLATE y
+else
+ if [ "$CONFIG_PPP_DEFLATE" = "m" -o \
+ "$CONFIG_JFFS2_FS" = "m" ]; then
+ define_tristate CONFIG_ZLIB_DEFLATE m
+ else
+ tristate 'zlib compression support' CONFIG_ZLIB_DEFLATE
+ fi
+fi
+
+endmenu
diff --git a/linux-2.4.x/lib/Makefile b/linux-2.4.x/lib/Makefile
index 82067c9..237da86 100644
--- a/linux-2.4.x/lib/Makefile
+++ b/linux-2.4.x/lib/Makefile
@@ -22,6 +22,7 @@ endif
subdir-$(CONFIG_ZLIB_INFLATE) += zlib_inflate
subdir-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate
+subdir-$(CONFIG_REED_SOLOMON) += reed_solomon
# Include the subdirs, if necessary.
obj-y += $(join $(subdir-y),$(subdir-y:%=/%.o))
diff --git a/linux-2.4.x/lib/Makefile.orig b/linux-2.4.x/lib/Makefile.orig
new file mode 100644
index 0000000..82067c9
--- /dev/null
+++ b/linux-2.4.x/lib/Makefile.orig
@@ -0,0 +1,29 @@
+#
+# Makefile for some libs needed in the kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+
+L_TARGET := lib.a
+
+export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o rbtree.o
+
+obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o \
+ bust_spinlocks.o rbtree.o dump_stack.o
+
+obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+
+ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
+ obj-y += dec_and_lock.o
+endif
+
+subdir-$(CONFIG_ZLIB_INFLATE) += zlib_inflate
+subdir-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate
+
+# Include the subdirs, if necessary.
+obj-y += $(join $(subdir-y),$(subdir-y:%=/%.o))
+
+include $(TOPDIR)/Rules.make
diff --git a/linux-2.4.x/lib/reed_solomon/Makefile b/linux-2.4.x/lib/reed_solomon/Makefile
new file mode 100644
index 0000000..0d623aa
--- /dev/null
+++ b/linux-2.4.x/lib/reed_solomon/Makefile
@@ -0,0 +1,12 @@
+#
+# This is a modified version of the Reed-Solomon library.
+#
+
+O_TARGET:= reed_solomon.o
+
+export-objs := rslib.o
+
+obj-y := rslib.o
+obj-m := $(O_TARGET)
+
+include $(TOPDIR)/Rules.make
diff --git a/linux-2.4.x/lib/reed_solomon/decode_rs.c b/linux-2.4.x/lib/reed_solomon/decode_rs.c
new file mode 100644
index 0000000..a58df56
--- /dev/null
+++ b/linux-2.4.x/lib/reed_solomon/decode_rs.c
@@ -0,0 +1,272 @@
+/*
+ * lib/reed_solomon/decode_rs.c
+ *
+ * Overview:
+ * Generic Reed Solomon encoder / decoder library
+ *
+ * Copyright 2002, Phil Karn, KA9Q
+ * May be used under the terms of the GNU General Public License (GPL)
+ *
+ * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de)
+ *
+ * $Id: decode_rs.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $
+ *
+ */
+
+/* Generic data width independent code which is included by the
+ * wrappers.
+ */
+{
+ int deg_lambda, el, deg_omega;
+ int i, j, r, k, pad;
+ int nn = rs->nn;
+ int nroots = rs->nroots;
+ int fcr = rs->fcr;
+ int prim = rs->prim;
+ int iprim = rs->iprim;
+ uint16_t *alpha_to = rs->alpha_to;
+ uint16_t *index_of = rs->index_of;
+ uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
+ /* Err+Eras Locator poly and syndrome poly. The maximum value
+ * of nroots is 8, so the necessary stack size will be about
+ * 220 bytes max.
+ */
+ uint16_t lambda[nroots + 1], syn[nroots];
+ uint16_t b[nroots + 1], t[nroots + 1], omega[nroots + 1];
+ uint16_t root[nroots], reg[nroots + 1], loc[nroots];
+ int count = 0;
+ uint16_t msk = (uint16_t) rs->nn;
+
+ /* Check length parameter for validity */
+ pad = nn - nroots - len;
+ if (pad < 0 || pad >= nn)
+ return -ERANGE;
+
+ /* Does the caller provide the syndrome ? */
+ if (s != NULL)
+ goto decode;
+
+ /* form the syndromes; i.e., evaluate data(x) at roots of
+ * g(x) */
+ for (i = 0; i < nroots; i++)
+ syn[i] = (((uint16_t) data[0]) ^ invmsk) & msk;
+
+ for (j = 1; j < len; j++) {
+ for (i = 0; i < nroots; i++) {
+ if (syn[i] == 0) {
+ syn[i] = (((uint16_t) data[j]) ^
+ invmsk) & msk;
+ } else {
+ syn[i] = ((((uint16_t) data[j]) ^
+ invmsk) & msk) ^
+ alpha_to[rs_modnn(rs, index_of[syn[i]] +
+ (fcr + i) * prim)];
+ }
+ }
+ }
+
+ for (j = 0; j < nroots; j++) {
+ for (i = 0; i < nroots; i++) {
+ if (syn[i] == 0) {
+ syn[i] = ((uint16_t) par[j]) & msk;
+ } else {
+ syn[i] = (((uint16_t) par[j]) & msk) ^
+ alpha_to[rs_modnn(rs, index_of[syn[i]] +
+ (fcr+i)*prim)];
+ }
+ }
+ }
+ s = syn;
+
+ /* Convert syndromes to index form, checking for nonzero condition */
+ syn_error = 0;
+ for (i = 0; i < nroots; i++) {
+ syn_error |= s[i];
+ s[i] = index_of[s[i]];
+ }
+
+ if (!syn_error) {
+ /* if syndrome is zero, data[] is a codeword and there are no
+ * errors to correct. So return data[] unmodified
+ */
+ count = 0;
+ goto finish;
+ }
+
+ decode:
+ memset(&lambda[1], 0, nroots * sizeof(lambda[0]));
+ lambda[0] = 1;
+
+ if (no_eras > 0) {
+ /* Init lambda to be the erasure locator polynomial */
+ lambda[1] = alpha_to[rs_modnn(rs,
+ prim * (nn - 1 - eras_pos[0]))];
+ for (i = 1; i < no_eras; i++) {
+ u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
+ for (j = i + 1; j > 0; j--) {
+ tmp = index_of[lambda[j - 1]];
+ if (tmp != nn) {
+ lambda[j] ^=
+ alpha_to[rs_modnn(rs, u + tmp)];
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < nroots + 1; i++)
+ b[i] = index_of[lambda[i]];
+
+ /*
+ * Begin Berlekamp-Massey algorithm to determine error+erasure
+ * locator polynomial
+ */
+ r = no_eras;
+ el = no_eras;
+ while (++r <= nroots) { /* r is the step number */
+ /* Compute discrepancy at the r-th step in poly-form */
+ discr_r = 0;
+ for (i = 0; i < r; i++) {
+ if ((lambda[i] != 0) && (s[r - i - 1] != nn)) {
+ discr_r ^=
+ alpha_to[rs_modnn(rs,
+ index_of[lambda[i]] +
+ s[r - i - 1])];
+ }
+ }
+ discr_r = index_of[discr_r]; /* Index form */
+ if (discr_r == nn) {
+ /* 2 lines below: B(x) <-- x*B(x) */
+ memmove (&b[1], b, nroots * sizeof (b[0]));
+ b[0] = nn;
+ } else {
+ /* 7 lines below: T(x) <-- lambda(x)-discr_r*x*b(x) */
+ t[0] = lambda[0];
+ for (i = 0; i < nroots; i++) {
+ if (b[i] != nn) {
+ t[i + 1] = lambda[i + 1] ^
+ alpha_to[rs_modnn(rs, discr_r +
+ b[i])];
+ } else
+ t[i + 1] = lambda[i + 1];
+ }
+ if (2 * el <= r + no_eras - 1) {
+ el = r + no_eras - el;
+ /*
+ * 2 lines below: B(x) <-- inv(discr_r) *
+ * lambda(x)
+ */
+ for (i = 0; i <= nroots; i++) {
+ b[i] = (lambda[i] == 0) ? nn :
+ rs_modnn(rs, index_of[lambda[i]]
+ - discr_r + nn);
+ }
+ } else {
+ /* 2 lines below: B(x) <-- x*B(x) */
+ memmove(&b[1], b, nroots * sizeof(b[0]));
+ b[0] = nn;
+ }
+ memcpy(lambda, t, (nroots + 1) * sizeof(t[0]));
+ }
+ }
+
+ /* Convert lambda to index form and compute deg(lambda(x)) */
+ deg_lambda = 0;
+ for (i = 0; i < nroots + 1; i++) {
+ lambda[i] = index_of[lambda[i]];
+ if (lambda[i] != nn)
+ deg_lambda = i;
+ }
+ /* Find roots of error+erasure locator polynomial by Chien search */
+ memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0]));
+ count = 0; /* Number of roots of lambda(x) */
+ for (i = 1, k = iprim - 1; i <= nn; i++, k = rs_modnn(rs, k + iprim)) {
+ q = 1; /* lambda[0] is always 0 */
+ for (j = deg_lambda; j > 0; j--) {
+ if (reg[j] != nn) {
+ reg[j] = rs_modnn(rs, reg[j] + j);
+ q ^= alpha_to[reg[j]];
+ }
+ }
+ if (q != 0)
+ continue; /* Not a root */
+ /* store root (index-form) and error location number */
+ root[count] = i;
+ loc[count] = k;
+ /* If we've already found max possible roots,
+ * abort the search to save time
+ */
+ if (++count == deg_lambda)
+ break;
+ }
+ if (deg_lambda != count) {
+ /*
+ * deg(lambda) unequal to number of roots => uncorrectable
+ * error detected
+ */
+ count = -1;
+ goto finish;
+ }
+ /*
+ * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
+ * x**nroots), in index form. Also find deg(omega).
+ */
+ deg_omega = deg_lambda - 1;
+ for (i = 0; i <= deg_omega; i++) {
+ tmp = 0;
+ for (j = i; j >= 0; j--) {
+ if ((s[i - j] != nn) && (lambda[j] != nn))
+ tmp ^=
+ alpha_to[rs_modnn(rs, s[i - j] + lambda[j])];
+ }
+ omega[i] = index_of[tmp];
+ }
+
+ /*
+ * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
+ * inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form
+ */
+ for (j = count - 1; j >= 0; j--) {
+ num1 = 0;
+ for (i = deg_omega; i >= 0; i--) {
+ if (omega[i] != nn)
+ num1 ^= alpha_to[rs_modnn(rs, omega[i] +
+ i * root[j])];
+ }
+ num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)];
+ den = 0;
+
+ /* lambda[i+1] for i even is the formal derivative
+ * lambda_pr of lambda[i] */
+ for (i = min(deg_lambda, nroots - 1) & ~1; i >= 0; i -= 2) {
+ if (lambda[i + 1] != nn) {
+ den ^= alpha_to[rs_modnn(rs, lambda[i + 1] +
+ i * root[j])];
+ }
+ }
+ /* Apply error to data */
+ if (num1 != 0 && loc[j] >= pad) {
+ uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] +
+ index_of[num2] +
+ nn - index_of[den])];
+ /* Store the error correction pattern, if a
+ * correction buffer is available */
+ if (corr) {
+ corr[j] = cor;
+ } else {
+ /* If a data buffer is given and the
+ * error is inside the message,
+ * correct it */
+ if (data && (loc[j] < (nn - nroots)))
+ data[loc[j] - pad] ^= cor;
+ }
+ }
+ }
+
+finish:
+ if (eras_pos != NULL) {
+ for (i = 0; i < count; i++)
+ eras_pos[i] = loc[i] - pad;
+ }
+ return count;
+
+}
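The decoder body above follows the textbook structure: syndrome computation, the Berlekamp-Massey iteration that builds the error+erasure locator lambda(x), a Chien search for its roots, and the Forney algorithm for the error magnitudes. Index arithmetic is reduced with rs_modnn() throughout; a minimal sketch of that helper, assuming the usual definition in include/linux/rslib.h (the header itself is not shown in this hunk):

/* Assumed from include/linux/rslib.h: reduce a Galois-field exponent
 * modulo nn = 2^mm - 1 without a division, by folding the high bits
 * back into the low bits. */
static inline int rs_modnn(struct rs_control *rs, int x)
{
        while (x >= rs->nn) {
                x -= rs->nn;
                x = (x >> rs->mm) + (x & rs->nn);
        }
        return x;
}

Because nn is 2^mm - 1, adding the shifted-out high part back in is equivalent to taking the exponent modulo nn.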
diff --git a/linux-2.4.x/lib/reed_solomon/encode_rs.c b/linux-2.4.x/lib/reed_solomon/encode_rs.c
new file mode 100644
index 0000000..0b5b1a6
--- /dev/null
+++ b/linux-2.4.x/lib/reed_solomon/encode_rs.c
@@ -0,0 +1,54 @@
+/*
+ * lib/reed_solomon/encode_rs.c
+ *
+ * Overview:
+ * Generic Reed Solomon encoder / decoder library
+ *
+ * Copyright 2002, Phil Karn, KA9Q
+ * May be used under the terms of the GNU General Public License (GPL)
+ *
+ * Adaptation to the kernel by Thomas Gleixner (tglx@linutronix.de)
+ *
+ * $Id: encode_rs.c,v 1.5 2005/11/07 11:14:59 gleixner Exp $
+ *
+ */
+
+/* Generic data width independent code which is included by the
+ * wrappers.
+ * int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par)
+ */
+{
+ int i, j, pad;
+ int nn = rs->nn;
+ int nroots = rs->nroots;
+ uint16_t *alpha_to = rs->alpha_to;
+ uint16_t *index_of = rs->index_of;
+ uint16_t *genpoly = rs->genpoly;
+ uint16_t fb;
+ uint16_t msk = (uint16_t) rs->nn;
+
+ /* Check length parameter for validity */
+ pad = nn - nroots - len;
+ if (pad < 0 || pad >= nn)
+ return -ERANGE;
+
+ for (i = 0; i < len; i++) {
+ fb = index_of[((((uint16_t) data[i])^invmsk) & msk) ^ par[0]];
+ /* feedback term is non-zero */
+ if (fb != nn) {
+ for (j = 1; j < nroots; j++) {
+ par[j] ^= alpha_to[rs_modnn(rs, fb +
+ genpoly[nroots - j])];
+ }
+ }
+ /* Shift */
+ memmove(&par[0], &par[1], sizeof(uint16_t) * (nroots - 1));
+ if (fb != nn) {
+ par[nroots - 1] = alpha_to[rs_modnn(rs,
+ fb + genpoly[0])];
+ } else {
+ par[nroots - 1] = 0;
+ }
+ }
+ return 0;
+}
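Both the encoder above and the decoder accept len < nn - nroots as a shortened code: pad leading data symbols are treated as implicitly zero, and the -ERANGE check rejects lengths that cannot fit the code at all. A minimal sketch of that arithmetic; the helper name and the GF(2^8), 16-parity-symbol figures in the comment are illustrative, not part of the library:

/* Illustrative only: mirrors the length check used by the encoder and
 * decoder. For symsize = 8 and nroots = 16, nn = 255, so any len up
 * to 239 passes as a valid (possibly shortened) block. */
static int rs_block_fits(int symsize, int nroots, int len)
{
        int nn = (1 << symsize) - 1;
        int pad = nn - nroots - len;

        return pad >= 0 && pad < nn;
}

With a zeroed parity buffer and invmsk == 0, leading zero data symbols produce no feedback term in the encoder loop, which is why the implicit pad costs nothing.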
diff --git a/linux-2.4.x/lib/reed_solomon/rslib.c b/linux-2.4.x/lib/reed_solomon/rslib.c
new file mode 100644
index 0000000..f5fef94
--- /dev/null
+++ b/linux-2.4.x/lib/reed_solomon/rslib.c
@@ -0,0 +1,335 @@
+/*
+ * lib/reed_solomon/rslib.c
+ *
+ * Overview:
+ * Generic Reed Solomon encoder / decoder library
+ *
+ * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Reed Solomon code lifted from reed solomon library written by Phil Karn
+ * Copyright 2002 Phil Karn, KA9Q
+ *
+ * $Id: rslib.c,v 1.7 2005/11/07 11:14:59 gleixner Exp $
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description:
+ *
+ * The generic Reed Solomon library provides runtime configurable
+ * encoding / decoding of RS codes.
+ * Each user must call init_rs to get a pointer to a rs_control
+ * structure for the given rs parameters. This structure is either
+ * generated or an already available matching control structure is used.
+ * If a structure is generated then the polynomial arrays for
+ * fast encoding / decoding are built. This can take some time so
+ * make sure not to call this function from a time critical path.
+ * Usually a module / driver should initialize the necessary
+ * rs_control structure on module / driver init and release it
+ * on exit.
+ * The encoding puts the calculated parity into a given parity
+ * buffer.
+ * The decoding is a two-step process. The first step calculates
+ * the syndrome over the received (data + parity) and calls the
+ * second stage, which does the decoding / error correction itself.
+ * Many hardware encoders provide a syndrome calculation over the
+ * received data + parity and can call the second stage directly.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/rslib.h>
+#include <linux/slab.h>
+#include <asm/semaphore.h>
+
+/* This list holds all currently allocated rs control structures */
+static LIST_HEAD (rslist);
+/* Protection for the list */
+static DECLARE_MUTEX(rslistlock);
+
+/**
+ * rs_init - Initialize a Reed-Solomon codec
+ *
+ * @symsize: symbol size, bits (1-8)
+ * @gfpoly: Field generator polynomial coefficients
+ * @fcr: first root of RS code generator polynomial, index form
+ * @prim: primitive element to generate polynomial roots
+ * @nroots: RS code generator polynomial degree (number of roots)
+ *
+ * Allocate a control structure and the polynomial arrays for faster
+ * encoding / decoding. Fill the arrays according to the given parameters.
+ */
+static struct rs_control *rs_init(int symsize, int gfpoly, int fcr,
+ int prim, int nroots)
+{
+ struct rs_control *rs;
+ int i, j, sr, root, iprim;
+
+ /* Allocate the control structure */
+ rs = kmalloc(sizeof (struct rs_control), GFP_KERNEL);
+ if (rs == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&rs->list);
+
+ rs->mm = symsize;
+ rs->nn = (1 << symsize) - 1;
+ rs->fcr = fcr;
+ rs->prim = prim;
+ rs->nroots = nroots;
+ rs->gfpoly = gfpoly;
+
+ /* Allocate the arrays */
+ rs->alpha_to = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
+ if (rs->alpha_to == NULL)
+ goto errrs;
+
+ rs->index_of = kmalloc(sizeof(uint16_t) * (rs->nn + 1), GFP_KERNEL);
+ if (rs->index_of == NULL)
+ goto erralp;
+
+ rs->genpoly = kmalloc(sizeof(uint16_t) * (rs->nroots + 1), GFP_KERNEL);
+ if(rs->genpoly == NULL)
+ goto erridx;
+
+ /* Generate Galois field lookup tables */
+ rs->index_of[0] = rs->nn; /* log(zero) = -inf */
+ rs->alpha_to[rs->nn] = 0; /* alpha**-inf = 0 */
+ sr = 1;
+ for (i = 0; i < rs->nn; i++) {
+ rs->index_of[sr] = i;
+ rs->alpha_to[i] = sr;
+ sr <<= 1;
+ if (sr & (1 << symsize))
+ sr ^= gfpoly;
+ sr &= rs->nn;
+ }
+ /* If it's not primitive, exit */
+ if(sr != 1)
+ goto errpol;
+
+ /* Find prim-th root of 1, used in decoding */
+ for(iprim = 1; (iprim % prim) != 0; iprim += rs->nn);
+ /* prim-th root of 1, index form */
+ rs->iprim = iprim / prim;
+
+ /* Form RS code generator polynomial from its roots */
+ rs->genpoly[0] = 1;
+ for (i = 0, root = fcr * prim; i < nroots; i++, root += prim) {
+ rs->genpoly[i + 1] = 1;
+ /* Multiply rs->genpoly[] by @**(root + x) */
+ for (j = i; j > 0; j--) {
+ if (rs->genpoly[j] != 0) {
+ rs->genpoly[j] = rs->genpoly[j -1] ^
+ rs->alpha_to[rs_modnn(rs,
+ rs->index_of[rs->genpoly[j]] + root)];
+ } else
+ rs->genpoly[j] = rs->genpoly[j - 1];
+ }
+ /* rs->genpoly[0] can never be zero */
+ rs->genpoly[0] =
+ rs->alpha_to[rs_modnn(rs,
+ rs->index_of[rs->genpoly[0]] + root)];
+ }
+ /* convert rs->genpoly[] to index form for quicker encoding */
+ for (i = 0; i <= nroots; i++)
+ rs->genpoly[i] = rs->index_of[rs->genpoly[i]];
+ return rs;
+
+ /* Error exit */
+errpol:
+ kfree(rs->genpoly);
+erridx:
+ kfree(rs->index_of);
+erralp:
+ kfree(rs->alpha_to);
+errrs:
+ kfree(rs);
+ return NULL;
+}
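rs_init() builds a log/antilog table pair for GF(2^symsize): alpha_to[] maps index (log) form back to polynomial form, index_of[] is its inverse, and the value nn stands in for the undefined logarithm of zero. A minimal sketch of the invariants the encode/decode paths rely on; the function name is illustrative and the code is not part of the library:

#include <linux/kernel.h>
#include <linux/rslib.h>

/* Illustrative check of the table conventions set up by rs_init().
 * @rs is assumed to be a successfully initialized rs_control. */
static void rs_check_tables(struct rs_control *rs)
{
        int x;

        /* zero has no logarithm; it is encoded as nn */
        if (rs->index_of[0] != rs->nn || rs->alpha_to[rs->nn] != 0)
                printk(KERN_ERR "rs: zero-element convention broken\n");

        /* every non-zero element round-trips through log/antilog */
        for (x = 1; x <= rs->nn; x++)
                if (rs->alpha_to[rs->index_of[x]] != x)
                        printk(KERN_ERR "rs: table mismatch at %d\n", x);
}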
+
+
+/**
+ * free_rs - Free the rs control structure if it is no longer used
+ *
+ * @rs: the control structure which is no longer used by the
+ * caller
+ */
+void free_rs(struct rs_control *rs)
+{
+ down(&rslistlock);
+ rs->users--;
+ if(!rs->users) {
+ list_del(&rs->list);
+ kfree(rs->alpha_to);
+ kfree(rs->index_of);
+ kfree(rs->genpoly);
+ kfree(rs);
+ }
+ up(&rslistlock);
+}
+
+/**
+ * init_rs - Find a matching or allocate a new rs control structure
+ *
+ * @symsize: the symbol size (number of bits)
+ * @gfpoly: the extended Galois field generator polynomial coefficients,
+ * with the 0th coefficient in the low order bit. The polynomial
+ * must be primitive.
+ * @fcr: the first consecutive root of the rs code generator polynomial
+ * in index form
+ * @prim: primitive element to generate polynomial roots
+ * @nroots: RS code generator polynomial degree (number of roots)
+ */
+struct rs_control *init_rs(int symsize, int gfpoly, int fcr, int prim,
+ int nroots)
+{
+ struct list_head *tmp;
+ struct rs_control *rs;
+
+ /* Sanity checks */
+ if (symsize < 1)
+ return NULL;
+ if (fcr < 0 || fcr >= (1<<symsize))
+ return NULL;
+ if (prim <= 0 || prim >= (1<<symsize))
+ return NULL;
+ if (nroots < 0 || nroots >= (1<<symsize))
+ return NULL;
+
+ down(&rslistlock);
+
+ /* Walk through the list and look for a matching entry */
+ list_for_each(tmp, &rslist) {
+ rs = list_entry(tmp, struct rs_control, list);
+ if (symsize != rs->mm)
+ continue;
+ if (gfpoly != rs->gfpoly)
+ continue;
+ if (fcr != rs->fcr)
+ continue;
+ if (prim != rs->prim)
+ continue;
+ if (nroots != rs->nroots)
+ continue;
+ /* We have a matching one already */
+ rs->users++;
+ goto out;
+ }
+
+ /* Create a new one */
+ rs = rs_init(symsize, gfpoly, fcr, prim, nroots);
+ if (rs) {
+ rs->users = 1;
+ list_add(&rs->list, &rslist);
+ }
+out:
+ up(&rslistlock);
+ return rs;
+}
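Since table generation may sleep and takes noticeable time, the intended pattern is the one the file description spells out: fetch the rs_control once at module or driver init and release it on exit. A hedged lifecycle sketch; the function names, the field polynomial 0x11d (a primitive polynomial for GF(2^8)) and the choice of 16 parity symbols are illustrative assumptions, not taken from this patch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rslib.h>

/* Hypothetical user: GF(2^8), fcr = 0, prim = 1, 16 parity symbols,
 * i.e. up to 8 correctable symbol errors per (shortened) block. */
static struct rs_control *example_rs;

static int __init example_rs_setup(void)
{
        /* Builds the lookup tables on first use: not for hot paths. */
        example_rs = init_rs(8, 0x11d, 0, 1, 16);
        return example_rs ? 0 : -ENOMEM;
}

static void __exit example_rs_teardown(void)
{
        free_rs(example_rs);
}

module_init(example_rs_setup);
module_exit(example_rs_teardown);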
+
+#ifdef CONFIG_REED_SOLOMON_ENC8
+/**
+ * encode_rs8 - Calculate the parity for data values (8bit data width)
+ *
+ * @rs: the rs control structure
+ * @data: data field of a given type
+ * @len: data length
+ * @par: parity data, must be initialized by caller (usually all 0)
+ * @invmsk: invert data mask (will be xored on data)
+ *
+ * The parity uses a uint16_t data type to enable
+ * symbol size > 8. The calling code must take care of encoding of the
+ * syndrome result for storage itself.
+ */
+int encode_rs8(struct rs_control *rs, uint8_t *data, int len, uint16_t *par,
+ uint16_t invmsk)
+{
+#include "encode_rs.c"
+}
+EXPORT_SYMBOL_GPL(encode_rs8);
+#endif
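A hedged usage sketch for encode_rs8(): the parity buffer must be zeroed by the caller, and because the parity comes back as one uint16_t per symbol, a user with 8-bit symbols still has to pack it for storage itself. The helper name, the 239-byte/16-symbol geometry and the reuse of a codec like the one in the lifecycle sketch above are assumptions for illustration:

#include <linux/rslib.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical encode path: 239 data bytes protected by 16 parity
 * symbols, matching an init_rs(8, 0x11d, 0, 1, 16) codec. */
static int example_rs_encode(struct rs_control *rs, uint8_t *data,
                             uint8_t *stored_par)
{
        uint16_t par[16];
        int i, err;

        memset(par, 0, sizeof(par));    /* parity must start out zeroed */
        err = encode_rs8(rs, data, 239, par, 0);
        if (err)
                return err;             /* -ERANGE on an invalid length */

        for (i = 0; i < 16; i++)        /* 8-bit symbols: keep the low byte */
                stored_par[i] = (uint8_t)par[i];
        return 0;
}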
+
+#ifdef CONFIG_REED_SOLOMON_DEC8
+/**
+ * decode_rs8 - Decode codeword (8bit data width)
+ *
+ * @rs: the rs control structure
+ * @data: data field of a given type
+ * @par: received parity data field
+ * @len: data length
+ * @s: syndrome data field (if NULL, syndrome is calculated)
+ * @no_eras: number of erasures
+ * @eras_pos: position of erasures, can be NULL
+ * @invmsk: invert data mask (will be xored on data, not on parity!)
+ * @corr: buffer to store correction bitmask on eras_pos
+ *
+ * The syndrome and parity use a uint16_t data type to enable
+ * symbol size > 8. The calling code must take care of decoding the
+ * syndrome result and the received parity before calling this code.
+ */
+int decode_rs8(struct rs_control *rs, uint8_t *data, uint16_t *par, int len,
+ uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
+ uint16_t *corr)
+{
+#include "decode_rs.c"
+}
+EXPORT_SYMBOL_GPL(decode_rs8);
+#endif
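The matching decode path, again only as an illustrative sketch: the stored parity is widened back to uint16_t, s is left NULL so decode_rs8() computes the syndrome itself, and no erasure information or correction buffer is passed, so errors are fixed directly in the data buffer. The return value is the number of corrected symbols, 0 for a clean block, or negative (-1 for an uncorrectable block, -ERANGE for a bad length):

#include <linux/rslib.h>
#include <linux/types.h>

/* Hypothetical decode path matching the encode sketch above. */
static int example_rs_decode(struct rs_control *rs, uint8_t *data,
                             const uint8_t *stored_par)
{
        uint16_t par[16];
        int i;

        for (i = 0; i < 16; i++)
                par[i] = stored_par[i];

        /* corr == NULL: corrections are applied to data in place */
        return decode_rs8(rs, data, par, 239, NULL, 0, NULL, 0, NULL);
}

A caller whose hardware already delivers the syndrome can pass it via s instead and skip the software syndrome loop, which is the two-step split the file description above refers to.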
+
+#ifdef CONFIG_REED_SOLOMON_ENC16
+/**
+ * encode_rs16 - Calculate the parity for data values (16bit data width)
+ *
+ * @rs: the rs control structure
+ * @data: data field of a given type
+ * @len: data length
+ * @par: parity data, must be initialized by caller (usually all 0)
+ * @invmsk: invert data mask (will be xored on data, not on parity!)
+ *
+ * Each field in the data array contains up to symbol size bits of valid data.
+ */
+int encode_rs16(struct rs_control *rs, uint16_t *data, int len, uint16_t *par,
+ uint16_t invmsk)
+{
+#include "encode_rs.c"
+}
+EXPORT_SYMBOL_GPL(encode_rs16);
+#endif
+
+#ifdef CONFIG_REED_SOLOMON_DEC16
+/**
+ * decode_rs16 - Decode codeword (16bit data width)
+ *
+ * @rs: the rs control structure
+ * @data: data field of a given type
+ * @par: received parity data field
+ * @len: data length
+ * @s: syndrome data field (if NULL, syndrome is calculated)
+ * @no_eras: number of erasures
+ * @eras_pos: position of erasures, can be NULL
+ * @invmsk: invert data mask (will be xored on data, not on parity!)
+ * @corr: buffer to store correction bitmask on eras_pos
+ *
+ * Each field in the data array contains up to symbol size bits of valid data.
+ */
+int decode_rs16(struct rs_control *rs, uint16_t *data, uint16_t *par, int len,
+ uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
+ uint16_t *corr)
+{
+#include "decode_rs.c"
+}
+EXPORT_SYMBOL_GPL(decode_rs16);
+#endif
+
+EXPORT_SYMBOL_GPL(init_rs);
+EXPORT_SYMBOL_GPL(free_rs);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Reed Solomon encoder/decoder");
+MODULE_AUTHOR("Phil Karn, Thomas Gleixner");
+