From edee761616905546cfbe57a18d0e774e528ee1d6 Mon Sep 17 00:00:00 2001 From: Waldemar Brodkorb Date: Tue, 17 Jan 2012 20:11:36 +0100 Subject: make broadcom devices work with selfrelocatable bootloader (even Asus WL500gp) --- target/linux/patches/2.6.39.4/brcm.patch | 396 +++++++++++++ target/linux/patches/2.6.39.4/mtd-rootfs.patch | 786 +++++++++++++++++++++++++ 2 files changed, 1182 insertions(+) create mode 100644 target/linux/patches/2.6.39.4/brcm.patch create mode 100644 target/linux/patches/2.6.39.4/mtd-rootfs.patch (limited to 'target/linux/patches/2.6.39.4') diff --git a/target/linux/patches/2.6.39.4/brcm.patch b/target/linux/patches/2.6.39.4/brcm.patch new file mode 100644 index 000000000..8516e4e61 --- /dev/null +++ b/target/linux/patches/2.6.39.4/brcm.patch @@ -0,0 +1,396 @@ +diff -Nur linux-2.6.39.4.orig/arch/mips/Kconfig linux-2.6.39.4/arch/mips/Kconfig +--- linux-2.6.39.4.orig/arch/mips/Kconfig 2011-08-03 21:43:28.000000000 +0200 ++++ linux-2.6.39.4/arch/mips/Kconfig 2012-01-17 13:14:33.000000000 +0100 +@@ -103,6 +103,7 @@ + select GENERIC_GPIO + select SYS_HAS_EARLY_PRINTK + select CFE ++ select SYS_SUPPORTS_ZBOOT_UART16550 + help + Support for BCM47XX based boards + +diff -Nur linux-2.6.39.4.orig/arch/mips/Makefile linux-2.6.39.4/arch/mips/Makefile +--- linux-2.6.39.4.orig/arch/mips/Makefile 2011-08-03 21:43:28.000000000 +0200 ++++ linux-2.6.39.4/arch/mips/Makefile 2012-01-17 13:14:33.000000000 +0100 +@@ -76,6 +76,7 @@ + all-$(CONFIG_BOOT_ELF32) := $(vmlinux-32) + all-$(CONFIG_BOOT_ELF64) := $(vmlinux-64) + all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlinuz ++all-$(CONFIG_BCM47XX) += vmlinuz.elf + + # + # GCC uses -G 0 -mabicalls -fpic as default. We don't want PIC in the kernel +@@ -276,7 +277,7 @@ + $(Q)$(MAKE) $(build)=arch/mips/boot VMLINUX=$(vmlinux-32) arch/mips/boot/$@ + + # boot/compressed +-vmlinuz vmlinuz.bin vmlinuz.ecoff vmlinuz.srec: $(vmlinux-32) FORCE ++vmlinuz vmlinuz.bin vmlinuz.ecoff vmlinuz.srec vmlinuz.elf: $(vmlinux-32) FORCE + $(Q)$(MAKE) $(build)=arch/mips/boot/compressed \ + VMLINUX_LOAD_ADDRESS=$(load-y) 32bit-bfd=$(32bit-bfd) $@ + +@@ -313,6 +314,7 @@ + echo ' vmlinuz.ecoff - ECOFF zboot image' + echo ' vmlinuz.bin - Raw binary zboot image' + echo ' vmlinuz.srec - SREC zboot image' ++ echo ' vmlinuz.elf - ELF self-relocating zboot image' + echo + echo ' These will be default as appropriate for a configured platform.' + endef +diff -Nur linux-2.6.39.4.orig/arch/mips/bcm47xx/Makefile linux-2.6.39.4/arch/mips/bcm47xx/Makefile +--- linux-2.6.39.4.orig/arch/mips/bcm47xx/Makefile 2011-08-03 21:43:28.000000000 +0200 ++++ linux-2.6.39.4/arch/mips/bcm47xx/Makefile 2012-01-17 13:14:33.000000000 +0100 +@@ -3,4 +3,4 @@ + # under Linux. + # + +-obj-y := gpio.o irq.o nvram.o prom.o serial.o setup.o time.o wgt634u.o ++obj-y := gpio.o irq.o nvram.o prom.o serial.o setup.o time.o platform.o +diff -Nur linux-2.6.39.4.orig/arch/mips/bcm47xx/platform.c linux-2.6.39.4/arch/mips/bcm47xx/platform.c +--- linux-2.6.39.4.orig/arch/mips/bcm47xx/platform.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.39.4/arch/mips/bcm47xx/platform.c 2012-01-17 13:14:33.000000000 +0100 +@@ -0,0 +1,146 @@ ++/* ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. 
++ * ++ * Copyright (C) 2010, 2011 Waldemar Brodkorb ++ * Copyright © 2007, 2011 Thorsten Glaser ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#define CFGFS_FLASH_SIZE (128 * 1024) ++ ++static struct mtd_partition bcm47xx_partitions[] = { ++#define SLOT_CFE 0 ++ { ++ .name = "cfe", ++ .offset = 0, ++ .size = BCM47XX_OVERRIDE_CFESIZE, ++ .mask_flags = MTD_WRITEABLE /* force read-only */ ++ }, ++#define SLOT_LINUX 1 ++ { ++ .name = "linux", ++ .offset = 0, ++ .size = 0, ++ }, ++#define SLOT_ROOTFS 2 ++ { ++ .name = "rootfs", ++ .offset = 0, ++ .size = 0, ++ }, ++#define SLOT_CFGFS 3 ++ { ++ .name = "cfgfs", ++ .offset = 0, ++ .size = 0, ++ }, ++#define SLOT_NVRAM 4 ++ { ++ .name = "nvram", ++ .offset = 0, ++ .size = 0, ++ }, ++}; ++ ++static struct physmap_flash_data bcm47xx_flash_data = { ++ .parts = bcm47xx_partitions, ++ .nr_parts = ARRAY_SIZE(bcm47xx_partitions) ++}; ++ ++static struct resource bcm47xx_flash_resource = { ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct platform_device bcm47xx_flash = { ++ .name = "physmap-flash", ++ .id = 0, ++ .dev = { .platform_data = &bcm47xx_flash_data, }, ++ .resource = &bcm47xx_flash_resource, ++ .num_resources = 1, ++}; ++ ++static struct platform_device *bcm47xx_devices[] __initdata = { ++ &bcm47xx_flash, ++}; ++ ++struct bcm47xx_trx_header { ++#define BCM47XX_TRX_MAGIC 0x30524448 ++ u32 magic; ++ u32 len; ++ u32 crc32; ++ u32 flag_version; ++ u32 offsets[3]; ++}; ++ ++#define UPTODOWN(slot, psize) do { \ ++ posn -= psize; left -= psize; \ ++ bcm47xx_partitions[slot].offset = posn; \ ++ bcm47xx_partitions[slot].size = psize; \ ++} while (/* CONSTCOND */ 0) ++ ++static int __init bcm47xx_register_devices(void) ++{ ++ u32 flash_size; ++ size_t left, posn; ++ struct ssb_mipscore *mcore = &ssb_bcm47xx.mipscore; ++ struct bcm47xx_trx_header *trx_hdr; ++ ++ trx_hdr = (void *)KSEG1ADDR(mcore->flash_window + BCM47XX_OVERRIDE_CFESIZE); ++ ++ /* devices might have 2, 4 or 8 MB flash size */ ++#ifdef BCM47XX_OVERRIDE_FLASHSIZE ++ flash_size = BCM47XX_OVERRIDE_FLASHSIZE; ++ mcore->flash_window_size = flash_size; ++#define BCM47XX_OVERRODE_FLASHSIZE " (overridden)" ++#else ++ flash_size = mcore->flash_window_size; ++#define BCM47XX_OVERRODE_FLASHSIZE "" ++#endif ++ printk(KERN_INFO "FLASH SIZE%s: %x\n", BCM47XX_OVERRODE_FLASHSIZE, ++ flash_size); ++ ++ left = flash_size - BCM47XX_OVERRIDE_CFESIZE; ++ posn = flash_size; ++ UPTODOWN(SLOT_NVRAM, BCM47XX_OVERRIDE_NVRAMSIZE); ++ UPTODOWN(SLOT_CFGFS, CFGFS_FLASH_SIZE); ++ bcm47xx_partitions[SLOT_LINUX].offset = BCM47XX_OVERRIDE_CFESIZE; ++ bcm47xx_partitions[SLOT_LINUX].size = left; ++ ++ if (trx_hdr->magic == BCM47XX_TRX_MAGIC) { ++ bcm47xx_partitions[SLOT_ROOTFS].offset = ++ bcm47xx_partitions[SLOT_LINUX].offset + ++ trx_hdr->offsets[1]; ++ bcm47xx_partitions[SLOT_ROOTFS].size = ++ bcm47xx_partitions[SLOT_LINUX].size - ++ trx_hdr->offsets[1]; ++ } else ++ printk("bcm47xx/platform: no TRX header found\n"); ++ ++ printk(KERN_INFO "=== Flash map dump ===\n"); ++ for (posn = 0; posn < bcm47xx_flash_data.nr_parts; ++posn) ++ printk(KERN_INFO " #%u %08X @%08X '%s'\n", ++ (unsigned int)posn, ++ (unsigned int)bcm47xx_partitions[posn].size, ++ (unsigned int)bcm47xx_partitions[posn].offset, ++ bcm47xx_partitions[posn].name); ++ printk(KERN_INFO "=== Hope this works, have a nice day\n"); ++ ++ bcm47xx_flash_data.width = mcore->flash_buswidth; ++ bcm47xx_flash_resource.start = mcore->flash_window; ++ bcm47xx_flash_resource.end = mcore->flash_window ++ + mcore->flash_window_size ++ - 1; 
++ return platform_add_devices(bcm47xx_devices, ++ ARRAY_SIZE(bcm47xx_devices)); ++} ++ ++device_initcall(bcm47xx_register_devices); +diff -Nur linux-2.6.39.4.orig/arch/mips/boot/compressed/Makefile linux-2.6.39.4/arch/mips/boot/compressed/Makefile +--- linux-2.6.39.4.orig/arch/mips/boot/compressed/Makefile 2011-08-03 21:43:28.000000000 +0200 ++++ linux-2.6.39.4/arch/mips/boot/compressed/Makefile 2012-01-17 13:14:33.000000000 +0100 +@@ -58,8 +58,13 @@ + # Calculate the load address of the compressed kernel image + hostprogs-y := calc_vmlinuz_load_addr + ++ifdef CONFIG_BCM47XX ++# XXX just after CFE, just pray the address is static ++VMLINUZ_LOAD_ADDRESS = 0xffffffff80900000 ++else + VMLINUZ_LOAD_ADDRESS = $(shell $(obj)/calc_vmlinuz_load_addr \ + $(obj)/vmlinux.bin $(VMLINUX_LOAD_ADDRESS)) ++endif + + vmlinuzobjs-y += $(obj)/piggy.o + +@@ -105,4 +110,12 @@ + vmlinuz.srec: vmlinuz + $(call cmd,objcopy) + +-clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec} ++AFLAGS_selfreloc.o := -DVMLINUZ_LOAD_ADDRESS=$(VMLINUZ_LOAD_ADDRESS) ++CPPFLAGS_selfreloc.lds := $(KBUILD_CFLAGS) ++ ++arch/mips/boot/compressed/selfreloc.o: arch/mips/boot/compressed/selfreloc.S vmlinuz.bin FORCE ++ ++vmlinuz.elf: arch/mips/boot/compressed/selfreloc.o arch/mips/boot/compressed/selfreloc.lds FORCE ++ $(LD) $(LDFLAGS) -T arch/mips/boot/compressed/selfreloc.lds arch/mips/boot/compressed/selfreloc.o -o $@ ++ ++clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec,elf} $(objtree)/arch/mips/boot/compressed/selfreloc.{o,lds} +diff -Nur linux-2.6.39.4.orig/arch/mips/boot/compressed/selfreloc.S linux-2.6.39.4/arch/mips/boot/compressed/selfreloc.S +--- linux-2.6.39.4.orig/arch/mips/boot/compressed/selfreloc.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.39.4/arch/mips/boot/compressed/selfreloc.S 2012-01-17 13:14:33.000000000 +0100 +@@ -0,0 +1,54 @@ ++/*- ++ * written 2011 by Thorsten Glaser based on ++ * arch/mips/boot/compressed/head.S ++ */ ++ ++#include ++#include ++ ++ .set noreorder ++ .cprestore ++ ++ .text ++ LEAF(selfreloc_start) ++selfreloc_start: ++ /* Save boot rom start args */ ++ move s0, a0 ++ move s1, a1 ++ move s2, a2 ++ move s3, a3 ++ ++ /* Copy code to the correct place */ ++ PTR_LI a0, VMLINUZ_LOAD_ADDRESS ++ PTR_LA a1, imgbeg ++ PTR_LA a2, imgend ++1: lw t0, 0(a1) ++ sw t0, 0(a0) ++ add a1, 4 ++ add a0, 4 ++ blt a1, a2, 1b ++ nop ++ ++ /* Restore boot rom start args */ ++ move a0, s0 ++ move a1, s1 ++ move a2, s2 ++ move a3, s3 ++ ++ /* Jump to the code at its new location */ ++ PTR_LI k0, VMLINUZ_LOAD_ADDRESS ++ jr k0 ++ nop ++ ++ /* Just in case we come back… */ ++3: ++ b 3b ++ nop ++ END(selfreloc_start) ++ ++ .globl imgbeg ++ .p2align 2 ++imgbeg: .incbin "vmlinuz.bin" ++ .globl imgend ++ .p2align 2 ++imgend: +diff -Nur linux-2.6.39.4.orig/arch/mips/boot/compressed/selfreloc.lds.S linux-2.6.39.4/arch/mips/boot/compressed/selfreloc.lds.S +--- linux-2.6.39.4.orig/arch/mips/boot/compressed/selfreloc.lds.S 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.39.4/arch/mips/boot/compressed/selfreloc.lds.S 2012-01-17 13:14:33.000000000 +0100 +@@ -0,0 +1,39 @@ ++/*- ++ * written 2010 by Thorsten Glaser based on ++ * arch/mips/kernel/vmlinux.lds and arch/mips/boot/compressed/ld.script ++ */ ++ ++#include ++#include ++#include ++ ++#undef mips ++#define mips mips ++OUTPUT_ARCH(mips) ++ENTRY(selfreloc_start) ++PHDRS { ++ text PT_LOAD FLAGS(7); /* RWX */ ++} ++SECTIONS ++{ ++ . 
= VMLINUX_LOAD_ADDRESS; ++ .text : { ++ *(.text) ++ *(.text.*) ++ *(.rodata) ++ *(.rodata.*) ++ *(.data) ++ *(.data.*) ++ *(.bss) ++ *(.bss.*) ++ } :text ++ /DISCARD/ : { ++ *(.MIPS.options) ++ *(.options) ++ *(.pdr) ++ *(.reginfo) ++ *(.comment) ++ *(.note) ++ *(.gnu.attributes) ++ } ++} +diff -Nur linux-2.6.39.4.orig/arch/mips/boot/compressed/uart-16550.c linux-2.6.39.4/arch/mips/boot/compressed/uart-16550.c +--- linux-2.6.39.4.orig/arch/mips/boot/compressed/uart-16550.c 2011-08-03 21:43:28.000000000 +0200 ++++ linux-2.6.39.4/arch/mips/boot/compressed/uart-16550.c 2012-01-17 13:14:33.000000000 +0100 +@@ -18,6 +18,11 @@ + #define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset)) + #endif + ++#ifdef CONFIG_BCM47XX ++#define UART_BASE 0x18000300 ++#define PORT(offset) (CKSEG1ADDR(UART_BASE) + (offset)) ++#endif ++ + #ifndef PORT + #error please define the serial port address for your own machine + #endif +diff -Nur linux-2.6.39.4.orig/drivers/ssb/driver_mipscore.c linux-2.6.39.4/drivers/ssb/driver_mipscore.c +--- linux-2.6.39.4.orig/drivers/ssb/driver_mipscore.c 2011-08-03 21:43:28.000000000 +0200 ++++ linux-2.6.39.4/drivers/ssb/driver_mipscore.c 2012-01-17 13:14:33.000000000 +0100 +@@ -190,10 +190,11 @@ + { + struct ssb_bus *bus = mcore->dev->bus; + ++ printk("Check for vendor with value: %d", bus->chipco.dev->id.vendor); + mcore->flash_buswidth = 2; + if (bus->chipco.dev) { + mcore->flash_window = 0x1c000000; +- mcore->flash_window_size = 0x02000000; ++ mcore->flash_window_size = 0x00800000; + if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG) + & SSB_CHIPCO_CFG_DS16) == 0) + mcore->flash_buswidth = 1; +diff -Nur linux-2.6.39.4.orig/init/Kconfig linux-2.6.39.4/init/Kconfig +--- linux-2.6.39.4.orig/init/Kconfig 2011-08-03 21:43:28.000000000 +0200 ++++ linux-2.6.39.4/init/Kconfig 2012-01-17 13:14:33.000000000 +0100 +@@ -156,6 +156,7 @@ + config KERNEL_GZIP + bool "Gzip" + depends on HAVE_KERNEL_GZIP ++ select DECOMPRESS_GZIP + help + The old and tried gzip compression. It provides a good balance + between compression ratio and decompression speed. +@@ -163,6 +164,7 @@ + config KERNEL_BZIP2 + bool "Bzip2" + depends on HAVE_KERNEL_BZIP2 ++ select DECOMPRESS_BZIP2 + help + Its compression ratio and speed is intermediate. + Decompression speed is slowest among the three. The kernel +@@ -173,6 +175,7 @@ + config KERNEL_LZMA + bool "LZMA" + depends on HAVE_KERNEL_LZMA ++ select DECOMPRESS_LZMA + help + The most recent compression algorithm. + Its ratio is best, decompression speed is between the other +@@ -197,6 +200,7 @@ + config KERNEL_LZO + bool "LZO" + depends on HAVE_KERNEL_LZO ++ select DECOMPRESS_LZO + help + Its compression ratio is the poorest among the 4. 
The kernel + size is about 10% bigger than gzip; however its speed diff --git a/target/linux/patches/2.6.39.4/mtd-rootfs.patch b/target/linux/patches/2.6.39.4/mtd-rootfs.patch new file mode 100644 index 000000000..35c07c186 --- /dev/null +++ b/target/linux/patches/2.6.39.4/mtd-rootfs.patch @@ -0,0 +1,786 @@ +diff -Nur linux-2.6.39.4.orig/drivers/mtd/mtdpart.c linux-2.6.39.4/drivers/mtd/mtdpart.c +--- linux-2.6.39.4.orig/drivers/mtd/mtdpart.c 2011-08-03 21:43:28.000000000 +0200 ++++ linux-2.6.39.4/drivers/mtd/mtdpart.c 2012-01-17 19:15:04.000000000 +0100 +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + + /* Our partition linked list */ + static LIST_HEAD(mtd_partitions); +@@ -660,6 +661,14 @@ + if (IS_ERR(slave)) + return PTR_ERR(slave); + ++ if (strcmp(parts[i].name, "rootfs") == 0) { ++ if (ROOT_DEV == 0) { ++ printk(KERN_NOTICE "mtd: partition \"rootfs\" " ++ "set to be root filesystem\n"); ++ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, i); ++ } ++ } ++ + mutex_lock(&mtd_partitions_mutex); + list_add(&slave->list, &mtd_partitions); + mutex_unlock(&mtd_partitions_mutex); +diff -Nur linux-2.6.39.4.orig/drivers/mtd/mtdpart.c.orig linux-2.6.39.4/drivers/mtd/mtdpart.c.orig +--- linux-2.6.39.4.orig/drivers/mtd/mtdpart.c.orig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-2.6.39.4/drivers/mtd/mtdpart.c.orig 2011-08-03 21:43:28.000000000 +0200 +@@ -0,0 +1,756 @@ ++/* ++ * Simple MTD partitioning layer ++ * ++ * Copyright © 2000 Nicolas Pitre ++ * Copyright © 2002 Thomas Gleixner ++ * Copyright © 2000-2010 David Woodhouse ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* Our partition linked list */ ++static LIST_HEAD(mtd_partitions); ++static DEFINE_MUTEX(mtd_partitions_mutex); ++ ++/* Our partition node structure */ ++struct mtd_part { ++ struct mtd_info mtd; ++ struct mtd_info *master; ++ uint64_t offset; ++ struct list_head list; ++}; ++ ++/* ++ * Given a pointer to the MTD object in the mtd_part structure, we can retrieve ++ * the pointer to that structure with this macro. ++ */ ++#define PART(x) ((struct mtd_part *)(x)) ++ ++ ++/* ++ * MTD methods which simply translate the effective address and pass through ++ * to the _real_ device. 
++ */ ++ ++static int part_read(struct mtd_info *mtd, loff_t from, size_t len, ++ size_t *retlen, u_char *buf) ++{ ++ struct mtd_part *part = PART(mtd); ++ struct mtd_ecc_stats stats; ++ int res; ++ ++ stats = part->master->ecc_stats; ++ ++ if (from >= mtd->size) ++ len = 0; ++ else if (from + len > mtd->size) ++ len = mtd->size - from; ++ res = part->master->read(part->master, from + part->offset, ++ len, retlen, buf); ++ if (unlikely(res)) { ++ if (res == -EUCLEAN) ++ mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; ++ if (res == -EBADMSG) ++ mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed; ++ } ++ return res; ++} ++ ++static int part_point(struct mtd_info *mtd, loff_t from, size_t len, ++ size_t *retlen, void **virt, resource_size_t *phys) ++{ ++ struct mtd_part *part = PART(mtd); ++ if (from >= mtd->size) ++ len = 0; ++ else if (from + len > mtd->size) ++ len = mtd->size - from; ++ return part->master->point (part->master, from + part->offset, ++ len, retlen, virt, phys); ++} ++ ++static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) ++{ ++ struct mtd_part *part = PART(mtd); ++ ++ part->master->unpoint(part->master, from + part->offset, len); ++} ++ ++static unsigned long part_get_unmapped_area(struct mtd_info *mtd, ++ unsigned long len, ++ unsigned long offset, ++ unsigned long flags) ++{ ++ struct mtd_part *part = PART(mtd); ++ ++ offset += part->offset; ++ return part->master->get_unmapped_area(part->master, len, offset, ++ flags); ++} ++ ++static int part_read_oob(struct mtd_info *mtd, loff_t from, ++ struct mtd_oob_ops *ops) ++{ ++ struct mtd_part *part = PART(mtd); ++ int res; ++ ++ if (from >= mtd->size) ++ return -EINVAL; ++ if (ops->datbuf && from + ops->len > mtd->size) ++ return -EINVAL; ++ ++ /* ++ * If OOB is also requested, make sure that we do not read past the end ++ * of this partition. 
++ */ ++ if (ops->oobbuf) { ++ size_t len, pages; ++ ++ if (ops->mode == MTD_OOB_AUTO) ++ len = mtd->oobavail; ++ else ++ len = mtd->oobsize; ++ pages = mtd_div_by_ws(mtd->size, mtd); ++ pages -= mtd_div_by_ws(from, mtd); ++ if (ops->ooboffs + ops->ooblen > pages * len) ++ return -EINVAL; ++ } ++ ++ res = part->master->read_oob(part->master, from + part->offset, ops); ++ if (unlikely(res)) { ++ if (res == -EUCLEAN) ++ mtd->ecc_stats.corrected++; ++ if (res == -EBADMSG) ++ mtd->ecc_stats.failed++; ++ } ++ return res; ++} ++ ++static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from, ++ size_t len, size_t *retlen, u_char *buf) ++{ ++ struct mtd_part *part = PART(mtd); ++ return part->master->read_user_prot_reg(part->master, from, ++ len, retlen, buf); ++} ++ ++static int part_get_user_prot_info(struct mtd_info *mtd, ++ struct otp_info *buf, size_t len) ++{ ++ struct mtd_part *part = PART(mtd); ++ return part->master->get_user_prot_info(part->master, buf, len); ++} ++ ++static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, ++ size_t len, size_t *retlen, u_char *buf) ++{ ++ struct mtd_part *part = PART(mtd); ++ return part->master->read_fact_prot_reg(part->master, from, ++ len, retlen, buf); ++} ++ ++static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, ++ size_t len) ++{ ++ struct mtd_part *part = PART(mtd); ++ return part->master->get_fact_prot_info(part->master, buf, len); ++} ++ ++static int part_write(struct mtd_info *mtd, loff_t to, size_t len, ++ size_t *retlen, const u_char *buf) ++{ ++ struct mtd_part *part = PART(mtd); ++ if (!(mtd->flags & MTD_WRITEABLE)) ++ return -EROFS; ++ if (to >= mtd->size) ++ len = 0; ++ else if (to + len > mtd->size) ++ len = mtd->size - to; ++ return part->master->write(part->master, to + part->offset, ++ len, retlen, buf); ++} ++ ++static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, ++ size_t *retlen, const u_char *buf) ++{ ++ struct mtd_part *part = PART(mtd); ++ if (!(mtd->flags & MTD_WRITEABLE)) ++ return -EROFS; ++ if (to >= mtd->size) ++ len = 0; ++ else if (to + len > mtd->size) ++ len = mtd->size - to; ++ return part->master->panic_write(part->master, to + part->offset, ++ len, retlen, buf); ++} ++ ++static int part_write_oob(struct mtd_info *mtd, loff_t to, ++ struct mtd_oob_ops *ops) ++{ ++ struct mtd_part *part = PART(mtd); ++ ++ if (!(mtd->flags & MTD_WRITEABLE)) ++ return -EROFS; ++ ++ if (to >= mtd->size) ++ return -EINVAL; ++ if (ops->datbuf && to + ops->len > mtd->size) ++ return -EINVAL; ++ return part->master->write_oob(part->master, to + part->offset, ops); ++} ++ ++static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, ++ size_t len, size_t *retlen, u_char *buf) ++{ ++ struct mtd_part *part = PART(mtd); ++ return part->master->write_user_prot_reg(part->master, from, ++ len, retlen, buf); ++} ++ ++static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, ++ size_t len) ++{ ++ struct mtd_part *part = PART(mtd); ++ return part->master->lock_user_prot_reg(part->master, from, len); ++} ++ ++static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, ++ unsigned long count, loff_t to, size_t *retlen) ++{ ++ struct mtd_part *part = PART(mtd); ++ if (!(mtd->flags & MTD_WRITEABLE)) ++ return -EROFS; ++ return part->master->writev(part->master, vecs, count, ++ to + part->offset, retlen); ++} ++ ++static int part_erase(struct mtd_info *mtd, struct erase_info *instr) ++{ ++ struct mtd_part *part = PART(mtd); ++ int ret; ++ if (!(mtd->flags 
& MTD_WRITEABLE)) ++ return -EROFS; ++ if (instr->addr >= mtd->size) ++ return -EINVAL; ++ instr->addr += part->offset; ++ ret = part->master->erase(part->master, instr); ++ if (ret) { ++ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) ++ instr->fail_addr -= part->offset; ++ instr->addr -= part->offset; ++ } ++ return ret; ++} ++ ++void mtd_erase_callback(struct erase_info *instr) ++{ ++ if (instr->mtd->erase == part_erase) { ++ struct mtd_part *part = PART(instr->mtd); ++ ++ if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) ++ instr->fail_addr -= part->offset; ++ instr->addr -= part->offset; ++ } ++ if (instr->callback) ++ instr->callback(instr); ++} ++EXPORT_SYMBOL_GPL(mtd_erase_callback); ++ ++static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) ++{ ++ struct mtd_part *part = PART(mtd); ++ if ((len + ofs) > mtd->size) ++ return -EINVAL; ++ return part->master->lock(part->master, ofs + part->offset, len); ++} ++ ++static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) ++{ ++ struct mtd_part *part = PART(mtd); ++ if ((len + ofs) > mtd->size) ++ return -EINVAL; ++ return part->master->unlock(part->master, ofs + part->offset, len); ++} ++ ++static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) ++{ ++ struct mtd_part *part = PART(mtd); ++ if ((len + ofs) > mtd->size) ++ return -EINVAL; ++ return part->master->is_locked(part->master, ofs + part->offset, len); ++} ++ ++static void part_sync(struct mtd_info *mtd) ++{ ++ struct mtd_part *part = PART(mtd); ++ part->master->sync(part->master); ++} ++ ++static int part_suspend(struct mtd_info *mtd) ++{ ++ struct mtd_part *part = PART(mtd); ++ return part->master->suspend(part->master); ++} ++ ++static void part_resume(struct mtd_info *mtd) ++{ ++ struct mtd_part *part = PART(mtd); ++ part->master->resume(part->master); ++} ++ ++static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) ++{ ++ struct mtd_part *part = PART(mtd); ++ if (ofs >= mtd->size) ++ return -EINVAL; ++ ofs += part->offset; ++ return part->master->block_isbad(part->master, ofs); ++} ++ ++static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) ++{ ++ struct mtd_part *part = PART(mtd); ++ int res; ++ ++ if (!(mtd->flags & MTD_WRITEABLE)) ++ return -EROFS; ++ if (ofs >= mtd->size) ++ return -EINVAL; ++ ofs += part->offset; ++ res = part->master->block_markbad(part->master, ofs); ++ if (!res) ++ mtd->ecc_stats.badblocks++; ++ return res; ++} ++ ++static inline void free_partition(struct mtd_part *p) ++{ ++ kfree(p->mtd.name); ++ kfree(p); ++} ++ ++/* ++ * This function unregisters and destroy all slave MTD objects which are ++ * attached to the given master MTD object. 
++ */ ++ ++int del_mtd_partitions(struct mtd_info *master) ++{ ++ struct mtd_part *slave, *next; ++ int ret, err = 0; ++ ++ mutex_lock(&mtd_partitions_mutex); ++ list_for_each_entry_safe(slave, next, &mtd_partitions, list) ++ if (slave->master == master) { ++ ret = del_mtd_device(&slave->mtd); ++ if (ret < 0) { ++ err = ret; ++ continue; ++ } ++ list_del(&slave->list); ++ free_partition(slave); ++ } ++ mutex_unlock(&mtd_partitions_mutex); ++ ++ return err; ++} ++EXPORT_SYMBOL(del_mtd_partitions); ++ ++static struct mtd_part *allocate_partition(struct mtd_info *master, ++ const struct mtd_partition *part, int partno, ++ uint64_t cur_offset) ++{ ++ struct mtd_part *slave; ++ char *name; ++ ++ /* allocate the partition structure */ ++ slave = kzalloc(sizeof(*slave), GFP_KERNEL); ++ name = kstrdup(part->name, GFP_KERNEL); ++ if (!name || !slave) { ++ printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n", ++ master->name); ++ kfree(name); ++ kfree(slave); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ /* set up the MTD object for this partition */ ++ slave->mtd.type = master->type; ++ slave->mtd.flags = master->flags & ~part->mask_flags; ++ slave->mtd.size = part->size; ++ slave->mtd.writesize = master->writesize; ++ slave->mtd.writebufsize = master->writebufsize; ++ slave->mtd.oobsize = master->oobsize; ++ slave->mtd.oobavail = master->oobavail; ++ slave->mtd.subpage_sft = master->subpage_sft; ++ ++ slave->mtd.name = name; ++ slave->mtd.owner = master->owner; ++ slave->mtd.backing_dev_info = master->backing_dev_info; ++ ++ /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone ++ * to have the same data be in two different partitions. ++ */ ++ slave->mtd.dev.parent = master->dev.parent; ++ ++ slave->mtd.read = part_read; ++ slave->mtd.write = part_write; ++ ++ if (master->panic_write) ++ slave->mtd.panic_write = part_panic_write; ++ ++ if (master->point && master->unpoint) { ++ slave->mtd.point = part_point; ++ slave->mtd.unpoint = part_unpoint; ++ } ++ ++ if (master->get_unmapped_area) ++ slave->mtd.get_unmapped_area = part_get_unmapped_area; ++ if (master->read_oob) ++ slave->mtd.read_oob = part_read_oob; ++ if (master->write_oob) ++ slave->mtd.write_oob = part_write_oob; ++ if (master->read_user_prot_reg) ++ slave->mtd.read_user_prot_reg = part_read_user_prot_reg; ++ if (master->read_fact_prot_reg) ++ slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg; ++ if (master->write_user_prot_reg) ++ slave->mtd.write_user_prot_reg = part_write_user_prot_reg; ++ if (master->lock_user_prot_reg) ++ slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg; ++ if (master->get_user_prot_info) ++ slave->mtd.get_user_prot_info = part_get_user_prot_info; ++ if (master->get_fact_prot_info) ++ slave->mtd.get_fact_prot_info = part_get_fact_prot_info; ++ if (master->sync) ++ slave->mtd.sync = part_sync; ++ if (!partno && !master->dev.class && master->suspend && master->resume) { ++ slave->mtd.suspend = part_suspend; ++ slave->mtd.resume = part_resume; ++ } ++ if (master->writev) ++ slave->mtd.writev = part_writev; ++ if (master->lock) ++ slave->mtd.lock = part_lock; ++ if (master->unlock) ++ slave->mtd.unlock = part_unlock; ++ if (master->is_locked) ++ slave->mtd.is_locked = part_is_locked; ++ if (master->block_isbad) ++ slave->mtd.block_isbad = part_block_isbad; ++ if (master->block_markbad) ++ slave->mtd.block_markbad = part_block_markbad; ++ slave->mtd.erase = part_erase; ++ slave->master = master; ++ slave->offset = part->offset; ++ ++ if (slave->offset == MTDPART_OFS_APPEND) ++ 
slave->offset = cur_offset; ++ if (slave->offset == MTDPART_OFS_NXTBLK) { ++ slave->offset = cur_offset; ++ if (mtd_mod_by_eb(cur_offset, master) != 0) { ++ /* Round up to next erasesize */ ++ slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize; ++ printk(KERN_NOTICE "Moving partition %d: " ++ "0x%012llx -> 0x%012llx\n", partno, ++ (unsigned long long)cur_offset, (unsigned long long)slave->offset); ++ } ++ } ++ if (slave->mtd.size == MTDPART_SIZ_FULL) ++ slave->mtd.size = master->size - slave->offset; ++ ++ printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset, ++ (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name); ++ ++ /* let's do some sanity checks */ ++ if (slave->offset >= master->size) { ++ /* let's register it anyway to preserve ordering */ ++ slave->offset = 0; ++ slave->mtd.size = 0; ++ printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", ++ part->name); ++ goto out_register; ++ } ++ if (slave->offset + slave->mtd.size > master->size) { ++ slave->mtd.size = master->size - slave->offset; ++ printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n", ++ part->name, master->name, (unsigned long long)slave->mtd.size); ++ } ++ if (master->numeraseregions > 1) { ++ /* Deal with variable erase size stuff */ ++ int i, max = master->numeraseregions; ++ u64 end = slave->offset + slave->mtd.size; ++ struct mtd_erase_region_info *regions = master->eraseregions; ++ ++ /* Find the first erase regions which is part of this ++ * partition. */ ++ for (i = 0; i < max && regions[i].offset <= slave->offset; i++) ++ ; ++ /* The loop searched for the region _behind_ the first one */ ++ if (i > 0) ++ i--; ++ ++ /* Pick biggest erasesize */ ++ for (; i < max && regions[i].offset < end; i++) { ++ if (slave->mtd.erasesize < regions[i].erasesize) { ++ slave->mtd.erasesize = regions[i].erasesize; ++ } ++ } ++ BUG_ON(slave->mtd.erasesize == 0); ++ } else { ++ /* Single erase size */ ++ slave->mtd.erasesize = master->erasesize; ++ } ++ ++ if ((slave->mtd.flags & MTD_WRITEABLE) && ++ mtd_mod_by_eb(slave->offset, &slave->mtd)) { ++ /* Doesn't start on a boundary of major erase size */ ++ /* FIXME: Let it be writable if it is on a boundary of ++ * _minor_ erase size though */ ++ slave->mtd.flags &= ~MTD_WRITEABLE; ++ printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n", ++ part->name); ++ } ++ if ((slave->mtd.flags & MTD_WRITEABLE) && ++ mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) { ++ slave->mtd.flags &= ~MTD_WRITEABLE; ++ printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n", ++ part->name); ++ } ++ ++ slave->mtd.ecclayout = master->ecclayout; ++ if (master->block_isbad) { ++ uint64_t offs = 0; ++ ++ while (offs < slave->mtd.size) { ++ if (master->block_isbad(master, ++ offs + slave->offset)) ++ slave->mtd.ecc_stats.badblocks++; ++ offs += slave->mtd.erasesize; ++ } ++ } ++ ++out_register: ++ return slave; ++} ++ ++int mtd_add_partition(struct mtd_info *master, char *name, ++ long long offset, long long length) ++{ ++ struct mtd_partition part; ++ struct mtd_part *p, *new; ++ uint64_t start, end; ++ int ret = 0; ++ ++ /* the direct offset is expected */ ++ if (offset == MTDPART_OFS_APPEND || ++ offset == MTDPART_OFS_NXTBLK) ++ return -EINVAL; ++ ++ if (length == MTDPART_SIZ_FULL) ++ length = master->size - offset; ++ ++ if (length <= 0) ++ return -EINVAL; ++ ++ part.name = 
name; ++ part.size = length; ++ part.offset = offset; ++ part.mask_flags = 0; ++ part.ecclayout = NULL; ++ ++ new = allocate_partition(master, &part, -1, offset); ++ if (IS_ERR(new)) ++ return PTR_ERR(new); ++ ++ start = offset; ++ end = offset + length; ++ ++ mutex_lock(&mtd_partitions_mutex); ++ list_for_each_entry(p, &mtd_partitions, list) ++ if (p->master == master) { ++ if ((start >= p->offset) && ++ (start < (p->offset + p->mtd.size))) ++ goto err_inv; ++ ++ if ((end >= p->offset) && ++ (end < (p->offset + p->mtd.size))) ++ goto err_inv; ++ } ++ ++ list_add(&new->list, &mtd_partitions); ++ mutex_unlock(&mtd_partitions_mutex); ++ ++ add_mtd_device(&new->mtd); ++ ++ return ret; ++err_inv: ++ mutex_unlock(&mtd_partitions_mutex); ++ free_partition(new); ++ return -EINVAL; ++} ++EXPORT_SYMBOL_GPL(mtd_add_partition); ++ ++int mtd_del_partition(struct mtd_info *master, int partno) ++{ ++ struct mtd_part *slave, *next; ++ int ret = -EINVAL; ++ ++ mutex_lock(&mtd_partitions_mutex); ++ list_for_each_entry_safe(slave, next, &mtd_partitions, list) ++ if ((slave->master == master) && ++ (slave->mtd.index == partno)) { ++ ret = del_mtd_device(&slave->mtd); ++ if (ret < 0) ++ break; ++ ++ list_del(&slave->list); ++ free_partition(slave); ++ break; ++ } ++ mutex_unlock(&mtd_partitions_mutex); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(mtd_del_partition); ++ ++/* ++ * This function, given a master MTD object and a partition table, creates ++ * and registers slave MTD objects which are bound to the master according to ++ * the partition definitions. ++ * ++ * We don't register the master, or expect the caller to have done so, ++ * for reasons of data integrity. ++ */ ++ ++int add_mtd_partitions(struct mtd_info *master, ++ const struct mtd_partition *parts, ++ int nbparts) ++{ ++ struct mtd_part *slave; ++ uint64_t cur_offset = 0; ++ int i; ++ ++ printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); ++ ++ for (i = 0; i < nbparts; i++) { ++ slave = allocate_partition(master, parts + i, i, cur_offset); ++ if (IS_ERR(slave)) ++ return PTR_ERR(slave); ++ ++ mutex_lock(&mtd_partitions_mutex); ++ list_add(&slave->list, &mtd_partitions); ++ mutex_unlock(&mtd_partitions_mutex); ++ ++ add_mtd_device(&slave->mtd); ++ ++ cur_offset = slave->offset + slave->mtd.size; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(add_mtd_partitions); ++ ++static DEFINE_SPINLOCK(part_parser_lock); ++static LIST_HEAD(part_parsers); ++ ++static struct mtd_part_parser *get_partition_parser(const char *name) ++{ ++ struct mtd_part_parser *p, *ret = NULL; ++ ++ spin_lock(&part_parser_lock); ++ ++ list_for_each_entry(p, &part_parsers, list) ++ if (!strcmp(p->name, name) && try_module_get(p->owner)) { ++ ret = p; ++ break; ++ } ++ ++ spin_unlock(&part_parser_lock); ++ ++ return ret; ++} ++ ++int register_mtd_parser(struct mtd_part_parser *p) ++{ ++ spin_lock(&part_parser_lock); ++ list_add(&p->list, &part_parsers); ++ spin_unlock(&part_parser_lock); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(register_mtd_parser); ++ ++int deregister_mtd_parser(struct mtd_part_parser *p) ++{ ++ spin_lock(&part_parser_lock); ++ list_del(&p->list); ++ spin_unlock(&part_parser_lock); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(deregister_mtd_parser); ++ ++int parse_mtd_partitions(struct mtd_info *master, const char **types, ++ struct mtd_partition **pparts, unsigned long origin) ++{ ++ struct mtd_part_parser *parser; ++ int ret = 0; ++ ++ for ( ; ret <= 0 && *types; types++) { ++ parser = get_partition_parser(*types); ++ if (!parser && 
!request_module("%s", *types)) ++ parser = get_partition_parser(*types); ++ if (!parser) { ++ printk(KERN_NOTICE "%s partition parsing not available\n", ++ *types); ++ continue; ++ } ++ ret = (*parser->parse_fn)(master, pparts, origin); ++ if (ret > 0) { ++ printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", ++ ret, parser->name, master->name); ++ } ++ put_partition_parser(parser); ++ } ++ return ret; ++} ++EXPORT_SYMBOL_GPL(parse_mtd_partitions); ++ ++int mtd_is_partition(struct mtd_info *mtd) ++{ ++ struct mtd_part *part; ++ int ispart = 0; ++ ++ mutex_lock(&mtd_partitions_mutex); ++ list_for_each_entry(part, &mtd_partitions, list) ++ if (&part->mtd == mtd) { ++ ispart = 1; ++ break; ++ } ++ mutex_unlock(&mtd_partitions_mutex); ++ ++ return ispart; ++} ++EXPORT_SYMBOL_GPL(mtd_is_partition); -- cgit v1.2.3
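Usage sketch (not part of the commit; added for illustration): with brcm.patch applied and a CONFIG_BCM47XX kernel configuration, the new vmlinuz.elf target is built by the default target anyway (all-$(CONFIG_BCM47XX) += vmlinuz.elf in the arch/mips/Makefile hunk) and can also be requested directly, as shown below. The cross-compiler prefix is an assumption; substitute your own MIPS toolchain.

    # build the self-relocating ELF zboot image added by brcm.patch
    # (CROSS_COMPILE prefix mips-linux- is an assumption, use your toolchain)
    make ARCH=mips CROSS_COMPILE=mips-linux- vmlinuz.elf
    # result: $(objtree)/vmlinuz.elf; its entry code (selfreloc.S) copies the
    # embedded vmlinuz.bin to VMLINUZ_LOAD_ADDRESS (0xffffffff80900000 on
    # BCM47XX, i.e. just after CFE) and jumps there, so the board's CFE
    # bootloader only needs to load a plain ELF image.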