diff -ruw linux-2.6.20.14/arch/arm/boot/compressed/head.S linux-2.6.20.14-fbx/arch/arm/boot/compressed/head.S
--- linux-2.6.20.14/arch/arm/boot/compressed/head.S	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/arch/arm/boot/compressed/head.S	2010-12-29 19:30:05.191441528 +0100
@@ -749,6 +749,11 @@
 1:		ldr	r3, [r1], r11		@ s/w flush D cache
 		teq	r1, r2
 		bne	1b
+#ifdef CONFIG_ARCH_MV88FXX81
+		/* 4 way cache, load of new cache lines won't be enough. */
+2:		mrc     p15, 0, r15, c7, c14, 3         @ test,clean,invalidate
+		bne     2b
+#endif
 
 		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
 		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
diff -ruw linux-2.6.20.14/arch/arm/boot/compressed/vmlinux.lds.in linux-2.6.20.14-fbx/arch/arm/boot/compressed/vmlinux.lds.in
--- linux-2.6.20.14/arch/arm/boot/compressed/vmlinux.lds.in	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/arch/arm/boot/compressed/vmlinux.lds.in	2010-12-29 19:30:05.191441528 +0100
@@ -35,12 +35,12 @@
   .got			: { *(.got) }
   _got_end = .;
   .got.plt		: { *(.got.plt) }
-  .data			: { *(.data) }
+  .data			: { *(.data) *(.data.*) }
   _edata = .;
 
   . = BSS_START;
   __bss_start = .;
-  .bss			: { *(.bss) }
+  .bss			: { *(.bss) *(.bss.*) }
   _end = .;
 
   .stack (NOLOAD)	: { *(.stack) }
diff -ruw linux-2.6.20.14/arch/arm/Kconfig linux-2.6.20.14-fbx/arch/arm/Kconfig
--- linux-2.6.20.14/arch/arm/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/arch/arm/Kconfig	2011-09-09 16:14:20.260347746 +0200
@@ -331,6 +331,11 @@
 	help
 	  Support for TI's OMAP platform (OMAP1 and OMAP2).
 
+config ARCH_MV88FXX81
+	bool "Marvell 88FXX81"
+	help
+	  Say Y here if you intend to run this kernel on a Marvell 88FXX81.
+
 endchoice
 
 source "arch/arm/mach-clps711x/Kconfig"
@@ -379,6 +384,8 @@
 
 source "arch/arm/mach-at91rm9200/Kconfig"
 
+source "arch/arm/mach-mv88fxx81/Kconfig"
+
 source "arch/arm/mach-netx/Kconfig"
 
 # Definitions to make life easier
@@ -441,7 +448,7 @@
 	bool
 
 config PCI
-	bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_IXP4XX
+	bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_MV88FXX81
 	help
 	  Find out whether you have a PCI motherboard. PCI is the name of a
 	  bus system, i.e. the way the CPU talks to the other stuff inside
@@ -463,6 +470,10 @@
 
 source "drivers/pcmcia/Kconfig"
 
+# Begin Freebox changed code
+source "drivers/pci/hotplug/Kconfig"
+# End Freebox changed code
+
 endmenu
 
 menu "Kernel Features"
@@ -551,6 +562,13 @@
 	default OMAP_32K_TIMER_HZ if ARCH_OMAP && OMAP_32K_TIMER
 	default 100
 
+config GCSECTIONS
+	bool "Garbage collect unused sections"
+	default n
+	depends on !MODULES
+	help
+	  Use ld's --gc-sections option to garbage collect unused sections.
+
 config AEABI
 	bool "Use the ARM EABI to compile the kernel"
 	help
@@ -893,8 +911,19 @@
 
 if ALIGNMENT_TRAP || !CPU_CP15_MMU
 source "drivers/mtd/Kconfig"
+source "drivers/fbxmtd/Kconfig"
 endif
 
+source "drivers/fbxdmamux/Kconfig"
+
+source "drivers/fbxgpio/Kconfig"
+
+source "drivers/fbxpanel/Kconfig"
+
+source "drivers/fbxspi/Kconfig"
+
+source "drivers/fbxwatchdog/Kconfig"
+
 source "drivers/parport/Kconfig"
 
 source "drivers/pnp/Kconfig"
@@ -962,6 +991,12 @@
 
 source "drivers/rtc/Kconfig"
 
+# Begin Freebox changed code
+
+source "drivers/telephony/Kconfig"
+
+# End Freebox changed code
+
 endmenu
 
 source "fs/Kconfig"
@@ -975,3 +1010,8 @@
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+
+config CROSS_PATH
+	string "cross path"
+	default "arm-linux-"
diff -ruw linux-2.6.20.14/arch/arm/kernel/bios32.c linux-2.6.20.14-fbx/arch/arm/kernel/bios32.c
--- linux-2.6.20.14/arch/arm/kernel/bios32.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/arch/arm/kernel/bios32.c	2010-12-29 19:30:05.201441482 +0100
@@ -521,6 +521,25 @@
 	return irq;
 }
 
+
+#ifdef CONFIG_HOTPLUG_PCI
+/*
+ *	pci hotplug won't map the irq for new devices, so use a fixup
+ *	to work around this.
+ */
+void pci_fixup_hotplug_irq(struct pci_dev *dev)
+{
+	int irq;
+
+	irq = pcibios_map_irq(dev, PCI_SLOT(dev->devfn), dev->pin);
+	if (irq == -1)
+		irq = 0;
+	dev->irq = irq;
+	pcibios_update_irq(dev, dev->irq);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_hotplug_irq);
+#endif
+
 static void __init pcibios_init_hw(struct hw_pci *hw)
 {
 	struct pci_sys_data *sys = NULL;
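For context, a hedged sketch (not part of the patch) of when a header fixup such as pci_fixup_hotplug_irq() above runs: the PCI core applies every hook registered with DECLARE_PCI_FIXUP_HEADER() while it scans a device, so with PCI_ANY_ID/PCI_ANY_ID the hook fires for each device a hotplug rescan discovers.

#include <linux/pci.h>

/*
 * Roughly what the 2.6.20 core does while setting up a freshly scanned
 * device: pci_fixup_device() walks the header-fixup table and calls
 * every matching hook, including the one registered above.
 */
static void apply_header_fixups(struct pci_dev *dev)
{
	pci_fixup_device(pci_fixup_header, dev);
}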
diff -ruw linux-2.6.20.14/arch/arm/kernel/setup.c linux-2.6.20.14-fbx/arch/arm/kernel/setup.c
--- linux-2.6.20.14/arch/arm/kernel/setup.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/arch/arm/kernel/setup.c	2010-12-29 19:30:05.211445427 +0100
@@ -698,7 +698,19 @@
 
 static int __init parse_tag_cmdline(const struct tag *tag)
 {
-	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
+	/*
+	 * let CONFIG_CMDLINE override bootloader supplied command
+	 * line.
+	 */
+	if (default_command_line[0]) {
+		printk(KERN_INFO "ignoring bootloader-supplied "
+		       "command line.\n");
+		return 0;
+	}
+
+	printk(KERN_INFO "using bootloader-supplied command line.\n");
+	strlcpy(default_command_line, tag->u.cmdline.cmdline,
+		COMMAND_LINE_SIZE);
 	return 0;
 }
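The early return above relies on default_command_line being pre-filled at build time; a hedged reminder of the stock 2.6.20 declaration this assumes (unchanged by the patch):

/*
 * Assumed declaration from arch/arm/kernel/setup.c: the buffer is
 * statically initialised from CONFIG_CMDLINE, so a non-empty first byte
 * means a command line was configured at build time and the
 * bootloader-supplied ATAG one is ignored.
 */
static char __initdata default_command_line[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;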
 
diff -ruw linux-2.6.20.14/arch/arm/kernel/vmlinux.lds.S linux-2.6.20.14-fbx/arch/arm/kernel/vmlinux.lds.S
--- linux-2.6.20.14/arch/arm/kernel/vmlinux.lds.S	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/arch/arm/kernel/vmlinux.lds.S	2010-12-29 19:30:05.211445427 +0100
@@ -29,17 +29,17 @@
 			*(.init.text)
 			_einittext = .;
 		__proc_info_begin = .;
-			*(.proc.info.init)
+			KEEP(*(.proc.info.init))
 		__proc_info_end = .;
 		__arch_info_begin = .;
-			*(.arch.info.init)
+			KEEP(*(.arch.info.init))
 		__arch_info_end = .;
 		__tagtable_begin = .;
-			*(.taglist.init)
+			KEEP(*(.taglist.init))
 		__tagtable_end = .;
 		. = ALIGN(16);
 		__setup_start = .;
-			*(.init.setup)
+			KEEP(*(.init.setup))
 		__setup_end = .;
 		__early_begin = .;
 			*(.early_param.init)
@@ -55,7 +55,7 @@
 		__security_initcall_end = .;
 		. = ALIGN(32);
 		__initramfs_start = .;
-			usr/built-in.o(.init.ramfs)
+			KEEP(usr/built-in.o(.init.ramfs))
 		__initramfs_end = .;
 		. = ALIGN(64);
 		__per_cpu_start = .;
@@ -82,6 +82,7 @@
 	.text : {			/* Real text segment		*/
 		_text = .;		/* Text and read-only data	*/
 			*(.text)
+			*(.text.*)
 			SCHED_TEXT
 			LOCK_TEXT
 #ifdef CONFIG_MMU
@@ -142,7 +143,7 @@
 		. = ALIGN(32);
 		__start___ex_table = .;
 #ifdef CONFIG_MMU
-		*(__ex_table)
+		KEEP(*(__ex_table))
 #endif
 		__stop___ex_table = .;
 
@@ -150,6 +151,7 @@
 		 * and the usual data section
 		 */
 		*(.data)
+		*(.data.*)
 		CONSTRUCTORS
 
 		_edata = .;
@@ -158,6 +160,7 @@
 	.bss : {
 		__bss_start = .;	/* BSS				*/
 		*(.bss)
+		*(.bss.*)
 		*(COMMON)
 		_end = .;
 	}
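The KEEP() wrappers matter once CONFIG_GCSECTIONS passes --gc-sections to the linker: tables such as .proc.info.init, .taglist.init, .init.setup and __ex_table are only reached through linker-generated boundary symbols, never by a direct reference, so without KEEP() they would be garbage-collected. A minimal sketch of such a consumer, assuming the standard struct obs_kernel_param from <linux/init.h> (handle_one() is a hypothetical helper):

#include <linux/init.h>

extern struct obs_kernel_param __setup_start[], __setup_end[];

/*
 * Nothing ever takes the address of an individual .init.setup entry,
 * only of the section boundaries, which is exactly the pattern that
 * --gc-sections cannot see without KEEP().
 */
static void __init walk_setup_table(void)
{
	struct obs_kernel_param *p;

	for (p = __setup_start; p < __setup_end; p++)
		handle_one(p->str, p->setup_func);	/* hypothetical */
}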
diff -ruw linux-2.6.20.14/arch/arm/Makefile linux-2.6.20.14-fbx/arch/arm/Makefile
--- linux-2.6.20.14/arch/arm/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/arch/arm/Makefile	2011-08-30 20:20:50.157744887 +0200
@@ -10,7 +10,17 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux	:=-p --no-undefined -X
+# Begin Freebox changed code
+ifneq ($(CONFIG_CROSS_PATH),)
+CROSS_COMPILE		:= $(patsubst "%",%,$(CONFIG_CROSS_PATH))
+else
+$(error CONFIG_CROSS_PATH is not set)
+endif
+# End Freebox changed code
+
+ldflags-$(CONFIG_GCSECTIONS) += --gc-sections
+
+LDFLAGS_vmlinux	:=-p --no-undefined -X $(ldflags-y)
 CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 OBJCOPYFLAGS	:=-O binary -R .note -R .comment -S
 GZFLAGS		:=-9
@@ -31,6 +41,11 @@
 CFLAGS		+=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
 endif
 
+ifeq ($(CONFIG_GCSECTIONS),y)
+CFLAGS		+= -ffunction-sections -fdata-sections
+endif
+
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
 CPPFLAGS	+= -mbig-endian
 AS		+= -EB
@@ -85,6 +100,14 @@
 
 CHECKFLAGS	+= -D__arm__
 
+# Begin Freebox added code
+
+ifneq ($(wildcard $(TOPDIR)/extdrivers/include/freebox),)
+CFLAGS   += -Iextdrivers/include/freebox
+endif
+
+# End Freebox added code
+
 #Default value
 head-y		:= arch/arm/kernel/head$(MMUEXT).o arch/arm/kernel/init_task.o
 textofs-y	:= 0x00008000
@@ -128,6 +151,9 @@
  machine-$(CONFIG_ARCH_EP93XX)     := ep93xx
  machine-$(CONFIG_ARCH_PNX4008)    := pnx4008
  machine-$(CONFIG_ARCH_NETX)       := netx
+# Begin Freebox changed code
+ machine-$(CONFIG_ARCH_MV88FXX81)  := mv88fxx81
+# End Freebox changed code
 
 ifeq ($(CONFIG_ARCH_EBSA110),y)
 # This is what happens if you forget the IOCS16 line.
diff -ruw linux-2.6.20.14/arch/arm/mm/consistent.c linux-2.6.20.14-fbx/arch/arm/mm/consistent.c
--- linux-2.6.20.14/arch/arm/mm/consistent.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/arch/arm/mm/consistent.c	2010-12-29 19:30:05.241441208 +0100
@@ -485,6 +485,9 @@
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end   = start + size;
 
+	if (!size)
+		return;
+
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
 		dmac_inv_range(start, end);
diff -ruw linux-2.6.20.14/arch/arm/mm/Kconfig linux-2.6.20.14-fbx/arch/arm/mm/Kconfig
--- linux-2.6.20.14/arch/arm/mm/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/arch/arm/mm/Kconfig	2010-12-29 19:30:05.241441208 +0100
@@ -171,8 +171,8 @@
 # ARM926T
 config CPU_ARM926T
 	bool "Support ARM926T processor"
-	depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261
-	default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261
+	depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_MV88FXX81
+	default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || ARCH_MV88FXX81
 	select CPU_32v5
 	select CPU_ABRT_EV5TJ
 	select CPU_CACHE_VIVT
diff -ruw linux-2.6.20.14/arch/arm/tools/mach-types linux-2.6.20.14-fbx/arch/arm/tools/mach-types
--- linux-2.6.20.14/arch/arm/tools/mach-types	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/arch/arm/tools/mach-types	2010-12-29 19:30:05.251436905 +0100
@@ -1040,6 +1040,7 @@
 ai2410			MACH_AI2410		AI2410			1027
 ixp465			MACH_IXP465		IXP465			1028
 balloon3		MACH_BALLOON3		BALLOON3		1029
+mv_orion		BOARD_ORION		MV_ORION		526
 heins			MACH_HEINS		HEINS			1030
 mpluseva		MACH_MPLUSEVA		MPLUSEVA		1031
 rt042			MACH_RT042		RT042			1032
@@ -1242,3 +1243,10 @@
 tecon_tmezon		MACH_TECON_TMEZON	TECON_TMEZON		1231
 zylonite		MACH_ZYLONITE		ZYLONITE		1233
 gene1270		MACH_GENE1270		GENE1270		1234
+
+# legacy entry for fbxo1_a
+fbxo1_a_legacy		BOARD_FBXO1_A		FBXO1_A_LEGACY		1223
+# that is the official one.
+fbxo1_a			BOARD_FBXO1_A		FBXO1_A			1302
+# this one is 100% unofficial.
+fbx_node		BOARD_FBX_NODE		FBX_NODE		1303
diff -ruw linux-2.6.20.14/drivers/char/Kconfig linux-2.6.20.14-fbx/drivers/char/Kconfig
--- linux-2.6.20.14/drivers/char/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/char/Kconfig	2010-12-29 19:30:06.691439775 +0100
@@ -372,19 +372,56 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called istallion.
 
-config AU1000_UART
-	bool "Enable Au1000 UART Support"
-	depends on SERIAL_NONSTANDARD && MIPS
+config AU1X00_GPIO
+	tristate "Alchemy Au1000 GPIO device support"
+	depends on MIPS && SOC_AU1X00
+
+config TS_AU1X00_ADS7846
+	tristate "Au1000/ADS7846 touchscreen support"
+	depends on MIPS && SOC_AU1X00
+
+config SIBYTE_SB1250_DUART
+	bool "Support for BCM1xxx onchip DUART"
+	depends on MIPS && SIBYTE_SB1xxx_SOC=y
+
+config SIBYTE_SB1250_DUART_CONSOLE
+	bool "Console on BCM1xxx DUART"
+	depends on SIBYTE_SB1250_DUART
+
+config SERIAL_DEC
+	bool "DECstation serial support"
+	depends on MACH_DECSTATION
+	default y
 	help
-	  If you have an Alchemy AU1000 processor (MIPS based) and you want
-	  to use serial ports, say Y.  Otherwise, say N.
+	  This selects whether you want to be asked about drivers for
+	  DECstation serial ports.
+
+	  Note that the answer to this question won't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about DECstation serial ports.
+
+	  If unsure, say Y.
+
+config SERIAL_DEC_CONSOLE
+	bool "Support for console on a DECstation serial port"
+	depends on SERIAL_DEC
+	default y
+	help
+	  If you say Y here, it will be possible to use a serial port as the
+	  system console (the system console is the device which receives all
+	  kernel messages and warnings and which allows logins in single user
+	  mode).  Note that the firmware uses ttyS0 as the serial console on
+	  the Maxine and ttyS2 on the others.
+
+	  If unsure, say Y.
 
-config AU1000_SERIAL_CONSOLE
-	bool "Enable Au1000 serial console"
-	depends on AU1000_UART
+config ZS
+	bool "Z85C30 Serial Support"
+	depends on SERIAL_DEC
+	default y
 	help
-	  If you have an Alchemy AU1000 processor (MIPS based) and you want
-	  to use a console on a serial port, say Y.  Otherwise, say N.
+	  Documentation on the Zilog Z85C30 serial communications controller
+	  is downloadable at <http://www.zilog.com/pdfs/serial/z85c30.pdf>.
 
 config A2232
 	tristate "Commodore A2232 serial support (EXPERIMENTAL)"
diff -ruw linux-2.6.20.14/drivers/char/Makefile linux-2.6.20.14-fbx/drivers/char/Makefile
--- linux-2.6.20.14/drivers/char/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/char/Makefile	2010-12-29 19:30:06.691439775 +0100
@@ -32,6 +32,7 @@
 obj-$(CONFIG_ATARI_DSP56K)	+= dsp56k.o
 obj-$(CONFIG_MOXA_SMARTIO)	+= mxser.o
 obj-$(CONFIG_MOXA_SMARTIO_NEW)	+= mxser_new.o
+obj-$(CONFIG_SIBYTE_SB1250_DUART) += sb1250_duart.o
 obj-$(CONFIG_COMPUTONE)		+= ip2/
 obj-$(CONFIG_RISCOM8)		+= riscom8.o
 obj-$(CONFIG_ISI)		+= isicom.o
@@ -54,6 +55,7 @@
 obj-$(CONFIG_VIOTAPE)		+= viotape.o
 obj-$(CONFIG_HVCS)		+= hvcs.o
 obj-$(CONFIG_SGI_MBCS)		+= mbcs.o
+obj-$(CONFIG_SERIAL_DEC)	+= decserial.o
 obj-$(CONFIG_BRIQ_PANEL)	+= briq_panel.o
 
 obj-$(CONFIG_PRINTER)		+= lp.o
@@ -80,6 +82,7 @@
 obj-$(CONFIG_DS1620)		+= ds1620.o
 obj-$(CONFIG_HW_RANDOM)		+= hw_random/
 obj-$(CONFIG_COBALT_LCD)	+= lcd.o
+obj-$(CONFIG_AU1000_GPIO)	+= au1000_gpio.o
 obj-$(CONFIG_PPDEV)		+= ppdev.o
 obj-$(CONFIG_NWBUTTON)		+= nwbutton.o
 obj-$(CONFIG_NWFLASH)		+= nwflash.o
diff -ruw linux-2.6.20.14/drivers/char/random.c linux-2.6.20.14-fbx/drivers/char/random.c
--- linux-2.6.20.14/drivers/char/random.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/char/random.c	2010-12-29 19:30:06.741441217 +0100
@@ -656,6 +656,13 @@
 	add_timer_randomness(irq_timer_state[irq], 0x100 + irq);
 }
 
+void add_raw_randomness(uint8_t *buf, int nbytes)
+{
+	add_entropy_words(&input_pool, (uint32_t *)buf, (nbytes + 3) / 4);
+	credit_entropy_store(&input_pool, nbytes);
+
+}
+
 #ifdef CONFIG_BLOCK
 void add_disk_randomness(struct gendisk *disk)
 {
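A hedged usage sketch for the new export (read_hw_rng() is hypothetical); note that add_raw_randomness() mixes (nbytes + 3) / 4 whole 32-bit words, so callers should pass a multiple of four bytes to avoid mixing in bytes past the end of the buffer:

#include <linux/types.h>

/* hypothetical driver feeding hardware random bytes into the input pool */
static void example_feed_entropy(void)
{
	uint8_t sample[16];

	read_hw_rng(sample, sizeof(sample));		/* hypothetical */
	add_raw_randomness(sample, sizeof(sample));
}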
diff -ruw linux-2.6.20.14/drivers/i2c/busses/i2c-mv64xxx.c linux-2.6.20.14-fbx/drivers/i2c/busses/i2c-mv64xxx.c
--- linux-2.6.20.14/drivers/i2c/busses/i2c-mv64xxx.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/i2c/busses/i2c-mv64xxx.c	2010-12-29 19:30:06.811487955 +0100
@@ -520,6 +520,7 @@
 		rc = -ENXIO;
 		goto exit_unmap_regs;
 	}
+	drv_data->adapter.dev.parent = &pd->dev;
 	drv_data->adapter.id = I2C_HW_MV64XXX;
 	drv_data->adapter.algo = &mv64xxx_i2c_algo;
 	drv_data->adapter.owner = THIS_MODULE;
diff -ruw linux-2.6.20.14/drivers/i2c/busses/Kconfig linux-2.6.20.14-fbx/drivers/i2c/busses/Kconfig
--- linux-2.6.20.14/drivers/i2c/busses/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/i2c/busses/Kconfig	2010-12-29 19:30:06.811487955 +0100
@@ -545,8 +545,8 @@
 	  time).  If unsure, say N.
 
 config I2C_MV64XXX
-	tristate "Marvell mv64xxx I2C Controller"
-	depends on I2C && MV64X60 && EXPERIMENTAL
+	tristate "Marvell mv64xxx/mv88fxx81 I2C Controller"
+	depends on I2C && (MV64X60 || ARCH_MV88FXX81) && EXPERIMENTAL
 	help
 	  If you say yes to this option, support will be included for the
 	  built-in I2C interface on the Marvell 64xxx line of host bridges.
@@ -564,4 +564,10 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-pnx.
 
+config I2C_GPIO
+	tristate "GPIO I2C bus"
+	depends on I2C
+	select I2C_ALGOBIT
+	default n
+
 endmenu
diff -ruw linux-2.6.20.14/drivers/i2c/busses/Makefile linux-2.6.20.14-fbx/drivers/i2c/busses/Makefile
--- linux-2.6.20.14/drivers/i2c/busses/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/i2c/busses/Makefile	2010-12-29 19:30:06.811487955 +0100
@@ -44,6 +44,7 @@
 obj-$(CONFIG_I2C_VIA)		+= i2c-via.o
 obj-$(CONFIG_I2C_VIAPRO)	+= i2c-viapro.o
 obj-$(CONFIG_I2C_VOODOO3)	+= i2c-voodoo3.o
+obj-$(CONFIG_I2C_GPIO)		+= i2c-gpio.o
 obj-$(CONFIG_SCx200_ACB)	+= scx200_acb.o
 obj-$(CONFIG_SCx200_I2C)	+= scx200_i2c.o
 
diff -ruw linux-2.6.20.14/drivers/ide/Kconfig linux-2.6.20.14-fbx/drivers/ide/Kconfig
--- linux-2.6.20.14/drivers/ide/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/ide/Kconfig	2010-12-29 19:30:06.821445943 +0100
@@ -798,6 +798,16 @@
        default "128"
        depends on BLK_DEV_IDE_AU1XXX
 
+config BLK_DEV_IDE_TANGO2
+       select IDE_GENERIC
+       bool "IDE for SMP863x"
+       depends on TANGO2
+
+config BLK_DEV_IDE_TANGO2_DMA
+       bool "support DMA operations"
+       default y
+       depends on BLK_DEV_IDE_TANGO2
+
 config IDE_ARM
 	def_bool ARM && (ARCH_A5K || ARCH_CLPS7500 || ARCH_RPC || ARCH_SHARK)
 
@@ -925,6 +935,12 @@
 
 	  If unsure, say N.
 
+config BLK_DEV_MMIOIDE
+	tristate "Memory Mapped IDE support"
+	help
+	  This is the IDE driver for Memory Mapped IDE devices. Like
+	  Compact Flash running in True IDE mode.
+
 choice
 	prompt "Type of MPC8xx IDE interface"
 	depends on BLK_DEV_MPC8xx_IDE
@@ -1033,7 +1049,7 @@
 endif
 
 config BLK_DEV_IDEDMA
-	def_bool BLK_DEV_IDEDMA_PCI || BLK_DEV_IDEDMA_PMAC || BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+	def_bool BLK_DEV_IDEDMA_PCI || BLK_DEV_IDEDMA_PMAC || BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA || BLK_DEV_IDE_TANGO2_DMA
 
 config IDEDMA_IVB
 	bool "IGNORE word93 Validation BITS"
@@ -1052,7 +1068,7 @@
 	  It is normally safe to answer Y; however, the default is N.
 
 config IDEDMA_AUTO
-	def_bool IDEDMA_PCI_AUTO || IDEDMA_ICS_AUTO
+	def_bool IDEDMA_PCI_AUTO || IDEDMA_ICS_AUTO || BLK_DEV_IDE_TANGO2_DMA
 
 endif
 
diff -ruw linux-2.6.20.14/drivers/ide/legacy/Makefile linux-2.6.20.14-fbx/drivers/ide/legacy/Makefile
--- linux-2.6.20.14/drivers/ide/legacy/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/ide/legacy/Makefile	2010-12-29 19:30:06.831443133 +0100
@@ -7,6 +7,8 @@
 
 obj-$(CONFIG_BLK_DEV_IDECS)		+= ide-cs.o
 
+obj-$(CONFIG_BLK_DEV_MMIOIDE)		+= mmio-ide.o
+
 # Last of all
 obj-$(CONFIG_BLK_DEV_HD)		+= hd.o
 
diff -ruw linux-2.6.20.14/drivers/ide/Makefile linux-2.6.20.14-fbx/drivers/ide/Makefile
--- linux-2.6.20.14/drivers/ide/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/ide/Makefile	2010-12-29 19:30:06.821445943 +0100
@@ -40,6 +40,9 @@
 # built-in only drivers from h8300/
 ide-core-$(CONFIG_H8300)		+= h8300/ide-h8300.o
 
+# built-in only driver for tango2/
+ide-core-$(CONFIG_BLK_DEV_IDE_TANGO2)	+= tango2/tango2-ide.o
+
 obj-$(CONFIG_BLK_DEV_IDE)		+= ide-core.o
 obj-$(CONFIG_IDE_GENERIC)		+= ide-generic.o
 
diff -ruw linux-2.6.20.14/drivers/ide/mips/Makefile linux-2.6.20.14-fbx/drivers/ide/mips/Makefile
--- linux-2.6.20.14/drivers/ide/mips/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/ide/mips/Makefile	2010-12-29 19:30:06.831443133 +0100
@@ -1,4 +1,4 @@
 obj-$(CONFIG_BLK_DEV_IDE_SWARM)		+= swarm.o
 obj-$(CONFIG_BLK_DEV_IDE_AU1XXX)	+= au1xxx-ide.o
 
-EXTRA_CFLAGS    := -Idrivers/ide
+CFLAGS_au1xxx-ide.o := -Idrivers/ide
diff -ruw linux-2.6.20.14/drivers/leds/Kconfig linux-2.6.20.14-fbx/drivers/leds/Kconfig
--- linux-2.6.20.14/drivers/leds/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/leds/Kconfig	2010-12-29 19:30:06.931441175 +0100
@@ -115,5 +115,12 @@
 	  load average.
 	  If unsure, say Y.
 
+config LEDS_TRIGGER_NETDEV
+	tristate "LED netdev Trigger"
+	depends on LEDS_TRIGGERS
+	help
+	  This allows LEDs to be controlled by the activity of a given
+	  network device.
+
 endmenu
 
diff -ruw linux-2.6.20.14/drivers/leds/Makefile linux-2.6.20.14-fbx/drivers/leds/Makefile
--- linux-2.6.20.14/drivers/leds/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/leds/Makefile	2010-12-29 19:30:06.931441175 +0100
@@ -19,3 +19,4 @@
 obj-$(CONFIG_LEDS_TRIGGER_TIMER)	+= ledtrig-timer.o
 obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK)	+= ledtrig-ide-disk.o
 obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT)	+= ledtrig-heartbeat.o
+obj-$(CONFIG_LEDS_TRIGGER_NETDEV)	+= ledtrig-netdev.o
diff -ruw linux-2.6.20.14/drivers/Makefile linux-2.6.20.14-fbx/drivers/Makefile
--- linux-2.6.20.14/drivers/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/Makefile	2010-12-29 19:30:06.601441338 +0100
@@ -39,6 +39,12 @@
 obj-$(CONFIG_IEEE1394)		+= ieee1394/
 obj-y				+= cdrom/
 obj-$(CONFIG_MTD)		+= mtd/
+obj-$(CONFIG_FREEBOX_MTD)	+= fbxmtd/
+obj-$(CONFIG_FREEBOX_GPIO)	+= fbxgpio/
+obj-$(CONFIG_FREEBOX_DMAMUX)	+= fbxdmamux/
+obj-$(CONFIG_FREEBOX_PANEL)	+= fbxpanel/
+obj-$(CONFIG_FREEBOX_SPI)	+= fbxspi/
+obj-$(CONFIG_FREEBOX_WATCHDOG)	+= fbxwatchdog/
 obj-$(CONFIG_SPI)		+= spi/
 obj-$(CONFIG_PCCARD)		+= pcmcia/
 obj-$(CONFIG_DIO)		+= dio/
@@ -80,3 +86,4 @@
 obj-$(CONFIG_DMA_ENGINE)	+= dma/
 obj-$(CONFIG_HID)		+= hid/
 obj-$(CONFIG_PPC_PS3)		+= ps3/
+obj-$(CONFIG_TANGO2)		+= tango2/
diff -ruw linux-2.6.20.14/drivers/media/dvb/dvb-usb/Kconfig linux-2.6.20.14-fbx/drivers/media/dvb/dvb-usb/Kconfig
--- linux-2.6.20.14/drivers/media/dvb/dvb-usb/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/media/dvb/dvb-usb/Kconfig	2010-12-29 19:30:06.951441061 +0100
@@ -72,6 +72,7 @@
 	select DVB_DIB7000P
 	select DVB_DIB7000M
 	select DVB_DIB3000MC
+	select DVB_TUNER_DIB0070
 	select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
 	help
 	  Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The
diff -ruw linux-2.6.20.14/drivers/media/dvb/frontends/Kconfig linux-2.6.20.14-fbx/drivers/media/dvb/frontends/Kconfig
--- linux-2.6.20.14/drivers/media/dvb/frontends/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/media/dvb/frontends/Kconfig	2010-12-29 19:30:06.951441061 +0100
@@ -305,6 +305,15 @@
 	help
 	  A driver for the LG TDVS-H06xF ATSC tuner family.
 
+config DVB_TUNER_DIB0070
+	tristate "DiBcom DiB0070 silicon base-band tuner"
+	depends on I2C
+	default m if DVB_FE_CUSTOMISE
+	help
+	  A driver for the silicon baseband tuner DiB0070 from DiBcom.
+	  This device is only used inside a SiP called together with a
+	  demodulator for now.
+
 comment "Miscellaneous devices"
 	depends on DVB_CORE
 
diff -ruw linux-2.6.20.14/drivers/media/dvb/frontends/Makefile linux-2.6.20.14-fbx/drivers/media/dvb/frontends/Makefile
--- linux-2.6.20.14/drivers/media/dvb/frontends/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/media/dvb/frontends/Makefile	2010-12-29 19:30:06.951441061 +0100
@@ -37,6 +37,7 @@
 obj-$(CONFIG_DVB_ISL6421) += isl6421.o
 obj-$(CONFIG_DVB_TDA10086) += tda10086.o
 obj-$(CONFIG_DVB_TDA826X) += tda826x.o
+obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o
 obj-$(CONFIG_DVB_TUNER_MT2060) += mt2060.o
 obj-$(CONFIG_DVB_TUA6100) += tua6100.o
 obj-$(CONFIG_DVB_TUNER_LGH06XF) += lgh06xf.o
diff -ruw linux-2.6.20.14/drivers/media/dvb/Kconfig linux-2.6.20.14-fbx/drivers/media/dvb/Kconfig
--- linux-2.6.20.14/drivers/media/dvb/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/media/dvb/Kconfig	2010-12-29 19:30:06.941449342 +0100
@@ -44,6 +44,12 @@
 	depends on DVB_CORE && PCI && I2C
 source "drivers/media/dvb/pluto2/Kconfig"
 
+# Start Freebox added code
+comment "Supported Tango2 Adapters"
+	depends on DVB_CORE && ARCH_FBX5_B
+source "drivers/media/dvb/tango2/Kconfig"
+# End Freebox added code
+
 comment "Supported DVB Frontends"
 	depends on DVB_CORE
 source "drivers/media/dvb/frontends/Kconfig"
diff -ruw linux-2.6.20.14/drivers/media/dvb/Makefile linux-2.6.20.14-fbx/drivers/media/dvb/Makefile
--- linux-2.6.20.14/drivers/media/dvb/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/media/dvb/Makefile	2010-12-29 19:30:06.941449342 +0100
@@ -2,4 +2,7 @@
 # Makefile for the kernel multimedia device drivers.
 #
 
-obj-y        := dvb-core/ frontends/ ttpci/ ttusb-dec/ ttusb-budget/ b2c2/ bt8xx/ cinergyT2/ dvb-usb/ pluto2/
+# Start Freebox altered code
+#obj-y        := dvb-core/ frontends/ ttpci/ ttusb-dec/ ttusb-budget/ b2c2/ bt8xx/ cinergyT2/ dvb-usb/ pluto2/
+obj-y        := dvb-core/ frontends/ ttpci/ ttusb-dec/ ttusb-budget/ b2c2/ bt8xx/ cinergyT2/ dvb-usb/ pluto2/ tango2/
+# End Freebox altered code
diff -ruw linux-2.6.20.14/drivers/misc/Kconfig linux-2.6.20.14-fbx/drivers/misc/Kconfig
--- linux-2.6.20.14/drivers/misc/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/misc/Kconfig	2010-12-29 19:30:07.031445324 +0100
@@ -88,4 +88,14 @@
 
 	  If you have an MSI S270 laptop, say Y or M here.
 
+config CRASHZONE
+	bool "crashzone support"
+
+config BCM963XX_DSL_ALT
+	tristate "Support for 63xx DSL"
+	depends on BCM963XX
+	select FW_LOADER
+	select CRC_CCITT
+	select GENERIC_ALLOCATOR
+
 endmenu
diff -ruw linux-2.6.20.14/drivers/misc/Makefile linux-2.6.20.14-fbx/drivers/misc/Makefile
--- linux-2.6.20.14/drivers/misc/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/misc/Makefile	2010-12-29 19:30:07.031445324 +0100
@@ -10,3 +10,5 @@
 obj-$(CONFIG_TIFM_CORE)       	+= tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)       	+= tifm_7xx1.o
 obj-$(CONFIG_SGI_IOC4)		+= ioc4.o
+obj-$(CONFIG_CRASHZONE)		+= crash_zone.o
+obj-$(CONFIG_BCM963XX_DSL_ALT)	+= bcm963xx_dsl/
diff -ruw linux-2.6.20.14/drivers/net/Kconfig linux-2.6.20.14-fbx/drivers/net/Kconfig
--- linux-2.6.20.14/drivers/net/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/net/Kconfig	2010-12-29 19:30:07.061441458 +0100
@@ -452,6 +452,14 @@
 	  This is the driver for the onboard card of MIPS Magnum 4000,
 	  Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
 
+config GALILEO_64240_ETH
+	tristate "Galileo GT64240 Ethernet support"
+	depends on NET_ETHERNET && MOMENCO_OCELOT_G
+	select MII
+	help
+	  This is the driver for the ethernet interfaces integrated into
+	  the Galileo (now Marvell) GT64240 chipset.
+
 config MIPS_AU1X00_ENET
 	bool "MIPS AU1000 Ethernet support"
 	depends on NET_ETHERNET && SOC_AU1X00
@@ -461,10 +469,6 @@
 	  If you have an Alchemy Semi AU1X00 based system
 	  say Y.  Otherwise, say N.
 
-config NET_SB1250_MAC
-	tristate "SB1250 Ethernet support"
-	depends on NET_ETHERNET && SIBYTE_SB1xxx_SOC
-
 config SGI_IOC3_ETH
 	bool "SGI IOC3 Ethernet"
 	depends on NET_ETHERNET && PCI && SGI_IP27
@@ -475,25 +479,13 @@
 	  the Ethernet-HOWTO, available from
 	  <http://www.tldp.org/docs.html#howto>.
 
-config SGI_IOC3_ETH_HW_RX_CSUM
-	bool "Receive hardware checksums"
-	depends on SGI_IOC3_ETH && INET
-	default y
-	help
-	  The SGI IOC3 network adapter supports TCP and UDP checksums in
-	  hardware to offload processing of these checksums from the CPU.  At
-	  the moment only acceleration of IPv4 is supported.  This option
-	  enables offloading for checksums on receive.  If unsure, say Y.
-
-config SGI_IOC3_ETH_HW_TX_CSUM
-	bool "Transmit hardware checksums"
-	depends on SGI_IOC3_ETH && INET
-	default y
+config MIPS_SIM_NET
+	tristate "MIPS simulator Network device (EXPERIMENTAL)"
+	depends on NETDEVICES && MIPS_SIM && EXPERIMENTAL
 	help
-	  The SGI IOC3 network adapter supports TCP and UDP checksums in
-	  hardware to offload processing of these checksums from the CPU.  At
-	  the moment only acceleration of IPv4 is supported.  This option
-	  enables offloading for checksums on transmit.  If unsure, say Y.
+	  The MIPSNET device is a simple Ethernet network device which is
+	  emulated by the MIPS Simulator.
+	  If you are not using a MIPSsim or are unsure, say N.
 
 config MIPS_SIM_NET
 	tristate "MIPS simulator Network device (EXPERIMENTAL)"
@@ -992,6 +984,20 @@
 	  <file:Documentation/networking/net-modules.txt>. The module
 	  will be called hp100.
 
+config BCM963XX_ENET
+	tristate "Broadcom 963xx internal mac support"
+	depends on NET_ETHERNET && BCM963XX
+	select MII
+	select PHYLIB
+	help
+	  If you have a network (Ethernet) controller of this type, say Y and
+	  read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/networking/net-modules.txt>.  The module will be
+	  called bcm963xx_enet.
+
 config NET_ISA
 	bool "Other ISA cards"
 	depends on NET_ETHERNET && ISA
@@ -1280,6 +1286,28 @@
 	depends on IBM_EMAC && 440GX
 	default y
 
+config TANGO2_ENET
+	tristate "SMP863x Builtin Ethernet support"
+	depends on NET_ETHERNET && TANGO2
+	select MII
+	select CRC32
+	help
+	 This option adds support for the SMP863x integrated Ethernet
+	 controller.  This driver uses NAPI and generic Linux MII
+	 support.
+
+config TANGO2_ENET_FIFOCHECK
+	bool "warn when internal fifo overflows"
+	depends on TANGO2_ENET
+
+config TANGO2_ENET_OLD
+	tristate "SMP863x Builtin Ethernet support (old driver)"
+	depends on NET_ETHERNET && TANGO2
+	help
+	 This option adds support for the SMP863x integrated Ethernet
+	 controller. This is the original driver from Sigma with only
+	 small changes.
+
 config NET_PCI
 	bool "EISA, VLB, PCI and on board controllers"
 	depends on NET_ETHERNET && (ISA || EISA || PCI)
@@ -2012,6 +2040,14 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called myri_sbus.  This is recommended.
 
+config MV88FXX81_ETH
+	tristate "mv88fxx81 builtin Gigabit Ethernet support"
+	depends on ARCH_MV88FXX81
+	select MII
+	help
+	 This option adds support for the mv88Fxx81 integrated Ethernet
+	 controller.  This driver uses NAPI.
+
 config NS83820
 	tristate "National Semiconductor DP83820 support"
 	depends on PCI
@@ -2076,6 +2112,10 @@
 
 	  If in doubt, say N.
 
+config NET_SB1250_MAC
+	tristate "SB1250 Ethernet support"
+	depends on SIBYTE_SB1xxx_SOC
+
 config R8169_VLAN
 	bool "VLAN support"
 	depends on R8169 && VLAN_8021Q
@@ -2274,6 +2314,7 @@
 	tristate "Gianfar Ethernet"
 	depends on 85xx || 83xx || PPC_86xx
 	select PHYLIB
+	select CRC32
 	help
 	  This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
 	  and MPC86xx family of chips, and the FEC on the 8540.
@@ -2315,8 +2356,8 @@
 	select MII
 	help
 	  This driver supports the gigabit Ethernet on the Marvell MV643XX
-	  chipset which is used in the Momenco Ocelot C and Jaguar ATX and
-	  Pegasos II, amongst other PPC and MIPS boards.
+	  chipset which is used in the Momenco Ocelot C, Ocelot, Jaguar ATX
+	  and Pegasos II, amongst other PPC and MIPS boards.
 
 config MV643XX_ETH_0
 	bool "MV-643XX Port 0"
@@ -2339,6 +2380,20 @@
 	  This enables support for Port 2 of the Marvell MV643XX Gigabit
 	  Ethernet.
 
+config BIG_SUR_FE
+	bool "PMC-Sierra TITAN Fast Ethernet Support"
+	depends on NET_ETHERNET && PMC_BIG_SUR
+	help
+	  This enables support for the integrated Ethernet of
+	  PMC-Sierra's Big Sur SoC.
+
+config TITAN_GE
+	bool "PMC-Sierra TITAN Gigabit Ethernet Support"
+	depends on PMC_YOSEMITE
+	help
+	  This enables support for the integrated Ethernet of
+	  PMC-Sierra's Titan SoC.
+
 config QLA3XXX
 	tristate "QLogic QLA3XXX Network Driver Support"
 	depends on PCI
@@ -2766,9 +2821,13 @@
 	 See http://pptpclient.sourceforge.net/ for information on
 	 configuring PPTP clients and servers to utilize this method.
 
+config PPPOX
+	tristate "PPP over X"
+	depends on EXPERIMENTAL && PPP
+
 config PPPOE
 	tristate "PPP over Ethernet (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && PPP
+	depends on PPPOX
 	help
 	  Support for PPP over Ethernet.
 
@@ -2915,4 +2974,16 @@
 config NET_POLL_CONTROLLER
 	def_bool NETPOLL
 
+config TANGO2_PCINET_H
+	tristate "SMP863x network over PCI support (smp863x side)"
+	depends on TANGO2
+
+config TANGO2_PCINET_D
+	tristate "SMP863x network over PCI support (agent side)"
+	depends on PCI
+
+config TANGO2_PCINET_D_DMAMUX
+	bool "use fbxdmamux for tx"
+	depends on TANGO2_PCINET_D && FREEBOX_DMAMUX
+
 endmenu
diff -ruw linux-2.6.20.14/drivers/net/Makefile linux-2.6.20.14-fbx/drivers/net/Makefile
--- linux-2.6.20.14/drivers/net/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/net/Makefile	2010-12-29 19:30:07.061441458 +0100
@@ -77,6 +77,11 @@
 obj-$(CONFIG_MII) += mii.o
 obj-$(CONFIG_PHYLIB) += phy/
 
+obj-$(CONFIG_TANGO2_ENET) += tango2_enet.o
+obj-$(CONFIG_TANGO2_ENET_OLD) += tango2_enet_old.o
+obj-$(CONFIG_TANGO2_PCINET_H) += tango2_pcinet_h.o
+obj-$(CONFIG_TANGO2_PCINET_D) += tango2_pcinet_d.o
+obj-$(CONFIG_MV88FXX81_ETH) += mv88fxx81_eth.o
 obj-$(CONFIG_SUNDANCE) += sundance.o
 obj-$(CONFIG_HAMACHI) += hamachi.o
 obj-$(CONFIG_NET) += Space.o loopback.o
@@ -87,6 +92,7 @@
 obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
 obj-$(CONFIG_SHAPER) += shaper.o
 obj-$(CONFIG_HP100) += hp100.o
+obj-$(CONFIG_BCM963XX_ENET) += bcm963xx_enet.o
 obj-$(CONFIG_SMC9194) += smc9194.o
 obj-$(CONFIG_FEC) += fec.o
 obj-$(CONFIG_68360_ENET) += 68360enet.o
@@ -112,13 +118,18 @@
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 
+obj-$(CONFIG_GALILEO_64240_ETH) += gt64240eth.o
+obj-$(CONFIG_BIG_SUR_FE) += big_sur_ge.o
+obj-$(CONFIG_TITAN_GE) += titan_mdio.o titan_ge.o
+
 obj-$(CONFIG_PPP) += ppp_generic.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
 obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
 obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
 obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
 obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
-obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
+obj-$(CONFIG_PPPOX) += pppox.o
+obj-$(CONFIG_PPPOE) += pppoe.o
 
 obj-$(CONFIG_SLIP) += slip.o
 obj-$(CONFIG_SLHC) += slhc.o
diff -ruw linux-2.6.20.14/drivers/net/wireless/hostap/Kconfig linux-2.6.20.14-fbx/drivers/net/wireless/hostap/Kconfig
--- linux-2.6.20.14/drivers/net/wireless/hostap/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/net/wireless/hostap/Kconfig	2010-12-29 19:30:07.231436622 +0100
@@ -85,3 +85,7 @@
 
 	The driver can be compiled as a module and will be named
 	"hostap_cs.ko".
+
+config HOSTAP_CS_FBXDMAMUX
+	bool "use fbxdmamux for data transfer"
+	depends on HOSTAP_CS
diff -ruw linux-2.6.20.14/drivers/pci/hotplug/fakephp.c linux-2.6.20.14-fbx/drivers/pci/hotplug/fakephp.c
--- linux-2.6.20.14/drivers/pci/hotplug/fakephp.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/pci/hotplug/fakephp.c	2010-12-29 19:30:07.261441558 +0100
@@ -189,6 +189,7 @@
 				dbg("New device on %s function %x:%x\n",
 					bus->name, temp->devfn >> 3,
 					temp->devfn & 7);
+				pci_bus_assign_resources(bus);
 				retval = pci_bus_add_device(dev);
 				if (retval)
 					dev_err(&dev->dev, "error adding "
@@ -347,16 +348,37 @@
 	
 }
 
+static ssize_t rescan_write(struct kobject *kobj, char *buf, loff_t off,
+			    size_t size)
+{
+	pci_rescan();
+	return size;
+}
+
+static struct bin_attribute rescan_attribute = {
+	.attr = { .name = "rescan", .mode = 0200 },
+	.size = 0,
+	.write = rescan_write,
+};
+
 static int __init dummyphp_init(void)
 {
+	struct kobject *sysdir = &pci_hotplug_slots_subsys.kset.kobj;
+	int ret;
+
 	info(DRIVER_DESC "\n");
 
+	if ((ret = sysfs_create_bin_file(sysdir, &rescan_attribute)))
+		return ret;
 	return pci_scan_buses();
 }
 
 
 static void __exit dummyphp_exit(void)
 {
+	struct kobject *sysdir = &pci_hotplug_slots_subsys.kset.kobj;
+
+	sysfs_remove_bin_file(sysdir, &rescan_attribute);
 	cleanup_slots();
 }
 
diff -ruw linux-2.6.20.14/drivers/pcmcia/Kconfig linux-2.6.20.14-fbx/drivers/pcmcia/Kconfig
--- linux-2.6.20.14/drivers/pcmcia/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/pcmcia/Kconfig	2010-12-29 19:30:07.271445410 +0100
@@ -197,6 +197,10 @@
 	tristate "Au1x00 pcmcia support"
 	depends on SOC_AU1X00 && PCMCIA
 
+config PCMCIA_BCM963XX
+	tristate "bcm963xx pcmcia support"
+	depends on BCM963XX && PCMCIA
+
 config PCMCIA_SA1100
 	tristate "SA1100 support"
 	depends on ARM && ARCH_SA1100 && PCMCIA
diff -ruw linux-2.6.20.14/drivers/pcmcia/Makefile linux-2.6.20.14-fbx/drivers/pcmcia/Makefile
--- linux-2.6.20.14/drivers/pcmcia/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/pcmcia/Makefile	2010-12-29 19:30:07.271445410 +0100
@@ -33,6 +33,7 @@
 obj-$(CONFIG_M32R_PCC)				+= m32r_pcc.o
 obj-$(CONFIG_M32R_CFC)				+= m32r_cfc.o
 obj-$(CONFIG_PCMCIA_AU1X00)			+= au1x00_ss.o
+obj-$(CONFIG_PCMCIA_BCM963XX)			+= bcm963xx_pcmcia.o
 obj-$(CONFIG_PCMCIA_VRC4171)			+= vrc4171_card.o
 obj-$(CONFIG_PCMCIA_VRC4173)			+= vrc4173_cardu.o
 obj-$(CONFIG_OMAP_CF)				+= omap_cf.o
diff -ruw linux-2.6.20.14/drivers/serial/8250.c linux-2.6.20.14-fbx/drivers/serial/8250.c
--- linux-2.6.20.14/drivers/serial/8250.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/serial/8250.c	2010-12-29 19:30:07.451437167 +0100
@@ -41,6 +41,12 @@
 #include <linux/nmi.h>
 #include <linux/mutex.h>
 
+#ifdef CONFIG_TANGO2
+#include <asm/tango2/tango2_gbus.h>
+
+extern unsigned long em8xxx_sys_frequency;
+#endif
+
 #include <asm/io.h>
 #include <asm/irq.h>
 
@@ -308,7 +314,21 @@
 		return inb(up->port.iobase + 1);
 
 	case UPIO_MEM:
+#ifdef CONFIG_TANGO2
+	{
+		unsigned long v;
+
+		/* no EFR on tango2 */
+		if (offset == (UART_EFR << up->port.regshift))
+			v = 0;
+		else
+			v = gbus_readl((unsigned long)up->port.membase +
+				       offset);
+		return v;
+	}
+#else
 		return readb(up->port.membase + offset);
+#endif
 
 	case UPIO_MEM32:
 		return readl(up->port.membase + offset);
@@ -342,8 +362,25 @@
 		break;
 
 	case UPIO_MEM:
+#ifdef CONFIG_TANGO2
+		/*
+		 * we add  a special case for  UART_DL register, since
+		 * register content has a different meaning for us.
+		 */
+		if (offset == (UART_DL << up->port.regshift)) {
+			/* select right clock source */
+			value = (em8xxx_sys_frequency / up->port.uartclk);
+		}
+
+		/* no EFR on tango2 */
+		if (offset != (UART_EFR << up->port.regshift))
+			gbus_writel((unsigned long)up->port.membase +
+				    offset, value);
+		break;
+#else
 		writeb(value, up->port.membase + offset);
 		break;
+#endif
 
 	case UPIO_MEM32:
 		writel(value, up->port.membase + offset);
@@ -374,6 +411,7 @@
 #define serial_outp(up, offset, value)	serial_out(up, offset, value)
 
 /* Uart divisor latch read */
+#ifndef CONFIG_TANGO2
 static inline int _serial_dl_read(struct uart_8250_port *up)
 {
 	return serial_inp(up, UART_DLL) | serial_inp(up, UART_DLM) << 8;
@@ -407,6 +445,7 @@
 #define serial_dl_read(up) _serial_dl_read(up)
 #define serial_dl_write(up, value) _serial_dl_write(up, value)
 #endif
+#endif
 
 /*
  * For the 16C950
@@ -540,7 +579,11 @@
 static int size_fifo(struct uart_8250_port *up)
 {
 	unsigned char old_fcr, old_mcr, old_lcr;
+#ifdef CONFIG_TANGO2
+	unsigned short old_dll, old_dlm;
+#else
 	unsigned short old_dl;
+#endif
 	int count;
 
 	old_lcr = serial_inp(up, UART_LCR);
@@ -551,8 +594,14 @@
 		    UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
 	serial_outp(up, UART_MCR, UART_MCR_LOOP);
 	serial_outp(up, UART_LCR, UART_LCR_DLAB);
+#ifdef CONFIG_TANGO2
+	old_dll = serial_inp(up, UART_DL) & 0xff;
+	old_dlm = serial_inp(up, UART_DL) >> 8;
+	serial_outp(up, UART_DL, 0x01);
+#else
 	old_dl = serial_dl_read(up);
 	serial_dl_write(up, 0x0001);
+#endif
 	serial_outp(up, UART_LCR, 0x03);
 	for (count = 0; count < 256; count++)
 		serial_outp(up, UART_TX, count);
@@ -563,7 +612,11 @@
 	serial_outp(up, UART_FCR, old_fcr);
 	serial_outp(up, UART_MCR, old_mcr);
 	serial_outp(up, UART_LCR, UART_LCR_DLAB);
+#ifdef CONFIG_TANGO2
+	serial_outp(up, UART_DL, (old_dlm << 8) | old_dll);
+#else
 	serial_dl_write(up, old_dl);
+#endif
 	serial_outp(up, UART_LCR, old_lcr);
 
 	return count;
@@ -582,6 +635,16 @@
 	old_lcr = serial_inp(p, UART_LCR);
 	serial_outp(p, UART_LCR, UART_LCR_DLAB);
 
+#ifdef CONFIG_TANGO2
+	old_dll = serial_inp(p, UART_DL) & 0xff;
+	old_dlm = serial_inp(p, UART_DL) >> 8;
+
+	serial_outp(p, UART_DL, 0);
+
+	id = serial_inp(p, UART_DL);
+
+	serial_outp(p, UART_DL, (old_dlm << 8) | old_dll);
+#else
 	old_dll = serial_inp(p, UART_DLL);
 	old_dlm = serial_inp(p, UART_DLM);
 
@@ -592,6 +655,7 @@
 
 	serial_outp(p, UART_DLL, old_dll);
 	serial_outp(p, UART_DLM, old_dlm);
+#endif
 	serial_outp(p, UART_LCR, old_lcr);
 
 	return id;
@@ -793,7 +857,11 @@
 
 			serial_outp(up, UART_LCR, 0xE0);
 
+#ifdef CONFIG_TANGO2
+			quot = serial_inp(up, UART_DL);
+#else
 			quot = serial_dl_read(up);
+#endif
 			quot <<= 3;
 
 			status1 = serial_in(up, 0x04); /* EXCR1 */
@@ -801,7 +869,11 @@
 			status1 |= 0x10;  /* 1.625 divisor for baud_base --> 921600 */
 			serial_outp(up, 0x04, status1);
 			
+#ifdef CONFIG_TANGO2
+			serial_outp(up, UART_DL, quot);
+#else
 			serial_dl_write(up, quot);
+#endif
 
 			serial_outp(up, UART_LCR, 0);
 
@@ -1915,7 +1987,11 @@
 		serial_outp(up, UART_LCR, cval | UART_LCR_DLAB);/* set DLAB */
 	}
 
+#ifdef CONFIG_TANGO2
+	serial_outp(up, UART_DL, quot);
+#else
 	serial_dl_write(up, quot);
+#endif
 
 	/*
 	 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
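To summarise the tango2-specific paths above as a hedged sketch: the divisor latch is a single word-wide UART_DL register rather than the usual DLL/DLM pair, and on writes the driver substitutes a divisor derived from the SoC system clock rather than using the caller's value verbatim.

/*
 * Restates the UART_DL handling in serial_out() above; not a separate
 * implementation.
 */
static unsigned int tango2_dl_value(struct uart_8250_port *up)
{
	return em8xxx_sys_frequency / up->port.uartclk;
}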
diff -ruw linux-2.6.20.14/drivers/serial/8250_early.c linux-2.6.20.14-fbx/drivers/serial/8250_early.c
--- linux-2.6.20.14/drivers/serial/8250_early.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/serial/8250_early.c	2010-12-29 19:30:07.451437167 +0100
@@ -103,8 +103,13 @@
 
 	lcr = serial_in(port, UART_LCR);
 	serial_out(port, UART_LCR, lcr | UART_LCR_DLAB);
+#ifdef CONFIG_TANGO2
+	dll = serial_in(port, UART_DL) & 0xff;
+	dlm = serial_in(port, UART_DL) >> 8;
+#else
 	dll = serial_in(port, UART_DLL);
 	dlm = serial_in(port, UART_DLM);
+#endif
 	serial_out(port, UART_LCR, lcr);
 
 	quot = (dlm << 8) | dll;
@@ -125,8 +130,12 @@
 	divisor = port->uartclk / (16 * device->baud);
 	c = serial_in(port, UART_LCR);
 	serial_out(port, UART_LCR, c | UART_LCR_DLAB);
+#ifdef CONFIG_TANGO2
+	serial_out(port, UART_DL, divisor & 0xffff);
+#else
 	serial_out(port, UART_DLL, divisor & 0xff);
 	serial_out(port, UART_DLM, (divisor >> 8) & 0xff);
+#endif
 	serial_out(port, UART_LCR, c & ~UART_LCR_DLAB);
 }
 
diff -ruw linux-2.6.20.14/drivers/serial/Kconfig linux-2.6.20.14-fbx/drivers/serial/Kconfig
--- linux-2.6.20.14/drivers/serial/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/serial/Kconfig	2010-12-29 19:30:07.451437167 +0100
@@ -686,6 +686,25 @@
 	depends on SERIAL_SH_SCI=y
 	select SERIAL_CORE_CONSOLE
 
+config SERIAL_IP3106
+	bool "Enable IP3106 UART Support (Philips PNX 8xx0 SoCs)"
+	depends on MIPS && (SOC_PNX8550 || SOC_PNX8330)
+	select SERIAL_CORE
+	help
+	  If you have a Philips SoC with an IP 3106 UART in it, such as
+	  the PNX8550 or PNX8330 (MIPS based) and you want to use
+	  serial ports, say Y.  Otherwise, say N.
+
+config SERIAL_IP3106_CONSOLE
+	bool "Enable PNX8XX0 serial console"
+	depends on SERIAL_IP3106
+	select SERIAL_CORE_CONSOLE
+	help
+	  If you have a Philips SoC with an IP 3106 UART in it, such as
+	  the PNX8550 or PNX8330 (MIPS based) and you want to use
+	  a serial console, say Y.
+	  Otherwise, say N.
+
 config SERIAL_CORE
 	tristate
 
@@ -994,4 +1013,23 @@
 	  If you have enabled the serial port on the Motorola IMX
 	  CPU you can make it the console by answering Y to this option.
 
+config SERIAL_BCM963XX
+	tristate "bcm963xx serial port support"
+	select SERIAL_CORE
+	depends on BCM963XX
+	help
+	  If you have a bcm963xx CPU, you can enable its onboard
+	  serial port by enabling this options.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called bcm963xx_uart.
+
+config SERIAL_BCM963XX_CONSOLE
+	bool "Console on bcm963xx serial port"
+	depends on SERIAL_BCM963XX=y
+	select SERIAL_CORE_CONSOLE
+	help
+	  If you have enabled the serial port on the bcm963xx CPU
+	  you can make it the console by answering Y to this option.
+
 endmenu
diff -ruw linux-2.6.20.14/drivers/serial/Makefile linux-2.6.20.14-fbx/drivers/serial/Makefile
--- linux-2.6.20.14/drivers/serial/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/serial/Makefile	2010-12-29 19:30:07.451437167 +0100
@@ -40,6 +40,7 @@
 obj-$(CONFIG_V850E_UART) += v850e_uart.o
 obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
 obj-$(CONFIG_SERIAL_LH7A40X) += serial_lh7a40x.o
+obj-$(CONFIG_SERIAL_IP3106) += ip3106_uart.o
 obj-$(CONFIG_SERIAL_DZ) += dz.o
 obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
 obj-$(CONFIG_SERIAL_SGI_L1_CONSOLE) += sn_console.o
@@ -58,3 +59,4 @@
 obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
 obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
+obj-$(CONFIG_SERIAL_BCM963XX) += bcm963xx_uart.o
diff -ruw linux-2.6.20.14/drivers/usb/gadget/Kconfig linux-2.6.20.14-fbx/drivers/usb/gadget/Kconfig
--- linux-2.6.20.14/drivers/usb/gadget/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/usb/gadget/Kconfig	2010-12-29 19:30:07.481437074 +0100
@@ -176,7 +176,28 @@
 	default USB_GADGET
 	select USB_GADGET_SELECTED
 
-config USB_OTG
+config USB_GADGET_BCM963xx
+	bool "BCM963xx USB Device Controller"
+	depends on BCM963xx
+	depends on BCM963XX
+config USB_BCM963xx
+	tristate
+	depends on USB_GADGET_BCM963xx
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
+config USB_GADGET_VOX160
+	boolean "IKANOS VX160"
+	help
+	   This provides the USB device controller driver for Ikanos's VX160.
+
+config USB_VOX160
+	tristate
+	depends on USB_GADGET_VOX160
+	default USB_GADGET
+	select USB_GADGET_SELECTED
+
+config USB_OMAP_OTG
 	boolean "OTG Support"
 	depends on USB_GADGET_OMAP && ARCH_OMAP_OTG && USB_OHCI_HCD
 	help
diff -ruw linux-2.6.20.14/drivers/usb/gadget/Makefile linux-2.6.20.14-fbx/drivers/usb/gadget/Makefile
--- linux-2.6.20.14/drivers/usb/gadget/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/usb/gadget/Makefile	2010-12-29 19:30:07.481437074 +0100
@@ -8,6 +8,8 @@
 obj-$(CONFIG_USB_OMAP)		+= omap_udc.o
 obj-$(CONFIG_USB_LH7A40X)	+= lh7a40x_udc.o
 obj-$(CONFIG_USB_AT91)		+= at91_udc.o
+obj-$(CONFIG_USB_BCM963xx)	+= bcm963xx_udc.o
+obj-$(CONFIG_USB_VOX160)	+= vox160_udc.o
 
 #
 # USB gadget drivers
diff -ruw linux-2.6.20.14/drivers/usb/host/Kconfig linux-2.6.20.14-fbx/drivers/usb/host/Kconfig
--- linux-2.6.20.14/drivers/usb/host/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/usb/host/Kconfig	2010-12-29 19:30:07.491479975 +0100
@@ -67,6 +67,11 @@
 
 	  If unsure, say N.
 
+config USB_EHCI_BIG_ENDIAN_MMIO
+	bool
+	depends on USB_EHCI_HCD
+	default n
+
 config USB_ISP116X_HCD
 	tristate "ISP116X HCD support"
 	depends on USB
@@ -123,7 +128,7 @@
 config USB_OHCI_LITTLE_ENDIAN
 	bool
 	depends on USB_OHCI_HCD
-	default n if STB03xxx || PPC_MPC52xx
+	default n if STB03xxx || PPC_MPC52xx || BCM963XX
 	default y
 
 config USB_UHCI_HCD
diff -ruw linux-2.6.20.14/drivers/video/Kconfig linux-2.6.20.14-fbx/drivers/video/Kconfig
--- linux-2.6.20.14/drivers/video/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/video/Kconfig	2010-12-29 19:30:07.551440887 +0100
@@ -1281,6 +1281,17 @@
 	  Please read the <file:Documentation/fb/README-sstfb.txt> for supported
 	  options and other important info  support.
 
+config FB_SMIVGX
+	tristate "Silicon Motion VoyagerGX support"
+	depends on FB && PCI && (MIPS || EXPERIMENTAL)
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	---help---
+	  This driver supports SMI VoyagerGX 501 based PCI boards.
+	  The default settings drive both a CRT and LCD.  The CRT
+	  can be turned off by passing in the no_crt option.
+
 config FB_CYBLA
 	tristate "Cyberblade/i1 support"
 	depends on FB && PCI && X86_32 && !64BIT
@@ -1345,7 +1356,25 @@
 
 config FB_AU1100
 	bool "Au1100 LCD Driver"
-	depends on (FB = y) && EXPERIMENTAL && PCI && MIPS && MIPS_PB1100=y
+	depends on FB && MIPS && SOC_AU1100
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	help
+	  This is the framebuffer driver for the AMD Au1100 SOC.  It can drive
+	  various panels and CRTs by passing in kernel cmd line option
+	  au1100fb:panel=<name>.
+
+config FB_AU1200
+	bool "Au1200 LCD Driver"
+	depends on FB && MIPS && SOC_AU1200
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	help
+	  This is the framebuffer driver for the AMD Au1200 SOC.  It can drive
+	  various panels and CRTs by passing in kernel cmd line option
+	  au1200fb:panel=<name>.
 
 config FB_AU1200
 	bool "Au1200 LCD Driver"
diff -ruw linux-2.6.20.14/drivers/video/Makefile linux-2.6.20.14-fbx/drivers/video/Makefile
--- linux-2.6.20.14/drivers/video/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/drivers/video/Makefile	2010-12-29 19:30:07.551440887 +0100
@@ -96,6 +96,7 @@
 obj-$(CONFIG_FB_TX3912)		  += tx3912fb.o
 obj-$(CONFIG_FB_S1D13XXX)	  += s1d13xxxfb.o
 obj-$(CONFIG_FB_IMX)              += imxfb.o
+obj-$(CONFIG_FB_SMIVGX)		  += smivgxfb.o
 obj-$(CONFIG_FB_S3C2410)	  += s3c2410fb.o
 obj-$(CONFIG_FB_PNX4008_DUM)	  += pnx4008/
 obj-$(CONFIG_FB_PNX4008_DUM_RGB)  += pnx4008/
diff -ruw linux-2.6.20.14/fs/exec.c linux-2.6.20.14-fbx/fs/exec.c
--- linux-2.6.20.14/fs/exec.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/fs/exec.c	2010-12-29 19:30:08.251455515 +0100
@@ -1459,9 +1459,17 @@
 	return core_waiters;
 }
 
+#define CORE_ENV_MAX_ARGS 4
+
 int do_coredump(long signr, int exit_code, struct pt_regs * regs)
 {
 	char corename[CORENAME_MAX_SIZE + 1];
+	char *core_argv[2];
+	char *core_envp[CORE_ENV_MAX_ARGS + 1];
+	char core_pid[CORENAME_MAX_SIZE + 1];
+	char core_signal[CORENAME_MAX_SIZE + 1];
+	char core_time[CORENAME_MAX_SIZE + 1];
+	char core_comm[CORENAME_MAX_SIZE + 1];
 	struct mm_struct *mm = current->mm;
 	struct linux_binfmt * binfmt;
 	struct inode * inode;
@@ -1512,8 +1520,36 @@
 	ispipe = format_corename(corename, core_pattern, signr);
 	unlock_kernel();
  	if (ispipe) {
+		int i = 0;
+		struct timeval tv;
+
+#define CORE_ENV_ADD(__buf, __fmt, __arg)				\
+	do {								\
+		if (i < CORE_ENV_MAX_ARGS ) {				\
+			snprintf(__buf, sizeof(__buf), __fmt, __arg);	\
+			core_envp[i++] = __buf;				\
+		} else							\
+			WARN_ON(1);					\
+	} while(0)
+
+		/* Create the env */
+		CORE_ENV_ADD(core_pid, "CORE_PID=%d", current->tgid);
+		CORE_ENV_ADD(core_signal, "CORE_SIGNAL=%ld", signr);
+		CORE_ENV_ADD(core_comm, "CORE_EXECUTABLE=%s", current->comm);
+
+		do_gettimeofday(&tv);
+		CORE_ENV_ADD(core_time, "CORE_TIME=%lu", tv.tv_sec);
+
+#undef CORE_ENV_ADD
+
+		core_envp[i] = NULL;
+
+		core_argv[0] = corename+1;
+		core_argv[1] = NULL;
+
 		/* SIGPIPE can happen, but it's just never processed */
- 		if(call_usermodehelper_pipe(corename+1, NULL, NULL, &file)) {
+ 		if (call_usermodehelper_pipe(core_argv[0], core_argv,
+					     core_envp, &file)) {
  			printk(KERN_INFO "Core dump to %s pipe failed\n",
 			       corename);
  			goto fail_unlock;
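With this change a piped core handler (core_pattern starting with '|') receives the helper command as argv[0], the CORE_* variables in its environment, and the core image on stdin. A hedged usage sketch of a hypothetical userspace helper:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *pid  = getenv("CORE_PID");
	const char *sig  = getenv("CORE_SIGNAL");
	const char *exe  = getenv("CORE_EXECUTABLE");
	const char *when = getenv("CORE_TIME");

	fprintf(stderr, "core: pid=%s signal=%s exe=%s time=%s\n",
		pid ? pid : "?", sig ? sig : "?",
		exe ? exe : "?", when ? when : "?");
	/* the core image itself arrives on stdin */
	return 0;
}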
diff -ruw linux-2.6.20.14/fs/Kconfig linux-2.6.20.14-fbx/fs/Kconfig
--- linux-2.6.20.14/fs/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/fs/Kconfig	2010-12-29 19:30:08.211439653 +0100
@@ -1027,6 +1027,18 @@
 	  To compile this as a module, choose M here: the module will be called
 	  ramfs.
 
+config RAMFS_XATTR
+	bool
+	default n
+
+config RAMFS_XATTR_USER
+	bool "Enable user extended attributes on RAMFS filesystem"
+	default n
+	select RAMFS_XATTR
+	help
+	  Select this to enable extended user attributes on RAMFS
+	  filesystem.
+
 config CONFIGFS_FS
 	tristate "Userspace-driven configuration filesystem (EXPERIMENTAL)"
 	depends on SYSFS && EXPERIMENTAL
@@ -1039,6 +1051,23 @@
 	  Both sysfs and configfs can and should exist together on the
 	  same system. One is not a replacement for the other.
 
+# Start Freebox added code
+config CTMP_FS
+	tristate "Compact virtual memory file system support"
+	help
+	  ctmp_fs is a file system which keeps all files in virtual
+	  memory, but unlike tmpfs/ramfs, it does not use the VFS as a
+	  backend storage.  It keeps file/directory data and names
+	  contiguous in memory to save some RAM.
+
+	  This results in a much smaller memory footprint when using lots
+	  of small files, but at the cost of very expensive write/delete
+	  operations.
+
+	  If you unmount a ctmp_fs instance, everything stored therein
+	  is lost.
+# End Freebox added code
+
 endmenu
 
 menu "Miscellaneous filesystems"
@@ -1404,6 +1433,73 @@
 
 	  If unsure, say N.
 
+config SQUASHFS
+	tristate "SquashFS 3.2 - Squashed file system support"
+	depends on BLOCK
+	select ZLIB_INFLATE
+	select SQLZMA_UNCOMP
+	help
+	  Saying Y here includes support for SquashFS 3.2 (a Compressed Read-Only File
+	  System).  Squashfs is a highly compressed read-only filesystem for Linux.
+	  It uses zlib compression to compress both files, inodes and directories.
+	  Inodes in the system are very small and all blocks are packed to minimise
+	  data overhead. Block sizes greater than 4K are supported up to a maximum of 64K.
+	  SquashFS 3.2 supports 64 bit filesystems and files (larger than 4GB), full
+	  uid/gid information, hard links and timestamps.
+
+	  Squashfs is intended for general read-only filesystem use, for archival
+	  use (i.e. in cases where a .tar.gz file may be used), and in embedded
+	  systems where low overhead is needed.  Further information and filesystem tools
+	  are available from http://squashfs.sourceforge.net.
+
+	  If you want to compile this as a module ( = code which can be
+	  inserted in and removed from the running kernel whenever you want),
+	  say M here and read <file:Documentation/modules.txt>.  The module
+	  will be called squashfs.  Note that the root file system (the one
+	  containing the directory /) cannot be compiled as a module.
+
+	  If unsure, say N.
+
+config SQUASHFS_EMBEDDED
+
+	bool "Additional options for memory-constrained systems" 
+	depends on SQUASHFS
+	default n
+	help
+	  Saying Y here allows you to specify cache sizes and how Squashfs
+	  allocates memory.  This is only intended for memory constrained
+	  systems.
+
+	  If unsure, say N.
+
+config SQUASHFS_FRAGMENT_CACHE_SIZE
+	int "Number of fragments cached" if SQUASHFS_EMBEDDED
+	depends on SQUASHFS
+	default "3"
+	help
+	  By default SquashFS caches the last 3 fragments read from
+	  the filesystem.  Increasing this amount may mean SquashFS
+	  has to re-read fragments less often from disk, at the expense
+	  of extra system memory.  Decreasing this amount will mean
+	  SquashFS uses less memory at the expense of extra reads from disk.
+
+	  Note there must be at least one cached fragment.  Anything
+	  much more than three will probably not make much difference.
+
+config SQUASHFS_VMALLOC
+	bool "Use Vmalloc rather than Kmalloc" if SQUASHFS_EMBEDDED
+	depends on SQUASHFS
+	default n
+	help
+	  By default SquashFS uses kmalloc to obtain fragment cache memory.
+	  Kmalloc memory is the standard kernel allocator, but it can fail
+	  on memory constrained systems.  Because of the way Vmalloc works,
+	  Vmalloc can succeed when kmalloc fails.  Specifying this option
+	  will make SquashFS always use Vmalloc to allocate the
+	  fragment cache memory.
+
+	  If unsure, say N.
+
 config VXFS_FS
 	tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
 	depends on BLOCK
diff -ruw linux-2.6.20.14/fs/Makefile linux-2.6.20.14-fbx/fs/Makefile
--- linux-2.6.20.14/fs/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/fs/Makefile	2010-12-29 19:30:08.221445776 +0100
@@ -68,6 +68,7 @@
 obj-$(CONFIG_JBD2)		+= jbd2/
 obj-$(CONFIG_EXT2_FS)		+= ext2/
 obj-$(CONFIG_CRAMFS)		+= cramfs/
+obj-$(CONFIG_SQUASHFS)		+= squashfs/
 obj-$(CONFIG_RAMFS)		+= ramfs/
 obj-$(CONFIG_HUGETLBFS)		+= hugetlbfs/
 obj-$(CONFIG_CODA_FS)		+= coda/
@@ -115,3 +116,6 @@
 obj-$(CONFIG_DEBUG_FS)		+= debugfs/
 obj-$(CONFIG_OCFS2_FS)		+= ocfs2/
 obj-$(CONFIG_GFS2_FS)           += gfs2/
+# Start Freebox added code
+obj-$(CONFIG_CTMP_FS)		+= ctmpfs/
+# End Freebox added code
diff -ruw linux-2.6.20.14/fs/ramfs/file-mmu.c linux-2.6.20.14-fbx/fs/ramfs/file-mmu.c
--- linux-2.6.20.14/fs/ramfs/file-mmu.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/fs/ramfs/file-mmu.c	2010-12-29 19:30:08.351440994 +0100
@@ -26,6 +26,7 @@
 
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/xattr.h>
 
 const struct address_space_operations ramfs_aops = {
 	.readpage	= simple_readpage,
@@ -47,4 +48,11 @@
 
 struct inode_operations ramfs_file_inode_operations = {
 	.getattr	= simple_getattr,
+#ifdef CONFIG_RAMFS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= generic_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
 };
+
diff -ruw linux-2.6.20.14/fs/ramfs/inode.c linux-2.6.20.14-fbx/fs/ramfs/inode.c
--- linux-2.6.20.14/fs/ramfs/inode.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/fs/ramfs/inode.c	2010-12-29 19:30:08.351440994 +0100
@@ -33,6 +33,7 @@
 #include <linux/smp_lock.h>
 #include <linux/backing-dev.h>
 #include <linux/ramfs.h>
+#include <linux/xattr.h>
 
 #include <asm/uaccess.h>
 #include "internal.h"
@@ -42,6 +43,7 @@
 
 static struct super_operations ramfs_ops;
 static struct inode_operations ramfs_dir_inode_operations;
+struct kmem_cache *ramfs_inode_cache;
 
 static struct backing_dev_info ramfs_backing_dev_info = {
 	.ra_pages	= 0,	/* No readahead */
@@ -50,6 +52,28 @@
 			  BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP,
 };
 
+static struct inode *ramfs_alloc_inode(struct super_block *sb)
+{
+	struct ramfs_inode_info *rii;
+
+	rii = kmem_cache_alloc(ramfs_inode_cache, GFP_KERNEL);
+	if (!rii)
+		return NULL;
+	return &rii->vfs_inode;
+}
+
+static void ramfs_destroy_inode(struct inode *ino)
+{
+	struct ramfs_inode_info *rii;
+
+	rii = RAMFS_I(ino);
+
+#ifdef CONFIG_RAMFS_XATTR
+	ramfs_inode_purge_xattrs(rii);
+#endif
+	kmem_cache_free(ramfs_inode_cache, rii);
+}
+
 struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev)
 {
 	struct inode * inode = new_inode(sb);
@@ -153,9 +177,17 @@
 	.rmdir		= simple_rmdir,
 	.mknod		= ramfs_mknod,
 	.rename		= simple_rename,
+#ifdef CONFIG_RAMFS_XATTR
+	.setxattr	= generic_setxattr,
+	.getxattr	= generic_getxattr,
+	.listxattr	= generic_listxattr,
+	.removexattr	= generic_removexattr,
+#endif
 };
 
 static struct super_operations ramfs_ops = {
+	.alloc_inode	= ramfs_alloc_inode,
+	.destroy_inode	= ramfs_destroy_inode,
 	.statfs		= simple_statfs,
 	.drop_inode	= generic_delete_inode,
 };
@@ -171,6 +203,9 @@
 	sb->s_magic = RAMFS_MAGIC;
 	sb->s_op = &ramfs_ops;
 	sb->s_time_gran = 1;
+#ifdef CONFIG_RAMFS_XATTR
+	sb->s_xattr = ramfs_xattr_handlers;
+#endif
 	inode = ramfs_get_inode(sb, S_IFDIR | 0755, 0);
 	if (!inode)
 		return -ENOMEM;
@@ -208,6 +243,21 @@
 	.kill_sb	= kill_litter_super,
 };
 
+static void ramfs_inode_init_once(void *ptr, struct kmem_cache *cachep,
+		      unsigned long flags)
+{
+	struct ramfs_inode_info *p = (struct ramfs_inode_info *)ptr;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+	    SLAB_CTOR_CONSTRUCTOR) {
+		inode_init_once(&p->vfs_inode);
+#ifdef CONFIG_RAMFS_XATTR_USER
+		INIT_LIST_HEAD(&p->xattr_user_list);
+#endif
+	}
+}
+
+
 static int __init init_ramfs_fs(void)
 {
 	return register_filesystem(&ramfs_fs_type);
@@ -223,7 +273,20 @@
 
 int __init init_rootfs(void)
 {
-	return register_filesystem(&rootfs_fs_type);
+	int error;
+
+	ramfs_inode_cache = kmem_cache_create("ramfs_inode_cache",
+					      sizeof (struct ramfs_inode_info),
+					      0, 0, ramfs_inode_init_once,
+					      NULL);
+	if (!ramfs_inode_cache)
+		return -ENOMEM;
+
+	error = register_filesystem(&rootfs_fs_type);
+	if (error)
+		kmem_cache_destroy(ramfs_inode_cache);
+
+	return error;
 }
 
 MODULE_LICENSE("GPL");
diff -ruw linux-2.6.20.14/fs/ramfs/internal.h linux-2.6.20.14-fbx/fs/ramfs/internal.h
--- linux-2.6.20.14/fs/ramfs/internal.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/fs/ramfs/internal.h	2010-12-29 19:30:08.351440994 +0100
@@ -9,7 +9,42 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#ifndef RAMFS_INTERNAL_H
+# define RAMFS_INTERNAL_H
+
+/* need list_head */
+#include <linux/list.h>
 
 extern const struct address_space_operations ramfs_aops;
 extern const struct file_operations ramfs_file_operations;
 extern struct inode_operations ramfs_file_inode_operations;
+
+struct ramfs_xattr
+{
+	char *name;
+	void *data;
+	size_t data_len;
+
+	struct list_head list;
+};
+
+struct ramfs_inode_info
+{
+	struct inode vfs_inode;
+#ifdef CONFIG_RAMFS_XATTR_USER
+	struct list_head xattr_user_list;
+#endif
+};
+
+static inline struct ramfs_inode_info *RAMFS_I(struct inode *inode)
+{
+	return container_of(inode, struct ramfs_inode_info, vfs_inode);
+}
+
+#ifdef CONFIG_RAMFS_XATTR
+void ramfs_inode_purge_xattrs(struct ramfs_inode_info *rii);
+extern struct xattr_handler *ramfs_xattr_handlers[];
+#endif
+
+
+#endif /* !RAMFS_INTERNAL_H */
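
The xattr support sketched by these structures keeps each inode's user.* attributes on a simple linked list. The real handlers live in the new fs/ramfs/xattr.c, which is not part of this hunk, so the lookup below is only an assumed shape (invented function name, locking omitted):

#ifdef CONFIG_RAMFS_XATTR_USER
#include <linux/string.h>

/* walk the per-inode list looking for a named attribute; the caller
 * is expected to hold whatever lock xattr.c uses for this list */
static struct ramfs_xattr *ramfs_xattr_find(struct ramfs_inode_info *rii,
					    const char *name)
{
	struct ramfs_xattr *xa;

	list_for_each_entry(xa, &rii->xattr_user_list, list)
		if (!strcmp(xa->name, name))
			return xa;
	return NULL;
}
#endif
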
diff -ruw linux-2.6.20.14/fs/ramfs/Makefile linux-2.6.20.14-fbx/fs/ramfs/Makefile
--- linux-2.6.20.14/fs/ramfs/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/fs/ramfs/Makefile	2010-12-29 19:30:08.351440994 +0100
@@ -6,4 +6,5 @@
 
 file-mmu-y := file-nommu.o
 file-mmu-$(CONFIG_MMU) := file-mmu.o
-ramfs-objs += inode.o $(file-mmu-y)
+ramfs-xattr-$(CONFIG_RAMFS_XATTR) += xattr.o
+ramfs-objs += inode.o $(file-mmu-y) $(ramfs-xattr-y)
diff -ruw linux-2.6.20.14/include/asm-arm/setup.h linux-2.6.20.14-fbx/include/asm-arm/setup.h
--- linux-2.6.20.14/include/asm-arm/setup.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/asm-arm/setup.h	2010-12-29 19:30:08.451437774 +0100
@@ -143,6 +143,22 @@
 	__u32 fmemclk;
 };
 
+/* Freebox random data is stored on the sigma side. */
+#define ATAG_RANDOM_DATA	0x41000042
+
+struct tag_random_data
+{
+	uint8_t	data[32];
+};
+
+/* Freebox random seed given by the sigma CPU */
+#define ATAG_RANDOM_SEED	0x41000043
+
+struct tag_random_seed
+{
+	uint8_t seed[4];
+};
+
 struct tag {
 	struct tag_header hdr;
 	union {
@@ -165,6 +181,12 @@
 		 * DC21285 specific
 		 */
 		struct tag_memclk	memclk;
+
+		/*
+		 * FBXO1_A specific
+		 */
+		struct tag_random_data	random_data;
+		struct tag_random_seed	random_seed;
 	} u;
 };
 
diff -ruw linux-2.6.20.14/include/asm-arm/sizes.h linux-2.6.20.14-fbx/include/asm-arm/sizes.h
--- linux-2.6.20.14/include/asm-arm/sizes.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/asm-arm/sizes.h	2010-12-29 19:30:08.451437774 +0100
@@ -25,9 +25,11 @@
 
 /* handy sizes */
 #define SZ_1K                           0x00000400
+#define SZ_2K				0x00000800
 #define SZ_4K                           0x00001000
 #define SZ_8K                           0x00002000
 #define SZ_16K                          0x00004000
+#define SZ_32K				0x00008000
 #define SZ_64K                          0x00010000
 #define SZ_128K                         0x00020000
 #define SZ_256K                         0x00040000
diff -ruw linux-2.6.20.14/include/asm-generic/vmlinux.lds.h linux-2.6.20.14-fbx/include/asm-generic/vmlinux.lds.h
--- linux-2.6.20.14/include/asm-generic/vmlinux.lds.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/asm-generic/vmlinux.lds.h	2010-12-29 19:30:08.491477111 +0100
@@ -24,19 +24,19 @@
 	/* PCI quirks */						\
 	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
-		*(.pci_fixup_early)					\
+		KEEP(*(.pci_fixup_early))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
-		*(.pci_fixup_header)					\
+		KEEP(*(.pci_fixup_header))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
-		*(.pci_fixup_final)					\
+		KEEP(*(.pci_fixup_final))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
-		*(.pci_fixup_enable)					\
+		KEEP(*(.pci_fixup_enable))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
 		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
-		*(.pci_fixup_resume)					\
+		KEEP(*(.pci_fixup_resume))				\
 		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
 	}								\
 									\
@@ -211,21 +211,20 @@
 		.notes : { *(.note.*) } :note
 
 #define INITCALLS							\
-  	*(.initcall0.init)						\
-  	*(.initcall0s.init)						\
-  	*(.initcall1.init)						\
-  	*(.initcall1s.init)						\
-  	*(.initcall2.init)						\
-  	*(.initcall2s.init)						\
-  	*(.initcall3.init)						\
-  	*(.initcall3s.init)						\
-  	*(.initcall4.init)						\
-  	*(.initcall4s.init)						\
-  	*(.initcall5.init)						\
-  	*(.initcall5s.init)						\
-	*(.initcallrootfs.init)						\
-  	*(.initcall6.init)						\
-  	*(.initcall6s.init)						\
-  	*(.initcall7.init)						\
-  	*(.initcall7s.init)
-
+  	KEEP(*(.initcall0.init))					\
+  	KEEP(*(.initcall0s.init))					\
+  	KEEP(*(.initcall1.init))					\
+  	KEEP(*(.initcall1s.init))					\
+  	KEEP(*(.initcall2.init))					\
+  	KEEP(*(.initcall2s.init))					\
+  	KEEP(*(.initcall3.init))					\
+  	KEEP(*(.initcall3s.init))					\
+  	KEEP(*(.initcall4.init))					\
+  	KEEP(*(.initcall4s.init))					\
+  	KEEP(*(.initcall5.init))					\
+  	KEEP(*(.initcall5s.init))					\
+	KEEP(*(.initcallrootfs.init))					\
+  	KEEP(*(.initcall6.init))					\
+  	KEEP(*(.initcall6s.init))					\
+  	KEEP(*(.initcall7.init))					\
+  	KEEP(*(.initcall7s.init))
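
The KEEP() wrappers matter once the kernel is linked with --gc-sections: an initcall is only reachable through a function pointer emitted into its .initcallN.init section, roughly as in the simplified illustration below, so the linker sees no reference from live code and would otherwise discard the whole section. (This is a stripped-down restatement of the existing initcall mechanism, not new code from the patch.)

typedef int (*initcall_t)(void);

static int my_board_init(void)
{
	return 0;
}

/* the only reference to my_board_init is this pointer, placed in a
 * dedicated section that the boot code walks; nothing else links to it */
static initcall_t __initcall_my_board_init
	__attribute__((__used__, __section__(".initcall6.init"))) = my_board_init;
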
diff -ruw linux-2.6.20.14/include/linux/if_arp.h linux-2.6.20.14-fbx/include/linux/if_arp.h
--- linux-2.6.20.14/include/linux/if_arp.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/if_arp.h	2010-12-29 19:30:08.701442239 +0100
@@ -40,6 +40,9 @@
 #define ARPHRD_METRICOM	23		/* Metricom STRIP (new IANA id)	*/
 #define	ARPHRD_IEEE1394	24		/* IEEE 1394 IPv4 - RFC 2734	*/
 #define ARPHRD_EUI64	27		/* EUI-64                       */
+/* Start Freebox added code */
+#define ARPHRD_DSL	29		/* ADSL                         */
+/* End Freebox added code */
 #define ARPHRD_INFINIBAND 32		/* InfiniBand			*/
 
 /* Dummy types for non ARP hardware */
diff -ruw linux-2.6.20.14/include/linux/if_pppox.h linux-2.6.20.14-fbx/include/linux/if_pppox.h
--- linux-2.6.20.14/include/linux/if_pppox.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/if_pppox.h	2010-12-29 19:30:08.711441045 +0100
@@ -47,16 +47,27 @@
 }; 
  
 /************************************************************************ 
+ * PPPoA addressing definition
+ */
+struct pppoa_addr {
+	unsigned int    vpi;
+	unsigned int    vci;
+};
+
+/************************************************************************ 
  * Protocols supported by AF_PPPOX 
  */ 
-#define PX_PROTO_OE    0 /* Currently just PPPoE */
-#define PX_MAX_PROTO   1	
+#define PX_PROTO_OE    0
+#define PX_PROTO_OA    1
+
+#define PX_MAX_PROTO   2	
  
 struct sockaddr_pppox { 
        sa_family_t     sa_family;            /* address family, AF_PPPOX */ 
        unsigned int    sa_protocol;          /* protocol identifier */ 
        union{ 
                struct pppoe_addr       pppoe; 
+	       struct pppoa_addr       pppoa;
        }sa_addr; 
 }__attribute__ ((packed)); 
 
@@ -112,6 +123,22 @@
 } __attribute__ ((packed));
 
 #ifdef __KERNEL__
+
+#define PPPOA_ENCAP_UNKNOWN	0
+#define PPPOA_ENCAP_VCMUX	1
+#define PPPOA_ENCAP_LLC		2
+
+struct pppoa_opt
+{
+	unsigned int		vpi;
+	unsigned int		vci;
+	unsigned int		stopped;
+	struct tasklet_struct	rcv_tasklet;
+	struct sk_buff_head	rcv_queue;
+	struct tasklet_struct	wakeup_tasklet;
+	unsigned int		encap;
+};
+
 struct pppoe_opt {
 	struct net_device      *dev;	  /* device associated with socket*/
 	struct pppoe_addr	pa;	  /* what this socket is bound to*/
@@ -128,6 +155,7 @@
 	struct pppox_sock	*next;	  /* for hash table */
 	union {
 		struct pppoe_opt pppoe;
+		struct pppoa_opt pppoa;
 	} proto;
 	unsigned short		num;
 };
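
By analogy with PX_PROTO_OE, userspace reaches the new PX_PROTO_OA transport through an AF_PPPOX socket, with the VPI/VCI carried in sa_addr.pppoa. Whether the Freebox pppoa module really takes its parameters through connect() like this is an assumption; the sketch only shows how the new addressing structure is meant to be filled:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_pppox.h>

static int pppoa_open(unsigned int vpi, unsigned int vci)
{
	struct sockaddr_pppox sp;
	int fd;

	fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_OA);
	if (fd < 0)
		return -1;

	memset(&sp, 0, sizeof(sp));
	sp.sa_family = AF_PPPOX;
	sp.sa_protocol = PX_PROTO_OA;
	sp.sa_addr.pppoa.vpi = vpi;
	sp.sa_addr.pppoa.vci = vci;

	if (connect(fd, (struct sockaddr *)&sp, sizeof(sp)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* fd can then be attached to a ppp unit */
}
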
diff -ruw linux-2.6.20.14/include/linux/if_tunnel.h linux-2.6.20.14-fbx/include/linux/if_tunnel.h
--- linux-2.6.20.14/include/linux/if_tunnel.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/if_tunnel.h	2010-12-29 19:30:08.711441045 +0100
@@ -25,6 +25,11 @@
 	__be16			o_flags;
 	__be32			i_key;
 	__be32			o_key;
+
+	/* these are the first bits to match on the ipv6 address */
+	struct in6_addr		fbx6to4_zone;
+	__u8			fbx6to4_prefix;
+
 	struct iphdr		iph;
 };
 
diff -ruw linux-2.6.20.14/include/linux/in.h linux-2.6.20.14-fbx/include/linux/in.h
--- linux-2.6.20.14/include/linux/in.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/in.h	2010-12-29 19:30:08.711441045 +0100
@@ -79,6 +79,10 @@
 /* BSD compatibility */
 #define IP_RECVRETOPTS	IP_RETOPTS
 
+/* TProxy original addresses */
+#define IP_ORIGADDRS	11273
+#define IP_RECVORIGADDRS	IP_ORIGADDRS
+
 /* IP_MTU_DISCOVER values */
 #define IP_PMTUDISC_DONT		0	/* Never send DF frames */
 #define IP_PMTUDISC_WANT		1	/* Use per route hints	*/
@@ -175,6 +179,13 @@
 	struct in_addr	ipi_addr;
 };
 
+struct in_origaddrs {
+        struct in_addr ioa_srcaddr;
+        struct in_addr ioa_dstaddr;
+        unsigned short int ioa_srcport;
+        unsigned short int ioa_dstport;
+};
+
 /* Structure describing an Internet (IP) socket address. */
 #define __SOCK_SIZE__	16		/* sizeof(struct sockaddr)	*/
 struct sockaddr_in {
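
IP_ORIGADDRS / IP_RECVORIGADDRS and struct in_origaddrs above let a transparent proxy recover the pre-NAT endpoints of a diverted flow. IP_RECV* options normally deliver their data as ancillary messages on recvmsg(); whether tproxy does exactly that here is an assumption, so treat the loop below as a sketch:

#include <string.h>
#include <sys/socket.h>
#include <linux/in.h>	/* patched header: IP_ORIGADDRS, struct in_origaddrs */

#ifndef SOL_IP
#define SOL_IP	0
#endif

/* pull the original (pre-redirect) addresses out of the control data
 * attached to a received datagram */
static int get_origaddrs(struct msghdr *msg, struct in_origaddrs *oa)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_IP &&
		    cmsg->cmsg_type == IP_ORIGADDRS &&
		    cmsg->cmsg_len >= CMSG_LEN(sizeof(*oa))) {
			memcpy(oa, CMSG_DATA(cmsg), sizeof(*oa));
			return 0;
		}
	}
	return -1;
}
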
diff -ruw linux-2.6.20.14/include/linux/Kbuild linux-2.6.20.14-fbx/include/linux/Kbuild
--- linux-2.6.20.14/include/linux/Kbuild	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/Kbuild	2010-12-29 19:30:08.671444866 +0100
@@ -338,4 +338,8 @@
 unifdef-y += wireless.h
 unifdef-y += xfrm.h
 
+unifdef-y += fbxatm.h
+unifdef-y += fbxmtd_map_ioctl.h
+
 objhdr-y += version.h
+
diff -ruw linux-2.6.20.14/include/linux/kernel.h linux-2.6.20.14-fbx/include/linux/kernel.h
--- linux-2.6.20.14/include/linux/kernel.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/kernel.h	2010-12-29 19:30:08.711441045 +0100
@@ -175,6 +175,8 @@
 		console_loglevel = 15;
 }
 
+extern void console_emergency_dump(char *buf, int *len);
+
 extern void bust_spinlocks(int yes);
 extern int oops_in_progress;		/* If set, an oops, panic(), BUG() or die() is in progress */
 extern int panic_timeout;
diff -ruw linux-2.6.20.14/include/linux/kmalloc_sizes.h linux-2.6.20.14-fbx/include/linux/kmalloc_sizes.h
--- linux-2.6.20.14/include/linux/kmalloc_sizes.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/kmalloc_sizes.h	2010-12-29 19:30:08.711441045 +0100
@@ -19,12 +19,10 @@
 	CACHE(32768)
 	CACHE(65536)
 	CACHE(131072)
-#if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU)
 	CACHE(262144)
-#endif
-#ifndef CONFIG_MMU
 	CACHE(524288)
 	CACHE(1048576)
+#ifndef CONFIG_MMU
 #ifdef CONFIG_LARGE_ALLOCS
 	CACHE(2097152)
 	CACHE(4194304)
diff -ruw linux-2.6.20.14/include/linux/list.h linux-2.6.20.14-fbx/include/linux/list.h
--- linux-2.6.20.14/include/linux/list.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/list.h	2010-12-29 19:30:08.721437040 +0100
@@ -369,6 +369,17 @@
 	container_of(ptr, type, member)
 
 /**
+ * list_first_entry - get the first element from a list
+ * @ptr:	the list head to take the element from.
+ * @type:	the type of the struct this is embedded in.
+ * @member:	the name of the list_struct within the struct.
+ *
+ * Note, that list is expected to be not empty.
+ */
+#define list_first_entry(ptr, type, member) \
+	list_entry((ptr)->next, type, member)
+
+/**
  * list_for_each	-	iterate over a list
  * @pos:	the &struct list_head to use as a loop cursor.
  * @head:	the head for your list.
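
A small usage example for the helper added above (struct request and the queue are invented for the illustration):

#include <linux/list.h>

struct request {
	struct list_head queue;
	int id;
};

/* peek at the oldest queued request without removing it */
static struct request *peek_request(struct list_head *pending)
{
	if (list_empty(pending))
		return NULL;
	return list_first_entry(pending, struct request, queue);
}
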
diff -ruw linux-2.6.20.14/include/linux/netdevice.h linux-2.6.20.14-fbx/include/linux/netdevice.h
--- linux-2.6.20.14/include/linux/netdevice.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/netdevice.h	2010-12-29 19:30:08.721437040 +0100
@@ -525,9 +525,26 @@
 	void                    (*poll_controller)(struct net_device *dev);
 #endif
 
+#if defined(CONFIG_FREEBOX_MVDSA) || defined(CONFIG_FREEBOX_MVDSA_MODULE)
+	int			(*fbxmvdsa_rx_fix)(struct sk_buff *skb);
+	void			*fbxmvdsa_priv;
+#endif
+
 	/* bridge stuff */
 	struct net_bridge_port	*br_port;
 
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	struct fbxbridge        *fbx_bridge;
+	struct fbxbridge        *fbx_bridge_port;
+#endif
+
+/* Start Freebox added code */
+#if defined (CONFIG_FREEBOX_L2BR) || defined(CONFIG_FREEBOX_L2BR_MODULE)
+	struct fbxl2br		*fbx_l2br;
+	struct fbxl2br_port	*fbx_l2br_port;
+#endif
+/* End Freebox added code */
+
 	/* class/net/name entry */
 	struct class_device	class_dev;
 	/* space for optional statistics and wireless sysfs groups */
diff -ruw linux-2.6.20.14/include/linux/netfilter/nf_conntrack_common.h linux-2.6.20.14-fbx/include/linux/netfilter/nf_conntrack_common.h
--- linux-2.6.20.14/include/linux/netfilter/nf_conntrack_common.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/netfilter/nf_conntrack_common.h	2010-12-29 19:30:08.721437040 +0100
@@ -73,6 +73,18 @@
 	/* Connection has fixed timeout. */
 	IPS_FIXED_TIMEOUT_BIT = 10,
 	IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT),
+
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+	/* Connection is tproxy-ed */
+	IPS_TPROXY_BIT = 11,
+	IPS_TPROXY = (1 << IPS_TPROXY_BIT),
+
+	IPS_TPROXY_RELATED_BIT = 12,
+	IPS_TPROXY_RELATED = (1 << IPS_TPROXY_RELATED_BIT),
+
+	IPS_MAY_DELETE_BIT = 12,
+	IPS_MAY_DELETE = (1 << IPS_MAY_DELETE_BIT),
+#endif
 };
 
 /* Connection tracking event bits */
diff -ruw linux-2.6.20.14/include/linux/netfilter/nf_conntrack_ftp.h linux-2.6.20.14-fbx/include/linux/netfilter/nf_conntrack_ftp.h
--- linux-2.6.20.14/include/linux/netfilter/nf_conntrack_ftp.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/netfilter/nf_conntrack_ftp.h	2010-12-29 19:30:08.721437040 +0100
@@ -26,6 +26,11 @@
 	u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
 	/* 0 means seq_match_aft_nl not set */
 	int seq_aft_nl_num[IP_CT_DIR_MAX];
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	unsigned int is_fbxbridge;
+	unsigned long fbxbridge_remote;
+	unsigned long fbxbridge_wan;
+#endif
 };
 
 struct nf_conntrack_expect;
diff -ruw linux-2.6.20.14/include/linux/netfilter_ipv4/ip_conntrack_ftp.h linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
--- linux-2.6.20.14/include/linux/netfilter_ipv4/ip_conntrack_ftp.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/ip_conntrack_ftp.h	2010-12-29 19:30:08.731445169 +0100
@@ -26,6 +26,16 @@
 	u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
 	/* 0 means seq_match_aft_nl not set */
 	int seq_aft_nl_num[IP_CT_DIR_MAX];
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	unsigned int is_fbxbridge;
+	unsigned long fbxbridge_remote;
+	unsigned long fbxbridge_wan;
+#endif
+#if defined (CONFIG_FREEBOX_L2BR) || defined(CONFIG_FREEBOX_L2BR_MODULE)
+	unsigned int is_fbxl2br;
+	unsigned long fbxl2br_remote;
+	unsigned long fbxl2br_wan;
+#endif
 };
 
 struct ip_conntrack_expect;
diff -ruw linux-2.6.20.14/include/linux/netfilter_ipv4/ip_conntrack.h linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/ip_conntrack.h
--- linux-2.6.20.14/include/linux/netfilter_ipv4/ip_conntrack.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/ip_conntrack.h	2010-12-29 19:30:08.731445169 +0100
@@ -115,6 +115,14 @@
 		int masq_index;
 #endif
 	} nat;
+
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+	union _tproxy_data {
+		void *sockref;
+		struct list_head related;
+	} tproxy;
+#endif /* CONFIG_IP_NF_TPROXY */
+
 #endif /* CONFIG_IP_NF_NAT_NEEDED */
 
 #if defined(CONFIG_IP_NF_CONNTRACK_MARK)
@@ -165,6 +173,11 @@
 	/* Direction relative to the master connection. */
 	enum ip_conntrack_dir dir;
 #endif
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+	/* List of registered reservations */
+	struct list_head reserved_list;
+#endif
 };
 
 #define IP_CT_EXPECT_PERMANENT	0x1
@@ -232,6 +245,8 @@
 	__ip_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
 }
 
+extern void __death_by_timeout(unsigned long ul_conntrack);
+
 /* These are for NAT.  Icky. */
 /* Update TCP window tracking data when NAT mangles the packet */
 extern void ip_conntrack_tcp_update(struct sk_buff *skb,
@@ -240,6 +255,15 @@
 
 /* Call me when a conntrack is destroyed. */
 extern void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack);
+#ifdef CONFIG_IP_NF_NAT_NRES
+/* Call when an expectation is destroyed. */
+extern void (*ip_conntrack_expect_destroyed)(struct ip_conntrack_expect *exp);
+#endif
+
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+/* Call me when a conntrack is confirmed. */
+extern void (*ip_conntrack_confirmed)(struct ip_conntrack *conntrack);
+#endif
 
 /* Fake conntrack entry for untracked connections */
 extern struct ip_conntrack ip_conntrack_untracked;
@@ -268,7 +292,8 @@
 extern void ip_ct_remove_expectations(struct ip_conntrack *ct);
 
 extern struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *,
-					       struct ip_conntrack_tuple *);
+					       struct ip_conntrack_tuple *,
+					       int);
 
 extern void ip_conntrack_free(struct ip_conntrack *ct);
 
diff -ruw linux-2.6.20.14/include/linux/netfilter_ipv4/ip_nat_core.h linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/ip_nat_core.h
--- linux-2.6.20.14/include/linux/netfilter_ipv4/ip_nat_core.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/ip_nat_core.h	2010-12-29 19:30:08.731445169 +0100
@@ -15,4 +15,13 @@
 					 enum ip_conntrack_info ctinfo,
 					 unsigned int hooknum,
 					 struct sk_buff **pskb);
+
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+extern int ip_nat_manip_pkt(u_int16_t proto,
+			    struct sk_buff **pskb,
+			    unsigned int iphdroff,
+			    const struct ip_conntrack_tuple *target,
+			    enum ip_nat_manip_type maniptype);
+#endif
+
 #endif /* _IP_NAT_CORE_H */
diff -ruw linux-2.6.20.14/include/linux/netfilter_ipv4/ip_nat.h linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/ip_nat.h
--- linux-2.6.20.14/include/linux/netfilter_ipv4/ip_nat.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/ip_nat.h	2010-12-29 19:30:08.731445169 +0100
@@ -16,6 +16,11 @@
 
 #define IP_NAT_RANGE_MAP_IPS 1
 #define IP_NAT_RANGE_PROTO_SPECIFIED 2
+#define IP_NAT_RANGE_BYPASS_HELPERS 4
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+#define IP_NAT_RANGE_USE_RESERVED 8
+#endif
 
 /* NAT sequence number modifications */
 struct ip_nat_seq {
@@ -51,6 +56,9 @@
 #ifdef __KERNEL__
 #include <linux/list.h>
 
+/* call this to signal dependency on the NAT hooks */
+extern void need_nat_hooks(void);
+
 /* Protects NAT hash tables, and NAT-private part of conntracks. */
 extern rwlock_t ip_nat_lock;
 
@@ -63,6 +71,18 @@
 
 struct ip_conntrack;
 
+#ifdef CONFIG_IP_NF_NAT_NRES
+/* Structure to store reserved manips */
+struct ip_nat_reserved {
+	struct list_head hash;			/* Hash chain */
+	struct list_head exp;			/* Per-expectation list */
+	atomic_t use;				/* Reference count */
+	struct ip_conntrack_manip manip;	/* Reserved manip */
+	struct ip_conntrack_manip peer;		/* Peer (optional) */
+	u_int16_t proto;			/* Protocol number of reserved manip */
+};
+#endif
+
 /* Set up the info structure to map into this range. */
 extern unsigned int ip_nat_setup_info(struct ip_conntrack *conntrack,
 				      const struct ip_nat_range *range,
@@ -70,7 +90,39 @@
 
 /* Is this tuple already taken? (not by us)*/
 extern int ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple,
-			     const struct ip_conntrack *ignored_conntrack);
+			     const struct ip_conntrack *ignored_conntrack,
+			     const enum ip_nat_manip_type maniptype,
+			     const unsigned int flags);
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+struct ip_conntrack_expect;
+
+/* NAT port reservation: allocate and hash a new entry */
+extern struct ip_nat_reserved *__ip_nat_reserved_new_hash(const struct ip_conntrack_manip *manip,
+					 const u_int16_t proto, const struct ip_conntrack_manip *peer);
+
+/* NAT port reservation: unhash an entry */
+extern struct ip_nat_reserved *__ip_nat_reserved_unhash(const struct ip_conntrack_manip *manip,
+				       const u_int16_t proto, const struct ip_conntrack_manip *peer);
+
+/* NAT port reservation: free a reservation */
+extern void __ip_nat_reserved_free(struct ip_nat_reserved *res);
+
+/* NAT port reservation: register a new reservation */
+extern int ip_nat_reserved_register(struct ip_conntrack_expect *exp,
+				    const struct ip_conntrack_manip *manip,
+				    const u_int16_t proto,
+				    const struct ip_conntrack_manip *peer);
+
+/* NAT port reservation: unregister a reservation */
+extern int ip_nat_reserved_unregister(struct ip_conntrack_expect *exp,
+				      const struct ip_conntrack_manip *manip,
+				      const u_int16_t proto,
+				      const struct ip_conntrack_manip *peer);
+
+/* NAT port reservation: unregister all reservations for a given expectation */
+extern void ip_nat_reserved_unregister_all(struct ip_conntrack_expect *exp);
+#endif /*CONFIG_IP_NF_NAT_NRES*/
 
 #else  /* !__KERNEL__: iptables wants this to compile. */
 #define ip_nat_multi_range ip_nat_multi_range_compat
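
The CONFIG_IP_NF_NAT_NRES reservation API above lets a helper pin a NAT manip (address + port) on an expectation so the generic allocator will not hand the same port to an unrelated flow before the expected connection arrives. A sketch of a caller (the surrounding helper and the use of a NULL peer are assumptions):

#ifdef CONFIG_IP_NF_NAT_NRES
#include <linux/in.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_nat.h>

/* reserve a WAN-side UDP port for an expected data connection */
static int reserve_data_port(struct ip_conntrack_expect *exp,
			     __be32 wan_ip, __be16 wan_port)
{
	struct ip_conntrack_manip manip = {
		.ip	= wan_ip,
		.u	= { .udp = { .port = wan_port } },
	};

	/* NULL peer: reserve the manip whatever the remote endpoint is */
	return ip_nat_reserved_register(exp, &manip, IPPROTO_UDP, NULL);
}
#endif
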
diff -ruw linux-2.6.20.14/include/linux/netfilter_ipv4/Kbuild linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/Kbuild
--- linux-2.6.20.14/include/linux/netfilter_ipv4/Kbuild	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/Kbuild	2010-12-29 19:30:08.731445169 +0100
@@ -59,3 +59,5 @@
 unifdef-y += ip_nat_rule.h
 unifdef-y += ip_queue.h
 unifdef-y += ip_tables.h
+
+unifdef-y += ip_tproxy.h
diff -ruw linux-2.6.20.14/include/linux/net.h linux-2.6.20.14-fbx/include/linux/net.h
--- linux-2.6.20.14/include/linux/net.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/net.h	2010-12-29 19:30:08.721437040 +0100
@@ -24,7 +24,7 @@
 struct poll_table_struct;
 struct inode;
 
-#define NPROTO		32		/* should be enough for now..	*/
+#define NPROTO		AF_MAX		/* should be enough for now..	*/
 
 #define SYS_SOCKET	1		/* sys_socket(2)		*/
 #define SYS_BIND	2		/* sys_bind(2)			*/
@@ -223,6 +223,11 @@
 			   size_t size, int flags);
 extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
 
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+extern void (*ip_tproxy_tcp_unhashed)(struct sock *, int proto);
+extern void (*ip_tproxy_udp_unhashed)(struct sock *, int proto);
+#endif
+
 #ifndef CONFIG_SMP
 #define SOCKOPS_WRAPPED(name) name
 #define SOCKOPS_WRAP(name, fam)
diff -ruw linux-2.6.20.14/include/linux/netlink.h linux-2.6.20.14-fbx/include/linux/netlink.h
--- linux-2.6.20.14/include/linux/netlink.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/netlink.h	2010-12-29 19:30:08.731445169 +0100
@@ -24,6 +24,10 @@
 /* leave room for NETLINK_DM (DM Events) */
 #define NETLINK_SCSITRANSPORT	18	/* SCSI Transports */
 
+/* Begin Freebox added code. */
+#define NETLINK_FBXL2BR		24	/* Freebox L2 Bridge */
+/* End Freebox added code. */
+
 #define MAX_LINKS 32		
 
 struct sockaddr_nl
diff -ruw linux-2.6.20.14/include/linux/pci_ids.h linux-2.6.20.14-fbx/include/linux/pci_ids.h
--- linux-2.6.20.14/include/linux/pci_ids.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/pci_ids.h	2010-12-29 19:30:08.741453526 +0100
@@ -1581,6 +1581,9 @@
 #define PCI_DEVICE_ID_3DFX_VOODOO3	0x0005
 #define PCI_DEVICE_ID_3DFX_VOODOO5	0x0009
 
+#define PCI_VENDOR_ID_SIGMADES		0x1236
+#define PCI_DEVICE_ID_SIGMADES_OHCI	0x1234
+#define PCI_DEVICE_ID_SIGMADES_EHCI	0x1235
 
 
 #define PCI_VENDOR_ID_AVM		0x1244
@@ -1614,6 +1617,9 @@
 #define PCI_VENDOR_ID_SATSAGEM		0x1267
 #define PCI_DEVICE_ID_SATSAGEM_NICCY	0x1016
 
+#define PCI_VENDOR_ID_SILICON_MOTION		0x126f
+#define PCI_DEVICE_ID_SM501_VOYAGER_GX_REV_AA	0x0501
+#define PCI_DEVICE_ID_SM501_VOYAGER_GX_REV_B	0x0510
 
 #define PCI_VENDOR_ID_ENSONIQ		0x1274
 #define PCI_DEVICE_ID_ENSONIQ_CT5880	0x5880
diff -ruw linux-2.6.20.14/include/linux/random.h linux-2.6.20.14-fbx/include/linux/random.h
--- linux-2.6.20.14/include/linux/random.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/random.h	2010-12-29 19:30:08.741453526 +0100
@@ -48,6 +48,8 @@
 				 unsigned int value);
 extern void add_interrupt_randomness(int irq);
 
+extern void add_raw_randomness(uint8_t *buf, int nbytes);
+
 extern void get_random_bytes(void *buf, int nbytes);
 void generate_random_uuid(unsigned char uuid_out[16]);
 
diff -ruw linux-2.6.20.14/include/linux/serial_core.h linux-2.6.20.14-fbx/include/linux/serial_core.h
--- linux-2.6.20.14/include/linux/serial_core.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/serial_core.h	2010-12-29 19:30:08.751517111 +0100
@@ -135,6 +135,9 @@
 /* Xilinx uartlite */
 #define PORT_UARTLITE	74
 
+/* Broadcom bcm963xx */
+#define PORT_BCM963XX	75
+
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
diff -ruw linux-2.6.20.14/include/linux/serial.h linux-2.6.20.14-fbx/include/linux/serial.h
--- linux-2.6.20.14/include/linux/serial.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/serial.h	2010-12-29 19:30:08.751517111 +0100
@@ -76,7 +76,8 @@
 #define PORT_16654	11
 #define PORT_16850	12
 #define PORT_RSA	13	/* RSA-DV II/S card */
-#define PORT_MAX	13
+#define PORT_SB1250	14
+#define PORT_MAX	14
 
 #define SERIAL_IO_PORT	0
 #define SERIAL_IO_HUB6	1
diff -ruw linux-2.6.20.14/include/linux/serial_reg.h linux-2.6.20.14-fbx/include/linux/serial_reg.h
--- linux-2.6.20.14/include/linux/serial_reg.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/serial_reg.h	2010-12-29 19:30:08.751517111 +0100
@@ -15,12 +15,44 @@
 #define _LINUX_SERIAL_REG_H
 
 /*
- * DLAB=0
+ * smp863x has a 16550 uart, but the registers have been messed up...
  */
+#ifdef CONFIG_TANGO2
+
 #define UART_RX		0	/* In:  Receive buffer */
-#define UART_TX		0	/* Out: Transmit buffer */
+#define UART_TX		1	/* Out: Transmit buffer */
+#define UART_IER	2	/* Out: Interrupt Enable Register */
+#define UART_IIR	3	/* In:  Interrupt ID Register */
+#define UART_FCR	4	/* Out: FIFO Control Register */
+#define UART_LCR	5	/* Out: Line Control Register */
+#define UART_MCR	6	/* Out: Modem Control Register */
+#define UART_LSR	7	/* In:  Line Status Register */
+#define UART_MSR	8	/* In:  Modem Status Register */
+#define UART_SCR	9	/* I/O: Scratch Register */
+
+/* EFR does not exist on TANGO2, we use a magic value to catch accesses and
+ * make them nops */
+#define UART_EFR	42
+
+#else
 
+#define UART_RX		0	/* In:  Receive buffer */
+#define UART_TX		0	/* Out: Transmit buffer */
 #define UART_IER	1	/* Out: Interrupt Enable Register */
+#define UART_IIR	2	/* In:  Interrupt ID Register */
+#define UART_EFR	2	/* I/O: Extended Features Register */
+#define UART_FCR	2	/* Out: FIFO Control Register */
+#define UART_LCR	3	/* Out: Line Control Register */
+#define UART_MCR	4	/* Out: Modem Control Register */
+#define UART_LSR	5	/* In:  Line Status Register */
+#define UART_MSR	6	/* In:  Modem Status Register */
+#define UART_SCR	7	/* I/O: Scratch Register */
+
+#endif
+
+/*
+ * DLAB=0
+ */
 #define UART_IER_MSI		0x08 /* Enable Modem status interrupt */
 #define UART_IER_RLSI		0x04 /* Enable receiver line status interrupt */
 #define UART_IER_THRI		0x02 /* Enable Transmitter holding register int. */
@@ -30,7 +62,6 @@
  */
 #define UART_IERX_SLEEP		0x10 /* Enable sleep mode */
 
-#define UART_IIR	2	/* In:  Interrupt ID Register */
 #define UART_IIR_NO_INT		0x01 /* No interrupts pending */
 #define UART_IIR_ID		0x06 /* Mask for the interrupt ID */
 #define UART_IIR_MSI		0x00 /* Modem status interrupt */
@@ -38,7 +69,6 @@
 #define UART_IIR_RDI		0x04 /* Receiver data interrupt */
 #define UART_IIR_RLSI		0x06 /* Receiver line status interrupt */
 
-#define UART_FCR	2	/* Out: FIFO Control Register */
 #define UART_FCR_ENABLE_FIFO	0x01 /* Enable the FIFO */
 #define UART_FCR_CLEAR_RCVR	0x02 /* Clear the RCVR FIFO */
 #define UART_FCR_CLEAR_XMIT	0x04 /* Clear the XMIT FIFO */
@@ -81,7 +111,6 @@
 #define UART_FCR6_T_TRIGGER_30	0x30 /* Mask for transmit trigger set at 30 */
 #define UART_FCR7_64BYTE	0x20 /* Go into 64 byte mode (TI16C750) */
 
-#define UART_LCR	3	/* Out: Line Control Register */
 /*
  * Note: if the word length is 5 bits (UART_LCR_WLEN5), then setting 
  * UART_LCR_STOP will select 1.5 stop bits, not 2 stop bits.
@@ -97,7 +126,6 @@
 #define UART_LCR_WLEN7		0x02 /* Wordlength: 7 bits */
 #define UART_LCR_WLEN8		0x03 /* Wordlength: 8 bits */
 
-#define UART_MCR	4	/* Out: Modem Control Register */
 #define UART_MCR_CLKSEL		0x80 /* Divide clock by 4 (TI16C752, EFR[4]=1) */
 #define UART_MCR_TCRTLR		0x40 /* Access TCR/TLR (TI16C752, EFR[4]=1) */
 #define UART_MCR_XONANY		0x20 /* Enable Xon Any (TI16C752, EFR[4]=1) */
@@ -108,7 +136,6 @@
 #define UART_MCR_RTS		0x02 /* RTS complement */
 #define UART_MCR_DTR		0x01 /* DTR complement */
 
-#define UART_LSR	5	/* In:  Line Status Register */
 #define UART_LSR_TEMT		0x40 /* Transmitter empty */
 #define UART_LSR_THRE		0x20 /* Transmit-hold-register empty */
 #define UART_LSR_BI		0x10 /* Break interrupt indicator */
@@ -117,7 +144,6 @@
 #define UART_LSR_OE		0x02 /* Overrun error indicator */
 #define UART_LSR_DR		0x01 /* Receiver data ready */
 
-#define UART_MSR	6	/* In:  Modem Status Register */
 #define UART_MSR_DCD		0x80 /* Data Carrier Detect */
 #define UART_MSR_RI		0x40 /* Ring Indicator */
 #define UART_MSR_DSR		0x20 /* Data Set Ready */
@@ -128,18 +154,25 @@
 #define UART_MSR_DCTS		0x01 /* Delta CTS */
 #define UART_MSR_ANY_DELTA	0x0F /* Any of the delta bits! */
 
-#define UART_SCR	7	/* I/O: Scratch Register */
 
 /*
  * DLAB=1
  */
+
+/*
+ * smp863x has DLL and DLM in one register
+ */
+#ifdef CONFIG_TANGO2
+#define UART_DL		10
+#define UART_CLKSEL	11	/* Clock selection */
+#else
 #define UART_DLL	0	/* Out: Divisor Latch Low */
 #define UART_DLM	1	/* Out: Divisor Latch High */
+#endif
 
 /*
  * LCR=0xBF (or DLAB=1 for 16C660)
  */
-#define UART_EFR	2	/* I/O: Extended Features Register */
 #define UART_EFR_CTS		0x80 /* CTS flow control */
 #define UART_EFR_RTS		0x40 /* RTS flow control */
 #define UART_EFR_SCD		0x20 /* Special character detect */
diff -ruw linux-2.6.20.14/include/linux/skbuff.h linux-2.6.20.14-fbx/include/linux/skbuff.h
--- linux-2.6.20.14/include/linux/skbuff.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/skbuff.h	2010-12-29 19:30:08.751517111 +0100
@@ -140,6 +140,10 @@
 	__be32          ip6_frag_id;
 	struct sk_buff	*frag_list;
 	skb_frag_t	frags[MAX_SKB_FRAGS];
+#ifdef CONFIG_ADI_FUSIV
+	struct sk_buff	*link_to_header;
+	unsigned int	apbuf_used_by_host;
+#endif
 };
 
 /* We divide dataref into two halves.  The higher 16 bits hold references
@@ -285,6 +289,12 @@
 				nfctinfo:3;
 	__u8			pkt_type:3,
 				fclone:2,
+#ifdef CONFIG_SKB_RECYCLE
+				cache_clean:1,
+#endif
+#if defined(CONFIG_FREEBOX_MVDSA) || defined(CONFIG_FREEBOX_MVDSA_MODULE)
+				dsa_done:1,
+#endif
 				ipvs_property:1;
 	__be16			protocol;
 
@@ -297,6 +307,9 @@
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct nf_bridge_info	*nf_bridge;
 #endif
+#ifdef CONFIG_IP_FFN
+	int			ffn_state;
+#endif
 #endif /* CONFIG_NETFILTER */
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
@@ -312,6 +325,15 @@
 #endif
 
 	__u32			mark;
+	__u32			rx_class;
+
+#ifdef CONFIG_SKB_RECYCLE
+	/* callback just before skb header memory is about to be
+	 * released, memory is not freed if callback returns 1 */
+	int			(*recycle)(void *recycle_data,
+					   struct sk_buff *skb);
+	void			*recycle_data;
+#endif
 
 	/* These elements must be at the end, see alloc_skb() for details.  */
 	unsigned int		truesize;
@@ -997,6 +1019,10 @@
  * Various parts of the networking layer expect at least 16 bytes of
  * headroom, you should not reduce this.
  */
+#ifdef CONFIG_NETSKBPAD
+#define NET_SKB_PAD	CONFIG_NETSKBPAD
+#endif
+
 #ifndef NET_SKB_PAD
 #define NET_SKB_PAD	16
 #endif
@@ -1482,5 +1508,33 @@
 	return skb_shinfo(skb)->gso_size;
 }
 
+#ifdef CONFIG_SKB_RECYCLE
+static inline void skb_clean_state(struct sk_buff *skb)
+{
+	unsigned int datasize;
+	struct skb_shared_info *shinfo;
+
+	memset(skb, 0, offsetof(struct sk_buff, recycle));
+	atomic_set(&skb->users, 1);
+
+	datasize = skb->end - skb->head;
+	skb->truesize = datasize + sizeof (struct sk_buff);
+	skb->data = skb->head;
+	skb->tail = skb->head;
+
+	shinfo = skb_shinfo(skb);
+	atomic_set(&shinfo->dataref, 1);
+	shinfo->nr_frags  = 0;
+	shinfo->gso_size = 0;
+	shinfo->gso_segs = 0;
+	shinfo->gso_type = 0;
+	shinfo->ip6_frag_id = 0;
+	shinfo->frag_list = NULL;
+}
+
+extern void kfree_recycled_skbmem(struct sk_buff *skb);
+
+#endif
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
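
The recycle hook and skb_clean_state() above exist so a driver can keep rx buffer memory out of the normal free path. The driver-side contract is only what the header comment states (return 1 to keep the memory), so the structure, pool limit and function names below are invented:

#ifdef CONFIG_SKB_RECYCLE
#include <linux/skbuff.h>

#define MYDRV_POOL_MAX	64

struct mydrv_priv {
	struct sk_buff_head	rx_pool;
};

/* called by the core just before the skb header memory would be freed */
static int mydrv_skb_recycle(void *recycle_data, struct sk_buff *skb)
{
	struct mydrv_priv *priv = recycle_data;

	if (skb_queue_len(&priv->rx_pool) >= MYDRV_POOL_MAX)
		return 0;			/* pool full: let it be freed */

	skb_clean_state(skb);			/* reset fields for reuse */
	skb_queue_tail(&priv->rx_pool, skb);
	return 1;				/* keep the memory */
}

/* arm a freshly allocated rx skb for recycling */
static void mydrv_arm_recycle(struct mydrv_priv *priv, struct sk_buff *skb)
{
	skb->recycle = mydrv_skb_recycle;
	skb->recycle_data = priv;
}
#endif
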
diff -ruw linux-2.6.20.14/include/linux/socket.h linux-2.6.20.14-fbx/include/linux/socket.h
--- linux-2.6.20.14/include/linux/socket.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/socket.h	2010-12-29 19:30:08.751517111 +0100
@@ -187,7 +187,8 @@
 #define AF_LLC		26	/* Linux LLC			*/
 #define AF_TIPC		30	/* TIPC sockets			*/
 #define AF_BLUETOOTH	31	/* Bluetooth sockets 		*/
-#define AF_MAX		32	/* For now.. */
+#define AF_FBXATM	32
+#define AF_MAX		33	/* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC	AF_UNSPEC
@@ -220,6 +221,7 @@
 #define PF_LLC		AF_LLC
 #define PF_TIPC		AF_TIPC
 #define PF_BLUETOOTH	AF_BLUETOOTH
+#define PF_FBXATM	AF_FBXATM
 #define PF_MAX		AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
diff -ruw linux-2.6.20.14/include/linux/sockios.h linux-2.6.20.14-fbx/include/linux/sockios.h
--- linux-2.6.20.14/include/linux/sockios.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/sockios.h	2010-12-29 19:30:08.751517111 +0100
@@ -122,6 +122,28 @@
 #define SIOCBRADDIF	0x89a2		/* add interface to bridge      */
 #define SIOCBRDELIF	0x89a3		/* remove interface from bridge */
 
+/* Start Freebox added code */
+/* fbxdiverter call */
+#define SIOCGFBXDIVERT	0x89b0		/* fbxdiverter support		*/
+#define SIOCSFBXDIVERT	0x89b1		/* Set fbxdiverter options 	*/
+/* End Freebox added code */
+
+/* Start Freebox added code */
+/* fbxbridge call */
+#define SIOCGFBXBRIDGE	0x89b2		/* fbxbridge support          */
+#define SIOCSFBXBRIDGE	0x89b3		/* Set fbxbridge options      */
+/* End Freebox added code */
+
+/* Start Freebox added code */
+/* fbxmvdsa call */
+#define SIOCFBXMVDSA	0x89b4		/* fbxmvdsa support          */
+/* fbxmvdsa call */
+
+/* Start Freebox added code */
+/* fbxl2br call */
+#define SIOCFBXL2BR	0x89b5		/* fbxl2br support          */
+/* fbxl2br call */
+
 /* Device private ioctl calls */
 
 /*
diff -ruw linux-2.6.20.14/include/linux/sunrpc/xprt.h linux-2.6.20.14-fbx/include/linux/sunrpc/xprt.h
--- linux-2.6.20.14/include/linux/sunrpc/xprt.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/linux/sunrpc/xprt.h	2010-12-29 19:30:08.751517111 +0100
@@ -32,7 +32,7 @@
 
 #define RPC_MIN_RESVPORT	(1U)
 #define RPC_MAX_RESVPORT	(65535U)
-#define RPC_DEF_MIN_RESVPORT	(665U)
+#define RPC_DEF_MIN_RESVPORT	(670U)
 #define RPC_DEF_MAX_RESVPORT	(1023U)
 
 /*
diff -ruw linux-2.6.20.14/include/net/ip.h linux-2.6.20.14-fbx/include/net/ip.h
--- linux-2.6.20.14/include/net/ip.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/include/net/ip.h	2010-12-29 19:30:08.781442539 +0100
@@ -41,6 +41,17 @@
 #define IPSKB_XFRM_TRANSFORMED	4
 #define IPSKB_FRAG_COMPLETE	8
 #define IPSKB_REROUTED		16
+
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+
+	/* these fields unfortunately do not fit into the 40 bytes of
+	 * allocated cb space... we have to allocate at least 48 bytes */
+
+	u32 orig_srcaddr;
+	u32 orig_dstaddr;
+	u16 orig_srcport;
+	u16 orig_dstport;
+#endif
 };
 
 struct ipcm_cookie
@@ -326,6 +337,14 @@
 extern atomic_t ip_frag_mem;
 
 /*
+ *	Functions provided by ip_ffn.c
+ */
+extern void ip_ffn_init(void);
+extern int ip_ffn_process(struct sk_buff *skb);
+extern void ip_ffn_add(struct sk_buff *skb);
+extern void ip_ffn_flush_all(void);
+
+/*
  *	Functions provided by ip_forward.c
  */
  
diff -ruw linux-2.6.20.14/init/do_mounts.c linux-2.6.20.14-fbx/init/do_mounts.c
--- linux-2.6.20.14/init/do_mounts.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/init/do_mounts.c	2011-09-09 16:10:02.080493458 +0200
@@ -10,6 +10,10 @@
 #include <linux/mount.h>
 #include <linux/device.h>
 
+#ifdef CONFIG_DMCRYPTATBOOT
+#include <linux/dm-ioctl.h>
+#endif
+
 #include <linux/nfs_fs.h>
 #include <linux/nfs_fs_sb.h>
 #include <linux/nfs_mount.h>
@@ -402,6 +406,122 @@
 #endif
 }
 
+#ifdef CONFIG_DMCRYPTATBOOT
+/*
+ * Create dm device
+ */
+int dm_ctl_ioctl(struct inode *inode, struct file *file,
+		 uint command, ulong u);
+
+static int dm_run_setup(void)
+{
+	struct dm_ioctl dm, *dmp;
+	struct dm_target_spec *spec;
+	uint64_t size;
+	char *data, *tmp, *major, *minor;
+	uint8_t *target_info;
+	dev_t tomap;
+	int ret, fd;
+	uint8_t key[128];
+
+	/* read config */
+	ret = -EINVAL;
+	tmp = CONFIG_DMCRYPTATBOOT_DEVICE;
+	major = minor = NULL;
+	if (tmp)
+		major = strsep(&tmp, ":");
+	if (tmp)
+		minor = strsep(&tmp, ":");
+	if (!major || !minor)
+		goto end;
+
+	/* create device to map */
+	tomap = MKDEV(simple_strtoul(major, NULL, 10),
+		      simple_strtoul(minor, NULL, 10));
+	if (create_dev("/dev/tomap", tomap))
+		goto end;
+
+	fd = sys_open("/dev/tomap", 0, 0);
+	if (fd < 0)
+		goto end;
+
+	/* fetch its size */
+	if (sys_ioctl(fd, BLKGETSIZE64, (unsigned long)&size)) {
+		sys_close(fd);
+		goto end;
+	}
+	sys_close(fd);
+	size /= 512;
+
+	/* create dm device */
+	memset(&dm, 0, sizeof (dm));
+	dm.version[0] = DM_VERSION_MAJOR;
+	dm.version[1] = DM_VERSION_MINOR;
+	dm.version[2] = DM_VERSION_PATCHLEVEL;
+	dm.data_size = sizeof (dm);
+	strcpy(dm.name, "root");
+
+	ret = dm_ctl_ioctl(NULL, NULL, DM_DEV_CREATE, (ulong)&dm);
+	if (ret < 0) {
+		printk("dm_ctl_ioctl create failed\n");
+		goto end;
+	}
+
+	/* create table */
+	data = kmalloc(sizeof (*dmp) + sizeof (*spec) + 128, GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	dmp = (struct dm_ioctl *)data;
+	spec = (struct dm_target_spec *)(dmp + 1);
+	target_info = (uint8_t *)(spec + 1);
+
+	memset(dmp, 0, sizeof (*dmp));
+	dmp->version[0] = DM_VERSION_MAJOR;
+	dmp->version[1] = DM_VERSION_MINOR;
+	dmp->version[2] = DM_VERSION_PATCHLEVEL;
+	dmp->data_size = sizeof (*dmp) + sizeof (*spec) + 128;
+	dmp->data_start = sizeof (*dmp);
+	dmp->target_count = 1;
+	strcpy(dmp->name, "root");
+
+	memset(spec, 0, sizeof (*spec));
+	spec->sector_start = 0;
+	spec->length = size;
+	strcpy(spec->target_type, "crypt");
+
+	strcpy(key, CONFIG_DMCRYPTATBOOT_KEY);
+
+	memset(target_info, 0, 128);
+	snprintf((char *)target_info, 128, "%s %s 0 /dev/tomap 0",
+		 CONFIG_DMCRYPTATBOOT_CIPHER, key);
+	target_info[127] = 0;
+
+	ret = dm_ctl_ioctl(NULL, NULL, DM_TABLE_LOAD, (ulong)data);
+	if (ret < 0) {
+		printk("dm_ctl_ioctl table load failed\n");
+		goto end;
+	}
+
+	/* resume device */
+	memset(&dm, 0, sizeof (dm));
+	dm.version[0] = DM_VERSION_MAJOR;
+	dm.version[1] = DM_VERSION_MINOR;
+	dm.version[2] = DM_VERSION_PATCHLEVEL;
+	dm.data_size = sizeof (dm);
+	strcpy(dm.name, "root");
+
+	ret = dm_ctl_ioctl(NULL, NULL, DM_DEV_SUSPEND, (ulong)&dm);
+	if (ret < 0) {
+		printk("dm_ctl_ioctl resume failed\n");
+		goto end;
+	}
+
+	strcpy(saved_root_name, "/dev/dm-0");
+end:
+	return ret;
+}
+#endif
+
 /*
  * Prepare the namespace - decide what/where to mount, load ramdisks, etc.
  */
@@ -421,6 +541,16 @@
 
 	md_run_setup();
 
+#ifdef CONFIG_DMCRYPTATBOOT
+#ifdef CONFIG_DMCRYPTATBOOT_ONLY
+	if (dm_run_setup() != 0)
+		/* disallow other root= value */
+		saved_root_name[0] = 0;
+#else
+	dm_run_setup();
+#endif
+#endif
+
 	if (saved_root_name[0]) {
 		root_device_name = saved_root_name;
 		if (!strncmp(root_device_name, "mtd", 3)) {
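
For reference, the table string built by dm_run_setup() follows the standard dm-crypt target syntax, '<start> <length> crypt <cipher> <key> <iv_offset> <device> <offset>'; the whole sequence is roughly what a userspace 'dmsetup create root --table "0 <sectors> crypt <cipher> <key> 0 /dev/tomap 0"' would do, only performed from the kernel before the root filesystem is mounted.
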
diff -ruw linux-2.6.20.14/init/Kconfig linux-2.6.20.14-fbx/init/Kconfig
--- linux-2.6.20.14/init/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/init/Kconfig	2010-12-29 19:30:08.821439310 +0100
@@ -91,6 +91,14 @@
 	  Note: This requires Perl, and a git repository, but not necessarily
 	  the git or cogito tools to be installed.
 
+config IGNORE_COMPILE_INFO
+	bool "Ignore non constant compile time info"
+	default n
+	help
+	  This option makes the Linux binary invariant across successive
+	  compilations by disabling inclusion of variable compile-time
+	  data (build user, build hostname, ...).
+
 config SWAP
 	bool "Support for paging of anonymous memory (swap)"
 	depends on MMU && BLOCK
@@ -238,6 +246,31 @@
 	  This option enables access to the kernel configuration file
 	  through /proc/config.gz.
 
+config DMCRYPTATBOOT
+	bool "Create device-mapper crypt target before root"
+	depends on DM_CRYPT
+	default n
+
+config DMCRYPTATBOOT_DEVICE
+	string "Device major:minor"
+	depends on DMCRYPTATBOOT
+
+config DMCRYPTATBOOT_CIPHER
+	string "Cipher"
+	depends on DMCRYPTATBOOT
+
+config DMCRYPTATBOOT_KEY
+	string "Key"
+	depends on DMCRYPTATBOOT
+
+config DMCRYPTATBOOT_KEY_DECRYPT
+	string "Decryption key"
+	depends on DMCRYPTATBOOT
+
+config DMCRYPTATBOOT_ONLY
+	bool "Refuse to mount something else"
+	depends on DMCRYPTATBOOT
+
 config CPUSETS
 	bool "Cpuset support"
 	depends on SMP
diff -ruw linux-2.6.20.14/init/Makefile linux-2.6.20.14-fbx/init/Makefile
--- linux-2.6.20.14/init/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/init/Makefile	2011-09-09 16:10:02.080493458 +0200
@@ -10,8 +10,9 @@
 mounts-$(CONFIG_BLK_DEV_INITRD)	+= do_mounts_initrd.o
 mounts-$(CONFIG_BLK_DEV_MD)	+= do_mounts_md.o
 
+
 # files to be removed upon make clean
-clean-files := ../include/linux/compile.h
+clean-files := ../include/linux/compile.h dmcryptatboot_decrypt_6348.c
 
 # dependencies on generated files need to be listed explicitly
 
@@ -22,7 +23,10 @@
 # mkcompile_h will make sure to only update the
 # actual file if its content has changed.
 
+mkcompile-y := $(srctree)/scripts/mkcompile_h
+mkcompile-$(CONFIG_IGNORE_COMPILE_INFO) := $(srctree)/scripts/mkcompile_fixed_h
+
 include/linux/compile.h: FORCE
 	@echo '  CHK     $@'
-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+	$(Q)$(CONFIG_SHELL) $(mkcompile-y) $@ \
 	"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(CFLAGS)"
diff -ruw linux-2.6.20.14/init/version.c linux-2.6.20.14-fbx/init/version.c
--- linux-2.6.20.14/init/version.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/init/version.c	2010-12-29 19:30:08.821439310 +0100
@@ -34,6 +34,7 @@
 };
 EXPORT_SYMBOL_GPL(init_uts_ns);
 
+#ifndef CONFIG_IGNORE_COMPILE_INFO
 /* FIXED STRINGS! Don't touch! */
 const char linux_banner[] =
 	"Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
@@ -43,3 +44,11 @@
 	"%s version %s"
 	" (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ")"
 	" (" LINUX_COMPILER ") %s\n";
+#else
+const char linux_banner[] =
+	"Linux version " UTS_RELEASE " (" LINUX_COMPILER ") (compile info ignored)\n";
+
+const char linux_proc_banner[] =
+	"%s version %s"
+	" (compile info ignored) %s\n";
+#endif
diff -ruw linux-2.6.20.14/kernel/printk.c linux-2.6.20.14-fbx/kernel/printk.c
--- linux-2.6.20.14/kernel/printk.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/kernel/printk.c	2010-12-29 19:30:08.851442681 +0100
@@ -37,6 +37,10 @@
 
 #define __LOG_BUF_LEN	(1 << CONFIG_LOG_BUF_SHIFT)
 
+#ifdef CONFIG_DEBUG_LL
+extern void printascii(char *);
+#endif
+
 /* printk's without a loglevel use this.. */
 #define DEFAULT_MESSAGE_LOGLEVEL 4 /* KERN_WARNING */
 
@@ -163,6 +167,26 @@
 
 __setup("log_buf_len=", log_buf_len_setup);
 
+void console_emergency_dump(char *buf, int *len)
+{
+	int i, limit;
+
+	if (*len > log_buf_len)
+		*len = log_buf_len;
+	if (*len > logged_chars)
+		*len = logged_chars;
+	limit = log_end;
+
+	for (i = 0; i < *len; i++) {
+		int j;
+
+		j = limit - 1 - i;
+		if (j + log_buf_len < log_end)
+			break;
+		buf[*len - 1 - i] = LOG_BUF(j);
+	}
+}
+
 /*
  * Commands to do_syslog:
  *
@@ -537,6 +561,10 @@
 	/* Emit the output into the temporary buffer */
 	printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args);
 
+#ifdef CONFIG_DEBUG_LL
+	printascii(printk_buf);
+#endif
+
 	/*
 	 * Copy the output into log_buf.  If the caller didn't provide
 	 * appropriate log level tags, we insert them here
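
console_emergency_dump() copies the tail of the log ring into a caller-supplied buffer, clamping *len to what the buffer actually holds. A sketch of a caller, e.g. from a panic notifier (the buffer size and the notifier context are assumptions, not part of this patch):

#include <linux/kernel.h>
#include <linux/notifier.h>

static char emergency_log[4096];

static int dump_log_on_panic(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	int len = sizeof(emergency_log);

	console_emergency_dump(emergency_log, &len);
	/* ... push emergency_log (at most len bytes) to persistent storage ... */
	return NOTIFY_DONE;
}

static struct notifier_block panic_dump_nb = {
	.notifier_call	= dump_log_on_panic,
};

/* registered e.g. from an __init function with:
 *	atomic_notifier_chain_register(&panic_notifier_list, &panic_dump_nb);
 */
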
diff -ruw linux-2.6.20.14/lib/Kconfig linux-2.6.20.14-fbx/lib/Kconfig
--- linux-2.6.20.14/lib/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/lib/Kconfig	2010-12-29 19:30:08.851442681 +0100
@@ -106,4 +106,19 @@
 	depends on !UML
 	default y
 
+#
+# LZMA support for squashfs LZMA.
+#
+config SQLZMA_UNCOMP
+	bool "LZMA decompression support."
+	select ZLIB_INFLATE
+
 endmenu
+
+#
+# Freebox Serial info (selected by corresponding board's Kconfig)
+#
+config BUILTIN_FBXSERIAL
+	boolean
+	select CRC32
+
diff -ruw linux-2.6.20.14/lib/Makefile linux-2.6.20.14-fbx/lib/Makefile
--- linux-2.6.20.14/lib/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/lib/Makefile	2010-12-29 19:30:08.851442681 +0100
@@ -70,3 +70,6 @@
 
 $(obj)/crc32table.h: $(obj)/gen_crc32table
 	$(call cmd,crc32)
+
+obj-$(CONFIG_BUILTIN_FBXSERIAL) += builtin-fbxserial.o
+obj-$(CONFIG_SQLZMA_UNCOMP)	+= sqlzma-uncomp.o LzmaDecode.o
\ No newline at end of file
diff -ruw linux-2.6.20.14/Makefile linux-2.6.20.14-fbx/Makefile
--- linux-2.6.20.14/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/Makefile	2011-09-09 16:14:20.260347746 +0200
@@ -161,7 +161,8 @@
 SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
 				  -e s/arm.*/arm/ -e s/sa110/arm/ \
 				  -e s/s390x/s390/ -e s/parisc64/parisc/ \
-				  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ )
+				  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
+				  -e s/sh.*/sh/ )
 
 # Cross compiling and selecting different set of gcc/bin-utils
 # ---------------------------------------------------------------------------
@@ -182,6 +183,10 @@
 # Default value for CROSS_COMPILE is not to prefix executables
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
 
+ifeq ($(ARCH),)
+	ARCH = $(error ARCH is not defined)
+endif
+
 ARCH		?= $(SUBARCH)
 CROSS_COMPILE	?=
 
@@ -555,6 +560,7 @@
 ifeq ($(KBUILD_EXTMOD),)
 core-y		+= kernel/ mm/ fs/ ipc/ security/ crypto/ block/
 
+# Start Freebox added code
 vmlinux-dirs	:= $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 		     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
 		     $(net-y) $(net-m) $(libs-y) $(libs-m)))
@@ -563,6 +569,7 @@
 		     $(init-n) $(init-) \
 		     $(core-n) $(core-) $(drivers-n) $(drivers-) \
 		     $(net-n)  $(net-)  $(libs-n)    $(libs-))))
+# End Freebox added code
 
 init-y		:= $(patsubst %/, %/built-in.o, $(init-y))
 core-y		:= $(patsubst %/, %/built-in.o, $(core-y))
@@ -600,7 +607,9 @@
 # System.map is generated to document addresses of all kernel symbols
 
 vmlinux-init := $(head-y) $(init-y)
+# Begin Freebox added code
 vmlinux-main := $(core-y) $(libs-y) $(drivers-y) $(net-y)
+# End Freebox added code
 vmlinux-all  := $(vmlinux-init) $(vmlinux-main)
 vmlinux-lds  := arch/$(ARCH)/kernel/vmlinux.lds
 
diff -ruw linux-2.6.20.14/mm/slab.c linux-2.6.20.14-fbx/mm/slab.c
--- linux-2.6.20.14/mm/slab.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/mm/slab.c	2010-12-29 19:30:08.871437089 +0100
@@ -565,9 +565,6 @@
 #if defined(CONFIG_LARGE_ALLOCS)
 #define	MAX_OBJ_ORDER	13	/* up to 32Mb */
 #define	MAX_GFP_ORDER	13	/* up to 32Mb */
-#elif defined(CONFIG_MMU)
-#define	MAX_OBJ_ORDER	5	/* 32 pages */
-#define	MAX_GFP_ORDER	5	/* 32 pages */
 #else
 #define	MAX_OBJ_ORDER	8	/* up to 1Mb */
 #define	MAX_GFP_ORDER	8	/* up to 1Mb */
diff -ruw linux-2.6.20.14/net/8021q/vlan_dev.c linux-2.6.20.14-fbx/net/8021q/vlan_dev.c
--- linux-2.6.20.14/net/8021q/vlan_dev.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/8021q/vlan_dev.c	2010-12-29 19:30:08.871437089 +0100
@@ -766,6 +766,7 @@
 	struct dev_mc_list *dmi = dev->mc_list;
 
 	while (dmi) {
+#ifdef VLAN_DEBUG
 		printk(KERN_DEBUG "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from vlan interface\n",
 		       dev->name,
 		       dmi->dmi_addr[0],
@@ -774,6 +775,7 @@
 		       dmi->dmi_addr[3],
 		       dmi->dmi_addr[4],
 		       dmi->dmi_addr[5]);
+#endif
 		dev_mc_delete(dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
 		dmi = dev->mc_list;
 	}
@@ -856,6 +858,7 @@
 		for (dmi = vlan_dev->mc_list; dmi != NULL; dmi = dmi->next) {
 			if (vlan_should_add_mc(dmi, VLAN_DEV_INFO(vlan_dev)->old_mc_list)) {
 				dev_mc_add(real_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+#ifdef VLAN_DEBUG
 				printk(KERN_DEBUG "%s: add %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address to master interface\n",
 				       vlan_dev->name,
 				       dmi->dmi_addr[0],
@@ -864,6 +867,7 @@
 				       dmi->dmi_addr[3],
 				       dmi->dmi_addr[4],
 				       dmi->dmi_addr[5]);
+#endif
 			}
 		}
 
@@ -874,6 +878,7 @@
 				 * delete it from the real list on the underlying device.
 				 */
 				dev_mc_delete(real_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
+#ifdef VLAN_DEBUG
 				printk(KERN_DEBUG "%s: del %.2x:%.2x:%.2x:%.2x:%.2x:%.2x mcast address from master interface\n",
 				       vlan_dev->name,
 				       dmi->dmi_addr[0],
@@ -882,6 +887,7 @@
 				       dmi->dmi_addr[3],
 				       dmi->dmi_addr[4],
 				       dmi->dmi_addr[5]);
+#endif
 			}
 		}
 
diff -ruw linux-2.6.20.14/net/bridge/br_device.c linux-2.6.20.14-fbx/net/bridge/br_device.c
--- linux-2.6.20.14/net/bridge/br_device.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/bridge/br_device.c	2010-12-29 19:30:08.881451405 +0100
@@ -90,20 +90,17 @@
 {
 	struct net_bridge *br = netdev_priv(dev);
 	struct sockaddr *addr = p;
-	struct net_bridge_port *port;
-	int err = -EADDRNOTAVAIL;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
 
 	spin_lock_bh(&br->lock);
-	list_for_each_entry(port, &br->port_list, list) {
-		if (!compare_ether_addr(port->dev->dev_addr, addr->sa_data)) {
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 			br_stp_change_bridge_id(br, addr->sa_data);
-			err = 0;
-			break;
-		}
-	}
+	br->flags |= BR_SET_MAC_ADDR;
 	spin_unlock_bh(&br->lock);
 
-	return err;
+	return 0;
 }
 
 static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
diff -ruw linux-2.6.20.14/net/bridge/br_private.h linux-2.6.20.14-fbx/net/bridge/br_private.h
--- linux-2.6.20.14/net/bridge/br_private.h	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/bridge/br_private.h	2010-12-29 19:30:08.881451405 +0100
@@ -96,6 +96,8 @@
 	struct hlist_head		hash[BR_HASH_SIZE];
 	struct list_head		age_list;
 	unsigned long			feature_mask;
+	unsigned long			flags;
+#define BR_SET_MAC_ADDR			0x00000001
 
 	/* STP */
 	bridge_id			designated_root;
diff -ruw linux-2.6.20.14/net/bridge/br_stp_if.c linux-2.6.20.14-fbx/net/bridge/br_stp_if.c
--- linux-2.6.20.14/net/bridge/br_stp_if.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/bridge/br_stp_if.c	2010-12-29 19:30:08.881451405 +0100
@@ -159,6 +159,10 @@
 	const unsigned char *addr = br_mac_zero;
 	struct net_bridge_port *p;
 
+	/* user has chosen a value so keep it */
+	if (br->flags & BR_SET_MAC_ADDR)
+		return;
+
 	list_for_each_entry(p, &br->port_list, list) {
 		if (addr == br_mac_zero ||
 		    memcmp(p->dev->dev_addr, addr, ETH_ALEN) < 0)
diff -ruw linux-2.6.20.14/net/core/dev.c linux-2.6.20.14-fbx/net/core/dev.c
--- linux-2.6.20.14/net/core/dev.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/core/dev.c	2010-12-29 19:30:08.891447448 +0100
@@ -110,6 +110,7 @@
 #include <linux/rcupdate.h>
 #include <linux/delay.h>
 #include <linux/wireless.h>
+#include <linux/kthread.h>
 #include <net/iw_handler.h>
 #include <asm/current.h>
 #include <linux/audit.h>
@@ -176,6 +177,18 @@
  */
 struct net_device *dev_base;
 static struct net_device **dev_tail = &dev_base;
+
+#ifdef CONFIG_NETRXTHREAD
+
+#define RXTHREAD_MAX_PROCESS	CONFIG_NETRXTHREAD_MAX_PROCESS
+#define RXTHREAD_MAX_PKTS	128
+
+static struct task_struct *krxd;
+static struct sk_buff_head krxd_pkt_queue[CONFIG_NETRXTHREAD_RX_QUEUE];
+static wait_queue_head_t krxd_wq;
+static unsigned int krxd_pkts_count;
+#endif
+
 DEFINE_RWLOCK(dev_base_lock);
 
 EXPORT_SYMBOL(dev_base);
@@ -1553,6 +1566,23 @@
 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 
 
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_DIVERTER) || defined(CONFIG_FREEBOX_DIVERTER_MODULE)
+int (*fbxdiverter_hook)(struct sk_buff *);
+
+static int handle_fbxdiverter(struct sk_buff *skb)
+{
+	/* try_module_get is missing here, so there is a race on
+	 * fbxdiverter module deletion */
+	if (!fbxdiverter_hook)
+		return 0;
+
+	return fbxdiverter_hook(skb);
+}
+#endif
+/* End Freebox added code */
+
+
 /**
  *	netif_rx	-	post buffer to the network code
  *	@skb: buffer to post
@@ -1723,6 +1753,73 @@
 #define handle_bridge(skb, pt_prev, ret, orig_dev)	(0)
 #endif
 
+
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+int (*fbxbridge_handle_frame_hook)(struct fbxbridge *p, struct sk_buff *skb);
+
+struct fbxbridge;
+
+static __inline__ int handle_fbxbridge(struct sk_buff *skb,
+				       struct packet_type **pt_prev, int *ret,
+				       struct net_device *orig_dev)
+{
+	struct fbxbridge *fbxbr;
+
+	if (skb->pkt_type == PACKET_LOOPBACK ||
+	    (fbxbr = skb->dev->fbx_bridge_port) == NULL)
+		return 0;
+
+	if (skb->protocol != __constant_htons(ETH_P_IP) &&
+	    skb->protocol != __constant_htons(ETH_P_ARP))
+		return 0;
+
+	if (*pt_prev) {
+		*ret = deliver_skb(skb, *pt_prev, orig_dev);
+		*pt_prev = NULL;
+	}
+
+	return fbxbridge_handle_frame_hook(fbxbr, skb);
+}
+#else
+#define handle_fbxbridge(skb, pt_prev, ret, orig_dev)	(0)
+#endif
+/* End Freebox added code */
+
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_L2BR) || defined(CONFIG_FREEBOX_L2BR_MODULE)
+struct fbxl2br_port;
+int (*fbxl2br_handle_frame_hook)(struct fbxl2br_port *p, struct sk_buff *skb);
+
+
+static __inline__ int handle_fbxl2br(struct sk_buff *skb,
+				       struct packet_type **pt_prev, int *ret,
+				       struct net_device *orig_dev)
+{
+	struct fbxl2br_port *port;
+
+	if (skb->pkt_type == PACKET_LOOPBACK ||
+	    (port = skb->dev->fbx_l2br_port) == NULL)
+		return 0;
+
+	if (skb->protocol != __constant_htons(ETH_P_IP) &&
+	    skb->protocol != __constant_htons(ETH_P_ARP) &&
+	    skb->protocol != __constant_htons(ETH_P_IPV6))
+		return 0;
+
+	if (*pt_prev) {
+		*ret = deliver_skb(skb, *pt_prev, orig_dev);
+		*pt_prev = NULL;
+	}
+
+	return fbxl2br_handle_frame_hook(port, skb);
+}
+#else
+#define handle_fbxl2br(skb, pt_prev, ret, orig_dev)	(0)
+#endif
+/* End Freebox added code */
+
+
 #ifdef CONFIG_NET_CLS_ACT
 /* TODO: Maybe we should just force sch_ingress to be compiled in
  * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
@@ -1761,23 +1858,13 @@
 }
 #endif
 
-int netif_receive_skb(struct sk_buff *skb)
+static int netif_receive_skb_end(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
-	/* if we've gotten here through NAPI, check netpoll */
-	if (skb->dev->poll && netpoll_rx(skb))
-		return NET_RX_DROP;
-
-	if (!skb->tstamp.off_sec)
-		net_timestamp(skb);
-
-	if (!skb->iif)
-		skb->iif = skb->dev->ifindex;
-
 	orig_dev = skb_bond(skb);
 
 	if (!orig_dev)
@@ -1785,9 +1872,6 @@
 
 	__get_cpu_var(netdev_rx_stat).total++;
 
-	skb->h.raw = skb->nh.raw = skb->data;
-	skb->mac_len = skb->nh.raw - skb->mac.raw;
-
 	pt_prev = NULL;
 
 	rcu_read_lock();
@@ -1807,6 +1891,20 @@
 		}
 	}
 
+
+	/* Start Freebox added code */
+	if (handle_fbxbridge(skb, &pt_prev, &ret, orig_dev))
+		goto out;
+	/* End Freebox added code */
+
+	/* Start Freebox added code */
+	if (handle_fbxl2br(skb, &pt_prev, &ret, orig_dev))
+		goto out;
+	/* End Freebox added code */
+
 #ifdef CONFIG_NET_CLS_ACT
 	if (pt_prev) {
 		ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -1852,6 +1950,118 @@
 out:
 	rcu_read_unlock();
 	return ret;
+
+}
+
+#ifdef CONFIG_NETRXTHREAD
+
+static int krxd_action(void *unused)
+{
+	struct sk_buff *skb;
+	unsigned int maxpkt_in_loop;
+
+	set_user_nice(current, -5);
+	current->flags |= PF_NOFREEZE;
+	__set_current_state(TASK_RUNNING);
+
+	maxpkt_in_loop = RXTHREAD_MAX_PROCESS;
+	while (1) {
+		unsigned int i, queue, count;
+
+		local_bh_disable();
+		count = CONFIG_NETRXTHREAD_RX_QUEUE;
+		for (i = 0; i < count; i++) {
+			queue = count - i - 1;
+			skb = skb_dequeue(&krxd_pkt_queue[queue]);
+			if (!skb)
+				continue;
+			krxd_pkts_count--;
+			break;
+		}
+
+		if (!skb) {
+			local_bh_enable();
+			wait_event_interruptible(krxd_wq,
+						 krxd_pkts_count != 0);
+			set_current_state(TASK_RUNNING);
+			maxpkt_in_loop = RXTHREAD_MAX_PROCESS;
+			continue;
+		}
+
+		netif_receive_skb_end(skb);
+		local_bh_enable();
+
+		/* only schedule when working on lowest prio queue */
+		if (queue == 0) {
+			if (--maxpkt_in_loop == 0) {
+				maxpkt_in_loop = RXTHREAD_MAX_PROCESS;
+				schedule();
+			}
+		}
+	}
+
+	return 0;
+}
+#endif
+
+int netif_receive_skb(struct sk_buff *skb)
+{
+#ifdef CONFIG_NETRXTHREAD
+	unsigned int len, queue;
+#endif
+
+#if defined(CONFIG_FREEBOX_MVDSA) || defined(CONFIG_FREEBOX_MVDSA_MODULE)
+	/* if device has dsa tag enabled, remove it */
+	if (skb->dev->fbxmvdsa_rx_fix) {
+		if (!skb->dsa_done && skb->dev->fbxmvdsa_rx_fix(skb))
+			return NET_RX_DROP;
+
+		/* unmark dsa done since we use it on tx path */
+		skb->dsa_done = 0;
+	}
+#endif
+
+	/* if we've gotten here through NAPI, check netpoll */
+	if (skb->dev->poll && netpoll_rx(skb))
+		return NET_RX_DROP;
+
+	if (!skb->tstamp.off_sec)
+		net_timestamp(skb);
+
+	if (!skb->iif)
+		skb->iif = skb->dev->ifindex;
+
+	skb->h.raw = skb->nh.raw = skb->data;
+	skb->mac_len = skb->nh.raw - skb->mac.raw;
+
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_DIVERTER) || defined(CONFIG_FREEBOX_DIVERTER_MODULE)
+	if (handle_fbxdiverter(skb))
+		return NET_RX_SUCCESS;
+#endif
+/* End Freebox added code */
+
+#ifndef CONFIG_NETRXTHREAD
+	return netif_receive_skb_end(skb);
+#else
+	queue = skb->rx_class;
+	if (queue >= CONFIG_NETRXTHREAD_RX_QUEUE)
+		queue = CONFIG_NETRXTHREAD_RX_QUEUE - 1;
+
+	/* queue the packet to the rx thread */
+	local_bh_disable();
+	len = skb_queue_len(&krxd_pkt_queue[queue]);
+	if (len < RXTHREAD_MAX_PKTS) {
+		__skb_queue_tail(&krxd_pkt_queue[queue], skb);
+		krxd_pkts_count++;
+		if (!len)
+			wake_up(&krxd_wq);
+	} else {
+		dev_kfree_skb(skb);
+	}
+	local_bh_enable();
+	return NET_RX_SUCCESS;
+#endif
 }
 
 static int process_backlog(struct net_device *backlog_dev, int *budget)
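
With CONFIG_NETRXTHREAD enabled, netif_receive_skb() no longer runs the protocol handlers from softirq context: it queues the skb on one of CONFIG_NETRXTHREAD_RX_QUEUE class lists selected by skb->rx_class (clamped to the last queue), wakes the krxthread kernel thread, and drops the packet once the chosen list already holds RXTHREAD_MAX_PKTS (128) entries. krxthread drains the highest-numbered class first and only calls schedule() while working on class 0. A hedged driver-side sketch of how a receive path could tag latency-sensitive traffic before handing it to the stack; my_is_high_prio() is a hypothetical classifier, not part of the patch:

/* illustrative fragment, not from the patch */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void my_driver_rx(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	/* my_is_high_prio() is hypothetical: 0 is the default class,
	 * higher classes are served first by krxthread */
	skb->rx_class = my_is_high_prio(skb) ? 1 : 0;
	netif_receive_skb(skb);
}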
@@ -3521,6 +3731,19 @@
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
 
+#ifdef CONFIG_NETRXTHREAD
+	for (i = 0; i < CONFIG_NETRXTHREAD_RX_QUEUE; i++)
+		skb_queue_head_init(&krxd_pkt_queue[i]);
+	krxd_pkts_count = 0;
+	init_waitqueue_head(&krxd_wq);
+	krxd = kthread_create(krxd_action, NULL, "krxthread");
+	if (IS_ERR(krxd)) {
+		printk(KERN_ERR "unable to create krxd\n");
+		return -ENOMEM;
+	}
+	wake_up_process(krxd);
+#endif
+
 	hotcpu_notifier(dev_cpu_callback, 0);
 	dst_init();
 	dev_mcast_init();
@@ -3566,12 +3789,30 @@
 EXPORT_SYMBOL(net_disable_timestamp);
 EXPORT_SYMBOL(dev_get_flags);
 
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_DIVERTER_MODULE)
+EXPORT_SYMBOL(fbxdiverter_hook);
+#endif
+/* End Freebox added code */
+
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 EXPORT_SYMBOL(br_handle_frame_hook);
 EXPORT_SYMBOL(br_fdb_get_hook);
 EXPORT_SYMBOL(br_fdb_put_hook);
 #endif
 
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+EXPORT_SYMBOL(fbxbridge_handle_frame_hook);
+#endif
+/* End Freebox added code */
+
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_L2BR_MODULE)
+EXPORT_SYMBOL(fbxl2br_handle_frame_hook);
+#endif
+/* End Freebox added code */
+
 #ifdef CONFIG_KMOD
 EXPORT_SYMBOL(dev_load);
 #endif
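
fbxdiverter_hook is a bare function pointer checked on every received packet before the protocol handlers run; a non-zero return tells netif_receive_skb() that the hook consumed the skb, and the caller reports NET_RX_SUCCESS. As the comment in the hunk admits, nothing takes a module reference, so clearing the pointer on unload still races with receive. A sketch of the consumer side, assuming the extern declaration comes from a header added elsewhere in the patch and with my_match() as a hypothetical classifier:

/*
 * Sketch of a CONFIG_FREEBOX_DIVERTER_MODULE-style consumer.  The hook
 * pointer and its calling convention come from net/core/dev.c above.
 */
#include <linux/module.h>
#include <linux/skbuff.h>

extern int (*fbxdiverter_hook)(struct sk_buff *skb);

static int my_divert(struct sk_buff *skb)
{
	if (!my_match(skb))
		return 0;	/* 0: let the stack keep processing it */

	kfree_skb(skb);		/* non-zero: we now own the skb */
	return 1;
}

static int __init my_init(void)
{
	fbxdiverter_hook = my_divert;
	return 0;
}

static void __exit my_exit(void)
{
	fbxdiverter_hook = NULL;	/* racy on unload, see the comment above */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");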
diff -ruw linux-2.6.20.14/net/core/skbuff.c linux-2.6.20.14-fbx/net/core/skbuff.c
--- linux-2.6.20.14/net/core/skbuff.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/core/skbuff.c	2010-12-29 19:30:08.891447448 +0100
@@ -244,6 +244,7 @@
 	skb_shinfo(skb)->gso_segs = 0;
 	skb_shinfo(skb)->gso_type = 0;
 	skb_shinfo(skb)->frag_list = NULL;
+
 out:
 	return skb;
 nodata:
@@ -305,7 +306,20 @@
 		skb_get(list);
 }
 
+#ifdef CONFIG_SKB_RECYCLE
+void kfree_recycled_skbmem(struct sk_buff *skb)
+{
+	kfree(skb->head);
+	kmem_cache_free(skbuff_head_cache, skb);
+}
+
+EXPORT_SYMBOL(kfree_recycled_skbmem);
+
+static void skb_release_data(struct sk_buff *skb, int may_recycle,
+			     int *recycle_verdict)
+#else
 static void skb_release_data(struct sk_buff *skb)
+#endif
 {
 	if (!skb->cloned ||
 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
@@ -319,6 +333,15 @@
 		if (skb_shinfo(skb)->frag_list)
 			skb_drop_fraglist(skb);
 
+#ifdef CONFIG_SKB_RECYCLE
+		if (may_recycle) {
+			*recycle_verdict = skb->recycle(skb->recycle_data,
+							skb);
+			if (*recycle_verdict == 1)
+				return;
+		}
+#endif
+
 		kfree(skb->head);
 	}
 }
@@ -330,8 +353,20 @@
 {
 	struct sk_buff *other;
 	atomic_t *fclone_ref;
+#ifdef CONFIG_SKB_RECYCLE
+	int recycle_verdict;
 
+	if (skb->fclone == SKB_FCLONE_UNAVAILABLE && skb->recycle) {
+		recycle_verdict = 0;
+		skb_release_data(skb, 1, &recycle_verdict);
+		if (recycle_verdict)
+			return;
+	} else
+		skb_release_data(skb, 0, NULL);
+#else
 	skb_release_data(skb);
+#endif
+
 	switch (skb->fclone) {
 	case SKB_FCLONE_UNAVAILABLE:
 		kmem_cache_free(skbuff_head_cache, skb);
@@ -475,9 +510,13 @@
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 	C(ipvs_property);
 #endif
+#if defined(CONFIG_FREEBOX_MVDSA) || defined(CONFIG_FREEBOX_MVDSA_MODULE)
+	C(dsa_done);
+#endif
 	C(protocol);
 	n->destructor = NULL;
 	C(mark);
+	C(rx_class);
 #ifdef CONFIG_NETFILTER
 	C(nfct);
 	nf_conntrack_get(skb->nfct);
@@ -490,6 +529,9 @@
 	C(nf_bridge);
 	nf_bridge_get(skb->nf_bridge);
 #endif
+#ifdef CONFIG_IP_FFN
+	n->ffn_state = 0;
+#endif
 #endif /*CONFIG_NETFILTER*/
 #ifdef CONFIG_NET_SCHED
 	C(tc_index);
@@ -508,6 +550,10 @@
 	C(tail);
 	C(end);
 
+#ifdef CONFIG_SKB_RECYCLE
+	C(recycle);
+	C(recycle_data);
+#endif
 	atomic_inc(&(skb_shinfo(skb)->dataref));
 	skb->cloned = 1;
 
@@ -539,6 +585,7 @@
 	new->tstamp	= old->tstamp;
 	new->destructor = NULL;
 	new->mark	= old->mark;
+	new->rx_class	= old->rx_class;
 #ifdef CONFIG_NETFILTER
 	new->nfct	= old->nfct;
 	nf_conntrack_get(old->nfct);
@@ -547,9 +594,15 @@
 	new->nfct_reasm = old->nfct_reasm;
 	nf_conntrack_get_reasm(old->nfct_reasm);
 #endif
+#ifdef CONFIG_IP_FFN
+	new->ffn_state = 0;
+#endif
 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 	new->ipvs_property = old->ipvs_property;
 #endif
+#if defined(CONFIG_FREEBOX_MVDSA) || defined(CONFIG_FREEBOX_MVDSA_MODULE)
+	new->dsa_done = old->dsa_done;
+#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
 	new->nf_bridge	= old->nf_bridge;
 	nf_bridge_get(old->nf_bridge);
@@ -566,6 +619,9 @@
 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
+#ifdef CONFIG_SKB_RECYCLE
+	new->recycle = NULL;
+#endif
 }
 
 /**
@@ -711,7 +767,12 @@
 	if (skb_shinfo(skb)->frag_list)
 		skb_clone_fraglist(skb);
 
+#ifdef CONFIG_SKB_RECYCLE
+	skb_release_data(skb, 0, NULL);
+	skb->recycle = NULL;
+#else
 	skb_release_data(skb);
+#endif
 
 	off = (data + nhead) - skb->head;
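
CONFIG_SKB_RECYCLE lets the owner of an skb intercept the moment the last reference to its data is dropped: when the skb is not a fast clone and skb->recycle is set, skb_release_data() calls skb->recycle(skb->recycle_data, skb), and a return value of 1 means the callback keeps both the data buffer and the sk_buff itself, to be released later with the exported kfree_recycled_skbmem(). Clones share the callback, while copies and pskb_expand_head() clear it. A driver-side sketch, with the callback prototype inferred from the call site; struct my_rx_ring, my_ring_put() and RX_BUF_SIZE are hypothetical:

/* illustrative fragment, not from the patch */
static int my_recycle(void *data, struct sk_buff *skb)
{
	struct my_rx_ring *ring = data;

	if (!my_ring_put(ring, skb))
		return 0;	/* ring full: let the stack free it normally */

	return 1;		/* we keep both skb and skb->head for reuse */
}

static struct sk_buff *my_rx_alloc(struct my_rx_ring *ring)
{
	struct sk_buff *skb = dev_alloc_skb(RX_BUF_SIZE);

	if (!skb)
		return NULL;

	skb->recycle = my_recycle;
	skb->recycle_data = ring;
	return skb;
}

Buffers still parked on such a ring at teardown would be released with kfree_recycled_skbmem().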
 
diff -ruw linux-2.6.20.14/net/ipv4/inet_hashtables.c linux-2.6.20.14-fbx/net/ipv4/inet_hashtables.c
--- linux-2.6.20.14/net/ipv4/inet_hashtables.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/inet_hashtables.c	2010-12-29 19:30:08.911437411 +0100
@@ -61,6 +61,10 @@
 	inet_csk(sk)->icsk_bind_hash = tb;
 }
 
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+void (*ip_tproxy_tcp_unhashed)(struct sock *sk, int proto) = NULL;
+#endif
+
 /*
  * Get rid of any references to a local port held by the given sock.
  */
@@ -70,6 +74,13 @@
 	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
 	struct inet_bind_bucket *tb;
 
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+	/* ugly tproxy unassign hook
+	 * FIXME: now DCCP also uses this code */
+	if (ip_tproxy_tcp_unhashed)
+		ip_tproxy_tcp_unhashed(sk, IPPROTO_TCP);
+#endif
+
 	spin_lock(&head->lock);
 	tb = inet_csk(sk)->icsk_bind_hash;
 	__sk_del_bind_node(sk);
diff -ruw linux-2.6.20.14/net/ipv4/inet_timewait_sock.c linux-2.6.20.14-fbx/net/ipv4/inet_timewait_sock.c
--- linux-2.6.20.14/net/ipv4/inet_timewait_sock.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/inet_timewait_sock.c	2010-12-29 19:30:08.911437411 +0100
@@ -30,6 +30,13 @@
 	sk_node_init(&tw->tw_node);
 	write_unlock(&ehead->lock);
 
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+	/* ugly tproxy unassign hook
+	 * FIXME: now DCCP also uses this code */
+	if (ip_tproxy_tcp_unhashed)
+		ip_tproxy_tcp_unhashed((struct sock *)tw, IPPROTO_TCP);
+#endif
+
 	/* Disassociate with bind bucket. */
 	bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
 	spin_lock(&bhead->lock);
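
The ip_tproxy_tcp_unhashed hook fires whenever a socket (or a timewait sock, cast to struct sock) gives up its local port, so the tproxy module can drop the matching port assignment; the FIXME notes that this path is shared with DCCP, yet the proto argument is always IPPROTO_TCP. A minimal sketch of the registering side, assuming the extern declaration comes from the tproxy headers; assignment and clearing of the pointer happen in module init/exit exactly as for the other hooks in this patch, and my_tproxy_unassign() is hypothetical:

/* illustrative only */
extern void (*ip_tproxy_tcp_unhashed)(struct sock *sk, int proto);

static void my_unhashed(struct sock *sk, int proto)
{
	/* drop whatever transparent-proxy binding was tied to this port */
	my_tproxy_unassign(sk, proto);
}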
diff -ruw linux-2.6.20.14/net/ipv4/ipconfig.c linux-2.6.20.14-fbx/net/ipv4/ipconfig.c
--- linux-2.6.20.14/net/ipv4/ipconfig.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/ipconfig.c	2010-12-29 19:30:08.911437411 +0100
@@ -622,6 +622,16 @@
 		e += sizeof(ic_req_params);
 	}
 
+	/* add vendor identifier */
+#ifdef CONFIG_IP_PNP_DHCP_IDENTIFIER
+	*e++ = 60;
+	*e++ = 6 + sizeof (CONFIG_IP_PNP_DHCP_IDENTIFIER);
+	memcpy(e, "linux-", 6);
+	e += 6;
+	memcpy(e, CONFIG_IP_PNP_DHCP_IDENTIFIER,
+	       sizeof (CONFIG_IP_PNP_DHCP_IDENTIFIER));
+	e+= sizeof (CONFIG_IP_PNP_DHCP_IDENTIFIER);
+#endif
 	*e++ = 255;	/* End of the list */
 }
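
The ipconfig.c hunk appends DHCP option 60 (vendor class identifier), built from the fixed "linux-" prefix plus the new IP_PNP_DHCP_IDENTIFIER Kconfig string. Because sizeof() is applied to the string macro, the option length and the copied payload both include the terminating NUL byte. A small host-side sketch of the same encoding, handy for checking the bytes that end up on the wire; "example" is only a stand-in for CONFIG_IP_PNP_DHCP_IDENTIFIER:

/* host-side check of the option 60 layout produced above */
#include <stdio.h>
#include <string.h>

#define IDENT "example"

int main(void)
{
	unsigned char opt[64], *e = opt;
	int i;

	*e++ = 60;			/* vendor class identifier */
	*e++ = 6 + sizeof(IDENT);	/* length, includes the trailing NUL */
	memcpy(e, "linux-", 6);
	e += 6;
	memcpy(e, IDENT, sizeof(IDENT));
	e += sizeof(IDENT);

	for (i = 0; i < e - opt; i++)
		printf("%02x ", opt[i]);
	printf("\n");
	return 0;
}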
 
diff -ruw linux-2.6.20.14/net/ipv4/ip_input.c linux-2.6.20.14-fbx/net/ipv4/ip_input.c
--- linux-2.6.20.14/net/ipv4/ip_input.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/ip_input.c	2010-12-29 19:30:08.911437411 +0100
@@ -428,6 +428,11 @@
 		goto drop;
 	}
 
+#ifdef CONFIG_IP_FFN
+	if (!ip_ffn_process(skb))
+		return NET_RX_SUCCESS;
+#endif
+
 	/* Remove any debris in the socket control block */
 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 
diff -ruw linux-2.6.20.14/net/ipv4/ip_output.c linux-2.6.20.14-fbx/net/ipv4/ip_output.c
--- linux-2.6.20.14/net/ipv4/ip_output.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/ip_output.c	2010-12-29 19:30:08.911437411 +0100
@@ -153,6 +153,7 @@
 	ip_send_check(iph);
 
 	skb->priority = sk->sk_priority;
+	skb->rx_class = 1;
 
 	/* Send it out. */
 	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
@@ -182,6 +183,11 @@
 		skb = skb2;
 	}
 
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_state == 1)
+		ip_ffn_add(skb);
+#endif
+
 	if (dst->hh)
 		return neigh_hh_output(dst->hh, skb);
 	else if (dst->neighbour)
@@ -276,6 +282,11 @@
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_IP);
 
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_state == 2)
+		return ip_finish_output(skb);
+#endif
+
 	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
 		            ip_finish_output,
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
@@ -360,6 +371,7 @@
 	ip_send_check(iph);
 
 	skb->priority = sk->sk_priority;
+	skb->rx_class = 1;
 
 	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
 		       dst_output);
@@ -1255,6 +1267,7 @@
 
 	skb->priority = sk->sk_priority;
 	skb->dst = dst_clone(&rt->u.dst);
+	skb->rx_class = 1;
 
 	/* Netfilter gets whole the not fragmented skb. */
 	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, 
@@ -1396,6 +1409,9 @@
 #if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
 	igmp_mc_proc_init();
 #endif
+#ifdef CONFIG_IP_FFN
+	ip_ffn_init();
+#endif
 }
 
 EXPORT_SYMBOL(ip_generic_getfrag);
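
The CONFIG_IP_FFN hooks are spread over several files and ip_ffn.c itself is not part of this excerpt, so the summary below is reconstructed purely from the call sites visible in these hunks; the meaning of the skb->ffn_state values is inferred from where they are tested:

/*
 * CONFIG_IP_FFN call sites visible in this excerpt (ip_ffn.c itself is
 * added elsewhere in the patch):
 *
 *  ip_rcv():            if (!ip_ffn_process(skb)) return NET_RX_SUCCESS;
 *                       a return of 0 means the fast path already took
 *                       care of the packet.
 *  ip_finish_output2(): if (skb->ffn_state == 1) ip_ffn_add(skb);
 *                       flows flagged by the slow path get a cache entry
 *                       once they reach output.
 *  ip_output():         if (skb->ffn_state == 2) return ip_finish_output(skb);
 *                       packets re-injected by the fast path skip the
 *                       NF_IP_POST_ROUTING hook they already went through.
 *  destroy_conntrack(): ip_ffn_ct_destroy(ct) evicts cached entries when
 *                       the conntrack entry dies.
 *  __do_replace():      ip_ffn_flush_all() invalidates the cache whenever
 *                       an iptables ruleset is replaced.
 *  skb_clone()/copies:  ffn_state is reset to 0 so a clone never carries
 *                       a stale verdict.
 */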
diff -ruw linux-2.6.20.14/net/ipv4/ip_sockglue.c linux-2.6.20.14-fbx/net/ipv4/ip_sockglue.c
--- linux-2.6.20.14/net/ipv4/ip_sockglue.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/ip_sockglue.c	2010-12-29 19:30:08.911437411 +0100
@@ -50,6 +50,7 @@
 #define IP_CMSG_RECVOPTS	8
 #define IP_CMSG_RETOPTS		16
 #define IP_CMSG_PASSSEC		32
+#define IP_CMSG_ORIGADDRS	64
 
 /*
  *	SOL_IP control messages.
@@ -127,6 +128,25 @@
 	security_release_secctx(secdata, seclen);
 }
 
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+
+void ip_cmsg_recv_origaddrs(struct msghdr *msg, struct sk_buff *skb)
+{
+	struct in_origaddrs ioa;
+
+	/* don't return original addresses if they were not set by tproxy */
+	if (IPCB(skb)->orig_dstaddr == 0 || IPCB(skb)->orig_dstport == 0)
+		return;
+
+	ioa.ioa_srcaddr.s_addr = IPCB(skb)->orig_srcaddr;
+	ioa.ioa_srcport = IPCB(skb)->orig_srcport;
+	ioa.ioa_dstaddr.s_addr = IPCB(skb)->orig_dstaddr;
+	ioa.ioa_dstport = IPCB(skb)->orig_dstport;
+
+	put_cmsg(msg, SOL_IP, IP_ORIGADDRS, sizeof(ioa), &ioa);
+}
+
+#endif
 
 void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
 {
@@ -161,6 +181,13 @@
 
 	if (flags & 1)
 		ip_cmsg_recv_security(msg, skb);
+
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+	if ((flags>>=1) == 0)
+		return;
+	if (flags & 1)
+		ip_cmsg_recv_origaddrs(msg, skb);
+#endif
 }
 
 int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc)
@@ -416,6 +443,9 @@
 			    (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | 
 			    (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
 			    (1<<IP_PASSSEC))) ||
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+				optname == IP_RECVORIGADDRS ||
+#endif
 				optname == IP_MULTICAST_TTL || 
 				optname == IP_MULTICAST_LOOP) { 
 		if (optlen >= sizeof(int)) {
@@ -506,6 +536,14 @@
 			else
 				inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
 			break;
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+		case IP_RECVORIGADDRS:
+			if (val)
+				inet->cmsg_flags |= IP_CMSG_ORIGADDRS;
+			else
+				inet->cmsg_flags &= ~IP_CMSG_ORIGADDRS;
+			break;
+#endif
 		case IP_TOS:	/* This sets both TOS and Precedence */
 			if (sk->sk_type == SOCK_STREAM) {
 				val &= ~3;
@@ -1014,6 +1052,11 @@
 		case IP_PASSSEC:
 			val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
 			break;
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+		case IP_RECVORIGADDRS:
+			val = (inet->cmsg_flags & IP_CMSG_ORIGADDRS) != 0;
+			break;
+#endif
 		case IP_TOS:
 			val = inet->tos;
 			break;
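
On the receive side, the tproxy additions expose the pre-NAT addresses through a new ancillary message: setting IP_RECVORIGADDRS turns on IP_CMSG_ORIGADDRS, and ip_cmsg_recv_origaddrs() then attaches a struct in_origaddrs (original source and destination address/port, filled in by tproxy) to each received datagram. A user-space sketch, assuming the tproxy-patched headers that define IP_RECVORIGADDRS, IP_ORIGADDRS and struct in_origaddrs are installed; error handling is trimmed:

/* illustrative only; IPPROTO_IP is SOL_IP in kernel terms */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

static void dump_origaddrs(int fd)
{
	char payload[2048], cbuf[256];
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };
	struct msghdr msg;
	struct cmsghdr *cmsg;
	int one = 1;

	setsockopt(fd, IPPROTO_IP, IP_RECVORIGADDRS, &one, sizeof(one));

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		struct in_origaddrs ioa;

		if (cmsg->cmsg_level != IPPROTO_IP || cmsg->cmsg_type != IP_ORIGADDRS)
			continue;
		memcpy(&ioa, CMSG_DATA(cmsg), sizeof(ioa));
		printf("original dst %s:%u\n",
		       inet_ntoa(ioa.ioa_dstaddr), ntohs(ioa.ioa_dstport));
	}
}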
diff -ruw linux-2.6.20.14/net/ipv4/Kconfig linux-2.6.20.14-fbx/net/ipv4/Kconfig
--- linux-2.6.20.14/net/ipv4/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/Kconfig	2010-12-29 19:30:08.901441381 +0100
@@ -14,6 +14,13 @@
 	  <file:Documentation/networking/multicast.txt>. For most people, it's
 	  safe to say N.
 
+config IP_FFN
+	bool "IP: Fast forwarding and NAT"
+	depends on NETFILTER
+	help
+	   Provide a fast path for established conntrack entries so that
+	   packets go out ASAP.
+
 config IP_ADVANCED_ROUTER
 	bool "IP: advanced router"
 	---help---
@@ -197,6 +204,11 @@
 	  must be operating on your network.  Read
 	  <file:Documentation/nfsroot.txt> for details.
 
+config IP_PNP_DHCP_IDENTIFIER
+	string "IP: DHCP vendor class identifier"
+	depends on IP_PNP_DHCP
+	default ""
+
 config IP_PNP_BOOTP
 	bool "IP: BOOTP support"
 	depends on IP_PNP
@@ -362,6 +374,11 @@
 
 	  If unsure, say N.
 
+config INET_XFRM_GC_THRESH
+	int "IP: xfrm garbage collect threshold"
+	depends on XFRM
+	default 1024
+
 config INET_AH
 	tristate "IP: AH transformation"
 	select XFRM
diff -ruw linux-2.6.20.14/net/ipv4/Makefile linux-2.6.20.14-fbx/net/ipv4/Makefile
--- linux-2.6.20.14/net/ipv4/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/Makefile	2010-12-29 19:30:08.901441381 +0100
@@ -12,6 +12,7 @@
 	     arp.o icmp.o devinet.o af_inet.o  igmp.o \
 	     sysctl_net_ipv4.o fib_frontend.o fib_semantics.o
 
+obj-$(CONFIG_IP_FFN) += ip_ffn.o
 obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
 obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
 obj-$(CONFIG_PROC_FS) += proc.o
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_conntrack_core.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_conntrack_core.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_conntrack_core.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_conntrack_core.c	2010-12-29 19:30:08.911437411 +0100
@@ -59,6 +59,9 @@
 atomic_t ip_conntrack_count = ATOMIC_INIT(0);
 
 void (*ip_conntrack_destroyed)(struct ip_conntrack *conntrack) = NULL;
+#ifdef CONFIG_IP_NF_NAT_NRES
+void (*ip_conntrack_expect_destroyed)(struct ip_conntrack_expect *expect) = NULL;
+#endif
 LIST_HEAD(ip_conntrack_expect_list);
 struct ip_conntrack_protocol *ip_ct_protos[MAX_IP_CT_PROTO] __read_mostly;
 static LIST_HEAD(helpers);
@@ -72,6 +75,10 @@
 static LIST_HEAD(unconfirmed);
 static int ip_conntrack_vmalloc __read_mostly;
 
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+void (*ip_conntrack_confirmed)(struct ip_conntrack *conntrack) = NULL;
+#endif
+
 static unsigned int ip_conntrack_next_id;
 static unsigned int ip_conntrack_expect_next_id;
 #ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
@@ -202,6 +209,12 @@
 	list_del(&exp->list);
 	CONNTRACK_STAT_INC(expect_delete);
 	exp->master->expecting--;
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+	if (ip_conntrack_expect_destroyed)
+		ip_conntrack_expect_destroyed(exp);
+#endif
+
 	ip_conntrack_expect_put(exp);
 }
 
@@ -297,6 +310,10 @@
 	ip_ct_remove_expectations(ct);
 }
 
+#ifdef CONFIG_IP_FFN
+extern void ip_ffn_ct_destroy(struct ip_conntrack *ct);
+#endif
+
 static void
 destroy_conntrack(struct nf_conntrack *nfct)
 {
@@ -311,6 +328,10 @@
 	ip_conntrack_event(IPCT_DESTROY, ct);
 	set_bit(IPS_DYING_BIT, &ct->status);
 
+#ifdef CONFIG_IP_FFN
+	ip_ffn_ct_destroy(ct);
+#endif
+
 	helper = ct->helper;
 	if (helper && helper->destroy)
 		helper->destroy(ct);
@@ -348,6 +369,50 @@
 	ip_conntrack_free(ct);
 }
 
+static void
+__destroy_conntrack(struct nf_conntrack *nfct)
+{
+	struct ip_conntrack *ct = (struct ip_conntrack *)nfct;
+	struct ip_conntrack_protocol *proto;
+
+	DEBUGP("destroy_conntrack(%p)\n", ct);
+	IP_NF_ASSERT(atomic_read(&nfct->use) == 0);
+	IP_NF_ASSERT(!timer_pending(&ct->timeout));
+
+	ip_conntrack_event(IPCT_DESTROY, ct);
+	set_bit(IPS_DYING_BIT, &ct->status);
+
+	/* To make sure we don't get any weird locking issues here:
+	 * destroy_conntrack() MUST NOT be called with a write lock
+	 * to ip_conntrack_lock!!! -HW */
+	proto = __ip_conntrack_proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
+	if (proto && proto->destroy)
+		proto->destroy(ct);
+
+	if (ip_conntrack_destroyed)
+		ip_conntrack_destroyed(ct);
+
+	/* Expectations will have been removed in clean_from_lists,
+	 * except TFTP can create an expectation on the first packet,
+	 * before connection is in the list, so we need to clean here,
+	 * too. */
+	ip_ct_remove_expectations(ct);
+
+	/* We overload first tuple to link into unconfirmed list. */
+	if (!is_confirmed(ct)) {
+		BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
+		list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+	}
+
+	CONNTRACK_STAT_INC(delete);
+
+	if (ct->master)
+		ip_conntrack_put(ct->master);
+
+	DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
+	ip_conntrack_free(ct);
+}
+
 static void death_by_timeout(unsigned long ul_conntrack)
 {
 	struct ip_conntrack *ct = (void *)ul_conntrack;
@@ -361,6 +426,19 @@
 	ip_conntrack_put(ct);
 }
 
+void __death_by_timeout(unsigned long ul_conntrack)
+{
+	struct ip_conntrack *ct = (void *)ul_conntrack;
+
+	/* Inside lock so preempt is disabled on module removal path.
+	 * Otherwise we can get spurious warnings. */
+	CONNTRACK_STAT_INC(delete_list);
+	clean_from_lists(ct);
+
+	if (atomic_dec_and_test(&ct->ct_general.use))
+		__destroy_conntrack((struct nf_conntrack *)ct);
+}
+
 struct ip_conntrack_tuple_hash *
 __ip_conntrack_find(const struct ip_conntrack_tuple *tuple,
 		    const struct ip_conntrack *ignored_conntrack)
@@ -477,6 +555,13 @@
 	set_bit(IPS_CONFIRMED_BIT, &ct->status);
 	CONNTRACK_STAT_INC(insert);
 	write_unlock_bh(&ip_conntrack_lock);
+
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+	/* Call confirmed hook */
+	if (ip_conntrack_confirmed)
+		ip_conntrack_confirmed(ct);
+#endif
+
 	if (ct->helper)
 		ip_conntrack_event_cache(IPCT_HELPER, *pskb);
 #ifdef CONFIG_IP_NF_NAT_NEEDED
@@ -611,8 +696,11 @@
 	module_put(p->me);
 }
 
+#define MAX_SECURE_CT	256
+
 struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
-					struct ip_conntrack_tuple *repl)
+					struct ip_conntrack_tuple *repl,
+					int secure_pool)
 {
 	struct ip_conntrack *conntrack;
 
@@ -626,7 +714,16 @@
 
 	if (ip_conntrack_max
 	    && atomic_read(&ip_conntrack_count) > ip_conntrack_max) {
-		unsigned int hash = hash_conntrack(orig);
+		unsigned int hash;
+
+		/* Try allocation from secure pool */
+		if (secure_pool &&
+		    atomic_read(&ip_conntrack_count) <
+		    ip_conntrack_max + MAX_SECURE_CT)
+			goto ok;
+
+		hash = hash_conntrack(orig);
+
 		/* Try dropping from this hash chain. */
 		if (!early_drop(&ip_conntrack_hash[hash])) {
 			atomic_dec(&ip_conntrack_count);
@@ -637,7 +734,7 @@
 			return ERR_PTR(-ENOMEM);
 		}
 	}
-
+ok:
 	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
 	if (!conntrack) {
 		DEBUGP("Can't allocate conntrack.\n");
@@ -681,7 +778,7 @@
 		return NULL;
 	}
 
-	conntrack = ip_conntrack_alloc(tuple, &repl_tuple);
+	conntrack = ip_conntrack_alloc(tuple, &repl_tuple, skb->rx_class != 0);
 	if (conntrack == NULL || IS_ERR(conntrack))
 		return (struct ip_conntrack_tuple_hash *)conntrack;
 
@@ -936,6 +1033,9 @@
 	}
 	new->master = me;
 	atomic_set(&new->use, 1);
+#ifdef CONFIG_IP_NF_NAT_NRES
+	INIT_LIST_HEAD(&new->reserved_list);
+#endif
 	return new;
 }
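
Two things are added to the conntrack core here: ip_conntrack_alloc() gains a secure_pool argument, set by init_conntrack() from skb->rx_class != 0 (the ip_output.c hunks tag locally generated IPv4 traffic with rx_class = 1), and the ip_conntrack_confirmed() pointer gives tproxy a callback once an entry is inserted into the hash, in the same style as the other hooks in this patch. The admission rule restated as plain C, for clarity only and not part of the patch:

/*
 * Restatement of the policy in ip_conntrack_alloc() above: entries for
 * locally classified traffic may exceed ip_conntrack_max by up to
 * MAX_SECURE_CT before early_drop() is attempted.
 */
static int ct_alloc_allowed(int count, int max, int secure_pool)
{
	if (!max || count <= max)
		return 1;			/* below the normal limit */
	if (secure_pool && count < max + 256)	/* MAX_SECURE_CT */
		return 1;			/* secure-pool headroom */
	return 0;				/* caller falls back to early_drop() */
}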
 
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_conntrack_ftp.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_conntrack_ftp.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_conntrack_ftp.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_conntrack_ftp.c	2010-12-29 19:30:08.911437411 +0100
@@ -19,6 +19,16 @@
 #include <linux/netfilter_ipv4/ip_conntrack_ftp.h>
 #include <linux/moduleparam.h>
 
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+#include <fbxbridge.h>
+#include <linux/netfilter_ipv4/ip_nat_helper.h>
+#endif
+
+#if defined (CONFIG_FREEBOX_L2BR) || defined(CONFIG_FREEBOX_L2BR_MODULE)
+#include <fbxl2br.h>
+#include <linux/netfilter_ipv4/ip_nat_helper.h>
+#endif
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
 MODULE_DESCRIPTION("ftp connection tracking helper");
@@ -319,6 +329,37 @@
 		return NF_ACCEPT;
 	}
 
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	if (!ct_ftp_info->is_fbxbridge && (*pskb)->dev->fbx_bridge) {
+		struct fbxbridge *fbxbr;
+
+		fbxbr = (*pskb)->dev->fbx_bridge;
+		ct_ftp_info->is_fbxbridge = 1;
+		ct_ftp_info->fbxbridge_remote = ntohl(fbxbr->br_remote_ipaddr);
+		ct_ftp_info->fbxbridge_wan = fbxbr->wan_ipaddr;
+	}
+#endif
+
+#if defined (CONFIG_FREEBOX_L2BR) || defined(CONFIG_FREEBOX_L2BR_MODULE)
+	if (!ct_ftp_info->is_fbxl2br && (*pskb)->dev->fbx_l2br) {
+		struct fbxl2br *br;
+		struct fbxl2br_client *c;
+		struct iphdr *ip;
+
+		br = (*pskb)->dev->fbx_l2br;
+		ip = (*pskb)->nh.iph;
+
+		c = fbxl2br_get_client_by_nat_addr(br, ip->daddr);
+		if (!c)
+			goto proceed;
+
+		ct_ftp_info->is_fbxl2br = 1;
+		ct_ftp_info->fbxl2br_remote = ntohl(c->nat_addr);
+		ct_ftp_info->fbxl2br_wan = c->client_ipaddr;
+	}
+ proceed:
+#endif
+
 	th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl*4,
 				sizeof(_tcph), &_tcph);
 	if (th == NULL)
@@ -399,6 +440,90 @@
 	 * Doesn't matter unless NAT is happening.  */
 	exp->tuple.dst.ip = ct->tuplehash[!dir].tuple.dst.ip;
 
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	if (ct_ftp_info->is_fbxbridge &&
+	    search[dir][i].ftptype == IP_CT_FTP_PORT) {
+		unsigned long orig_ip_addr;
+		unsigned short orig_port;
+		char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
+		unsigned int len;
+
+		/* kludge: if we are here, then this is a local pkt
+		 * that has gone through internal fbxbridge snat.
+		 *
+		 * If we see a PORT command, we mangle the packet so
+		 * that the ip address it carries is replaced by the
+		 * remote bridge address */
+
+		/* check the address in the packet is the one the
+		 * fbxbridge changed */
+		orig_ip_addr = htonl((array[0] << 24) | (array[1] << 16)
+				     | (array[2] << 8) | array[3]);
+		if (orig_ip_addr != ct_ftp_info->fbxbridge_wan)
+			goto donttouch;
+
+		/* now mangle the remote address */
+		orig_port = htons(array[4] << 8 | array[5]);
+		len = sprintf(buffer, "%u,%u,%u,%u,%u,%u",
+			      NIPQUAD(ct_ftp_info->fbxbridge_remote),
+			      orig_port >> 8 , orig_port & 0xFF);
+
+		ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
+					 matchlen, buffer, len);
+
+		/* then adjust as if nothing happened */
+		matchlen = len;
+		array[0] = (ct_ftp_info->fbxbridge_remote >> 24) & 0xff;
+		array[1] = (ct_ftp_info->fbxbridge_remote >> 16) & 0xff;
+		array[2] = (ct_ftp_info->fbxbridge_remote >> 8) & 0xff;
+		array[3] = (ct_ftp_info->fbxbridge_remote) & 0xff;
+	}
+donttouch:
+
+#endif
+
+#if defined(CONFIG_FREEBOX_L2BR) || defined(CONFIG_FREEBOX_L2BR_MODULE)
+	if (ct_ftp_info->is_fbxl2br &&
+	    search[dir][i].ftptype == IP_CT_FTP_PORT) {
+		unsigned long orig_ip_addr;
+		unsigned short orig_port;
+		char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
+		unsigned int len;
+
+		/* kludge: if we are here, then this is a local pkt
+		 * that has gone through internal fbxl2br snat.
+		 *
+		 * If we see a PORT command, we mangle the packet so
+		 * that the ip address it carries is replaced by the
+		 * remote bridge address */
+
+		/* check the address in the packet is the one the
+		 * fbxl2br changed */
+		orig_ip_addr = htonl((array[0] << 24) | (array[1] << 16)
+				     | (array[2] << 8) | array[3]);
+		if (orig_ip_addr != ct_ftp_info->fbxl2br_wan)
+			goto donttouch_l2br;
+
+		/* now mangle the remote address */
+		orig_port = htons(array[4] << 8 | array[5]);
+		len = sprintf(buffer, "%u,%u,%u,%u,%u,%u",
+			      NIPQUAD(ct_ftp_info->fbxl2br_remote),
+			      orig_port >> 8 , orig_port & 0xFF);
+
+		ip_nat_mangle_tcp_packet(pskb, ct, ctinfo, matchoff,
+					 matchlen, buffer, len);
+
+		/* then adjust as if nothing happened */
+		matchlen = len;
+		array[0] = (ct_ftp_info->fbxl2br_remote >> 24) & 0xff;
+		array[1] = (ct_ftp_info->fbxl2br_remote >> 16) & 0xff;
+		array[2] = (ct_ftp_info->fbxl2br_remote >> 8) & 0xff;
+		array[3] = (ct_ftp_info->fbxl2br_remote) & 0xff;
+	}
+donttouch_l2br:
+
+#endif
+
 	if (htonl((array[0] << 24) | (array[1] << 16) | (array[2] << 8) | array[3])
 	    != ct->tuplehash[dir].tuple.src.ip) {
 		/* Enrico Scholz's passive FTP to partially RNAT'd ftp
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_conntrack_proto_tcp.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_conntrack_proto_tcp.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_conntrack_proto_tcp.c	2010-12-29 19:30:08.921500487 +0100
@@ -31,6 +31,7 @@
 
 #include <net/tcp.h>
 
+#include <linux/netfilter/nf_conntrack_common.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/netfilter_ipv4/ip_conntrack.h>
 #include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
@@ -986,6 +987,15 @@
 					      NULL, "ip_ct_tcp: invalid SYN");
 			return -NF_ACCEPT;
 		}
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+	case TCP_CONNTRACK_TIME_WAIT:
+		/* Set MAY_DELETE so the NAT subsystem may drop this
+		 * connection when it clashes with a new mapping */
+		if (test_bit(IPS_TPROXY_BIT, &conntrack->status)) {
+			DEBUGP(KERN_DEBUG "Marking TPROXY-related TIME_WAIT conntrack entry MAY_DELETE\n");
+			set_bit(IPS_MAY_DELETE_BIT, &conntrack->status);
+		}
+		break;
+#endif
 	case TCP_CONNTRACK_CLOSE:
 		if (index == TCP_RST_SET
 		    && ((test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)
@@ -1067,6 +1077,15 @@
 	return NF_ACCEPT;
 }
  
+#ifdef CONFIG_IP_FFN
+int external_tcp_packet(struct ip_conntrack *conntrack,
+			const struct sk_buff *skb,
+			enum ip_conntrack_info ctinfo)
+{
+	return tcp_packet(conntrack, skb, ctinfo);
+}
+#endif
+ 
 /* Called when a new connection for this protocol found. */
 static int tcp_new(struct ip_conntrack *conntrack,
 		   const struct sk_buff *skb)
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_conntrack_proto_udp.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_conntrack_proto_udp.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_conntrack_proto_udp.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_conntrack_proto_udp.c	2010-12-29 19:30:08.921500487 +0100
@@ -81,6 +81,15 @@
 	return NF_ACCEPT;
 }
 
+#ifdef CONFIG_IP_FFN
+int external_udp_packet(struct ip_conntrack *conntrack,
+			const struct sk_buff *skb,
+			enum ip_conntrack_info ctinfo)
+{
+	return udp_packet(conntrack, skb, ctinfo);
+}
+#endif
+
 /* Called when a new connection for this protocol found. */
 static int udp_new(struct ip_conntrack *conntrack, const struct sk_buff *skb)
 {
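
tcp_packet() and udp_packet() are static to their conntrack protocol modules; the external_* wrappers exist so that CONFIG_IP_FFN code can keep the normal state tracking (timeouts, TCP window and state checks) running for packets that bypass the netfilter hooks. The real caller lives in ip_ffn.c, which is not in this excerpt; the fragment below only illustrates the kind of use the wrappers enable, with struct ffn_entry and its fields as hypothetical names:

/* purely illustrative, not the patch's ip_ffn.c */
static void my_ffn_update_ct(struct ffn_entry *e, struct sk_buff *skb)
{
	/* return value is the usual conntrack verdict, ignored here */
	if (skb->nh.iph->protocol == IPPROTO_TCP)
		external_tcp_packet(e->ct, skb, e->ctinfo);
	else
		external_udp_packet(e->ct, skb, e->ctinfo);
}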
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_conntrack_standalone.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_conntrack_standalone.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_conntrack_standalone.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_conntrack_standalone.c	2010-12-29 19:30:08.921500487 +0100
@@ -913,6 +913,9 @@
 EXPORT_SYMBOL(invert_tuplepr);
 EXPORT_SYMBOL(ip_conntrack_alter_reply);
 EXPORT_SYMBOL(ip_conntrack_destroyed);
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+EXPORT_SYMBOL_GPL(ip_conntrack_confirmed);
+#endif
 EXPORT_SYMBOL(need_conntrack);
 EXPORT_SYMBOL(ip_conntrack_helper_register);
 EXPORT_SYMBOL(ip_conntrack_helper_unregister);
@@ -925,6 +928,9 @@
 EXPORT_SYMBOL_GPL(ip_conntrack_expect_find_get);
 EXPORT_SYMBOL(ip_conntrack_expect_related);
 EXPORT_SYMBOL(ip_conntrack_unexpect_related);
+#ifdef CONFIG_IP_NF_NAT_NRES
+EXPORT_SYMBOL(ip_conntrack_expect_destroyed);
+#endif
 EXPORT_SYMBOL_GPL(ip_conntrack_expect_list);
 EXPORT_SYMBOL_GPL(ip_ct_unlink_expect);
 
@@ -961,3 +967,6 @@
 EXPORT_SYMBOL_GPL(ip_ct_port_tuple_to_nfattr);
 EXPORT_SYMBOL_GPL(ip_ct_port_nfattr_to_tuple);
 #endif
+#if defined(CONFIG_IP_NF_TPROXY) || defined(CONFIG_IP_NF_TPROXY_MODULE)
+EXPORT_SYMBOL_GPL(__death_by_timeout);
+#endif
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_nat_core.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_core.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_nat_core.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_core.c	2010-12-29 19:30:08.921500487 +0100
@@ -14,6 +14,8 @@
 #include <linux/skbuff.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/list.h>
 #include <net/checksum.h>
 #include <net/icmp.h>
 #include <net/ip.h>
@@ -22,6 +24,7 @@
 #include <linux/udp.h>
 #include <linux/jhash.h>
 
+#include <linux/netfilter/nf_conntrack_common.h>
 #include <linux/netfilter_ipv4/ip_conntrack.h>
 #include <linux/netfilter_ipv4/ip_conntrack_core.h>
 #include <linux/netfilter_ipv4/ip_conntrack_protocol.h>
@@ -38,10 +41,17 @@
 #endif
 
 DEFINE_RWLOCK(ip_nat_lock);
+EXPORT_SYMBOL_GPL(ip_nat_lock);
 
 /* Calculated at init based on memory size */
 static unsigned int ip_nat_htable_size;
 
+#ifdef CONFIG_IP_NF_NAT_NRES
+static kmem_cache_t *ip_nat_reserved_cachep;
+static atomic_t ip_nat_reserved_count;
+static struct list_head *natreserved;
+#endif
+
 static struct list_head *bysource;
 
 #define MAX_IP_NAT_PROTO 256
@@ -86,6 +96,19 @@
 			    tuple->dst.protonum, 0) % ip_nat_htable_size;
 }
 
+#ifdef CONFIG_IP_NF_NAT_NRES
+static inline unsigned int
+hash_nat_reserved(const struct ip_conntrack_manip *foreign,
+		  const struct ip_conntrack_manip *peer,
+		  const u_int16_t proto)
+{
+	return jhash_3words(foreign->ip,
+			    (proto << 16) + foreign->u.all,
+			    (peer ? (peer->ip + peer->u.all) : 0),
+			    0) % ip_nat_htable_size;
+}
+#endif
+
 /* Noone using conntrack by the time this called. */
 static void ip_nat_cleanup_conntrack(struct ip_conntrack *conn)
 {
@@ -97,10 +120,417 @@
 	write_unlock_bh(&ip_nat_lock);
 }
 
+static void __ip_nat_cleanup_conntrack(struct ip_conntrack *conn)
+{
+	if (!(conn->status & IPS_NAT_DONE_MASK))
+		return;
+
+	list_del(&conn->nat.info.bysource);
+}
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+static inline int
+reserved_manip_cmp(const struct ip_nat_reserved *i,
+		   const struct ip_conntrack_manip *manip,
+		   const u_int16_t proto)
+{
+	DEBUGP("reserved_manip_cmp: manip proto %u %u.%u.%u.%u:%u, "
+	       "reservation proto %u %u.%u.%u.%u:%u\n peer %u.%u.%u.%u:%u\n",
+			proto, NIPQUAD(manip->ip), ntohs(manip->u.all),
+			i->proto, NIPQUAD(i->manip.ip), ntohs(i->manip.u.all),
+			NIPQUAD(i->peer.ip), ntohs(i->peer.u.all));
+	return (i->proto == proto &&
+		i->manip.ip == manip->ip && i->manip.u.all == manip->u.all);
+}
+
+static inline int
+reserved_manip_cmp_peer(const struct ip_nat_reserved *i,
+			const struct ip_conntrack_manip *manip,
+			const u_int16_t proto,
+			const struct ip_conntrack_manip *peer)
+{
+	DEBUGP("reserved_manip_cmp_peer: manip proto %u %u.%u.%u.%u:%u peer %u.%u.%u.%u:%u, "
+	       "reservation proto %u %u.%u.%u.%u:%u peer %u.%u.%u.%u:%u\n",
+	       proto, NIPQUAD(manip->ip), ntohs(manip->u.all),
+	       NIPQUAD(peer->ip), ntohs(peer->u.all),
+	       i->proto, NIPQUAD(i->manip.ip), ntohs(i->manip.u.all),
+	       NIPQUAD(i->peer.ip), ntohs(i->peer.u.all));
+
+	return (i->proto == proto &&
+		i->manip.ip == manip->ip && i->manip.u.all == manip->u.all &&
+		((i->peer.ip == 0) || (i->peer.ip == peer->ip && i->peer.u.all == peer->u.all)));
+}
+
+static inline int
+reserved_manip_cmp_peer_exact(const struct ip_nat_reserved *i,
+			      const struct ip_conntrack_manip *manip,
+			      const u_int16_t proto,
+			      const struct ip_conntrack_manip *peer)
+{
+	DEBUGP("reserved_manip_cmp_peer_exact: manip proto %u %u.%u.%u.%u:%u peer %u.%u.%u.%u:%u, "
+	       "reservation proto %u %u.%u.%u.%u:%u peer %u.%u.%u.%u:%u\n",
+	       proto, NIPQUAD(manip->ip), ntohs(manip->u.all),
+	       NIPQUAD(peer->ip), ntohs(peer->u.all),
+	       i->proto, NIPQUAD(i->manip.ip), ntohs(i->manip.u.all),
+	       NIPQUAD(i->peer.ip), ntohs(i->peer.u.all));
+
+	return (i->proto == proto &&
+		i->manip.ip == manip->ip && i->manip.u.all == manip->u.all &&
+		i->peer.ip == peer->ip && i->peer.u.all == peer->u.all);
+}
+
+/* Is this manip reserved?
+ * exact means full peer match is required, used for reservation deletion */
+static struct ip_nat_reserved *
+__ip_nat_reserved_find_manip(const struct ip_conntrack_manip *manip,
+			     const u_int16_t proto,
+			     const struct ip_conntrack_manip *peer,
+			     const int exact)
+{
+	struct ip_nat_reserved *i;
+	struct ip_nat_reserved *res = NULL;
+	unsigned int h = hash_nat_reserved(manip, peer, proto);
+
+	DEBUGP("__ip_nat_reserved_find_manip: find proto %u %u.%u.%u.%u:%u\n",
+			proto, NIPQUAD(manip->ip), ntohs(manip->u.all));
+
+	if (peer) {
+		if (exact) {
+			list_for_each_entry(i, &natreserved[h], hash)
+				if (reserved_manip_cmp_peer_exact(i, manip, proto, peer)) {
+					res = i;
+					goto out;
+				}
+		} else {
+			/* braces are required here: without them the else
+			 * binds to the if inside the loop, so the non-exact
+			 * lookup never runs when exact == 0 */
+			list_for_each_entry(i, &natreserved[h], hash)
+				if (reserved_manip_cmp_peer(i, manip, proto, peer)) {
+					res = i;
+					goto out;
+				}
+		}
+	} else
+		list_for_each_entry(i, &natreserved[h], hash)
+			if (reserved_manip_cmp(i, manip, proto)) {
+				res = i;
+				goto out;
+			}
+
+out:
+	return res;
+}
+
+/* Is this tuple clashing with a reserved manip? */
+static struct ip_nat_reserved *
+__ip_nat_reserved_find_tuple(const struct ip_conntrack_tuple *tuple,
+			     enum ip_nat_manip_type maniptype)
+{
+	struct ip_conntrack_manip m = {.ip = tuple->dst.ip, .u = {.all = tuple->dst.u.all}};
+
+	if (maniptype == IP_NAT_MANIP_SRC) {
+		DEBUGP("__ip_nat_reserved_find_tuple: IP_NAT_MANIP_SRC search\n");
+		return __ip_nat_reserved_find_manip(&tuple->src, tuple->dst.protonum, &m, 0);
+	} else {
+		DEBUGP("__ip_nat_reserved_find_tuple: IP_NAT_MANIP_DST search\n");
+		return __ip_nat_reserved_find_manip(&m, tuple->dst.protonum, &tuple->src, 0);
+	}
+}
+
+static inline int
+clashing_ct_cmp(const struct ip_conntrack_tuple_hash *i, const void *data)
+{
+	const struct ip_conntrack_manip *m = (struct ip_conntrack_manip *) data;
+	const struct ip_conntrack_tuple *t = &i->tuple;
+
+	/* FIXME: every connection has two entries, we should check only the REPLY direction */
+
+	DEBUGP("clashing_ct_cmp: manip %u.%u.%u.%u:%u ct reply src %u.%u.%u.%u:%u dst %u.%u.%u.%u:%u\n",
+			NIPQUAD(m->ip), ntohs(m->u.all), NIPQUAD(t->src.ip), ntohs(t->src.u.all),
+			NIPQUAD(t->dst.ip), ntohs(t->dst.u.all));
+	return (((t->src.ip == m->ip) && (t->src.u.all == m->u.all)) ||
+		((t->dst.ip == m->ip) && (t->dst.u.all == m->u.all)));
+}
+
+/* Create a new reservation */
+struct ip_nat_reserved *
+__ip_nat_reserved_new_hash(const struct ip_conntrack_manip *manip,
+			   const u_int16_t proto,
+			   const struct ip_conntrack_manip *peer)
+{
+	struct ip_nat_reserved *res;
+	struct ip_conntrack_tuple_hash *h = NULL;
+	unsigned int hash;
+
+	DEBUGP("__ip_nat_reserved_new_hash: manip proto %u %u.%u.%u.%u:%u\n",
+			proto, NIPQUAD(manip->ip), ntohs(manip->u.all));
+
+	/* check if it's already reserved */
+	if (__ip_nat_reserved_find_manip(manip, proto, peer, 1)) {
+		DEBUGP("__ip_nat_reserved_new_hash: already reserved\n");
+		return NULL;
+	}
+
+	/* FIXME: check if a clashing connection exists... This is problematic,
+	 * since the final decision in ip_nat_used_tuple() is based on a full
+	 * tuple, but we only have a manip... =(:< */
+
+	/* Current solution: we provide two methods for checking:
+	 *   - Strong check: in this case, the conntrack table is scanned to see if an
+	 *     already existing connection uses the manip in its REPLY direction.
+	 *     If such a conntrack entry is found, the mapping fails. This check is
+	 *     extremely pessimistic, since it fails to register reservations which could
+	 *     happily coexist with current conntracks if the other side of the tuple is
+	 *     different...
+	 *   - Exact check: if the callee provides a peer manip, then an exact lookup
+	 *     can be made in the conntrack hash. This is a more fine-grained check.
+	 */
+
+	if (peer) {
+		/* Exact check */
+		struct ip_conntrack_tuple t = {.src = *peer,
+					       .dst = {.protonum = proto,
+						       .ip = manip->ip,
+						       .u = {.all = manip->u.all}}};
+		struct ip_conntrack *ctrack;
+
+		h = ip_conntrack_find_get(&t, NULL);
+
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+		if ((h != NULL) &&
+		    (ctrack = tuplehash_to_ctrack(h)) &&
+		    test_bit(IPS_MAY_DELETE_BIT, &ctrack->status)) {
+			DEBUGP("Deleting old conntrack entry for NAT\n");
+			__ip_nat_cleanup_conntrack(ctrack);
+			ctrack->status &= ~IPS_NAT_DONE_MASK;
+			if (del_timer(&ctrack->timeout))
+				ctrack->timeout.function((unsigned long)ctrack);
+			ip_conntrack_put(ctrack);
+			h = NULL;
+		}
+#endif
+
+		if (h) {
+			DEBUGP("__ip_nat_reserved_new_hash: manip clashes with an already existing connection\n");
+			ip_conntrack_put(tuplehash_to_ctrack(h));
+			return NULL;
+		}
+	} else {
+		/* Strong check: we have only a manip, unfortunately we scan the whole conntrack
+		 * hash for possible clashing connections... */
+		unsigned int i;
+		int found = 0, repeat;
+		struct ip_conntrack *ctrack;
+
+		write_lock_bh(&ip_conntrack_lock);
+		for (i = 0; !found && i < ip_conntrack_htable_size; i++) {
+			do {
+				repeat = 0;
+				list_for_each_entry(h, &ip_conntrack_hash[i], list)
+					if (clashing_ct_cmp(h, manip)) {
+						found = 1;
+						break;
+					}
+				if (found &&
+				    (ctrack = tuplehash_to_ctrack(h)) &&
+				    (ctrack->status & IPS_MAY_DELETE)) {
+					DEBUGP("Deleting old conntrack entry for NAT\n");
+					__ip_nat_cleanup_conntrack(ctrack);
+					ctrack->status &= ~IPS_NAT_DONE_MASK;
+					if (del_timer(&ctrack->timeout))
+						__death_by_timeout((unsigned long)ctrack);
+					found = 0;
+					repeat = 1;
+				}
+			} while (repeat);
+		}
+		write_unlock_bh(&ip_conntrack_lock);
+
+		if (found) {
+			DEBUGP("__ip_nat_reserved_new_hash: manip clashes with an already existing connection\n");
+			return NULL;
+		}
+	}
+
+	/* else allocate a new structure */
+	res = kmem_cache_alloc(ip_nat_reserved_cachep, GFP_ATOMIC);
+	if (!res)
+		return NULL;
+
+	memset(res, 0, sizeof(*res));
+	res->proto = proto;
+	res->manip = *manip;
+	if (peer)
+		res->peer = *peer;
+
+	/* put it into the hash */
+	hash = hash_nat_reserved(manip, peer, proto);
+	atomic_inc(&ip_nat_reserved_count);
+	list_add(&res->hash, &natreserved[hash]);
+	DEBUGP("__ip_nat_reserved_new_hash: hashed manip proto %u %u.%u.%u.%u:%u\n",
+			proto, NIPQUAD(manip->ip), ntohs(manip->u.all));
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(__ip_nat_reserved_new_hash);
+
+/* Register a new reservation */
+static int
+__ip_nat_reserved_register(struct ip_conntrack_expect *expect,
+			   const struct ip_conntrack_manip *manip,
+			   const u_int16_t proto,
+			   const struct ip_conntrack_manip *peer)
+{
+	struct ip_nat_reserved *res;
+
+	DEBUGP("__ip_nat_reserved_register: registering proto %u %u.%u.%u.%u:%u\n",
+			proto, NIPQUAD(manip->ip), ntohs(manip->u.all));
+
+	/* allocate and put into the hash */
+	res = __ip_nat_reserved_new_hash(manip, proto, peer);
+	if (!res)
+		return 0;
+
+	/* append to the per-expectation reserved list */
+	list_add_tail(&res->exp, &expect->reserved_list);
+
+	return 1;
+}
+
+int
+ip_nat_reserved_register(struct ip_conntrack_expect *expect,
+			 const struct ip_conntrack_manip *manip,
+			 const u_int16_t proto,
+			 const struct ip_conntrack_manip *peer)
+{
+	int ret;
+
+	write_lock_bh(&ip_nat_lock);
+
+	ret = __ip_nat_reserved_register(expect, manip, proto, peer);
+
+	write_unlock_bh(&ip_nat_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ip_nat_reserved_register);
+
+/* Unhash a reservation */
+struct ip_nat_reserved *
+__ip_nat_reserved_unhash(const struct ip_conntrack_manip *manip,
+		         const u_int16_t proto,
+			 const struct ip_conntrack_manip *peer)
+{
+	struct ip_nat_reserved *res;
+
+	DEBUGP("__ip_nat_reserved_unhash: unhashing proto %u %u.%u.%u.%u:%u\n",
+			proto, NIPQUAD(manip->ip), ntohs(manip->u.all));
+
+	/* check if it's really reserved */
+	if (!(res = __ip_nat_reserved_find_manip(manip, proto, peer, 1))) {
+		DEBUGP("__ip_nat_reserved_unhash: trying to unreg a nonexisting reservation\n");
+		return NULL;
+	}
+
+	/* delete from the hash table */
+	list_del(&res->hash);
+
+	atomic_dec(&ip_nat_reserved_count);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(__ip_nat_reserved_unhash);
+
+/* Return a reservation structure into the slab cache */
+void
+__ip_nat_reserved_free(struct ip_nat_reserved *res)
+{
+	kmem_cache_free(ip_nat_reserved_cachep, res);
+}
+EXPORT_SYMBOL_GPL(__ip_nat_reserved_free);
+
+/* Unregister a reservation */
+static int
+__ip_nat_reserved_unregister(struct ip_conntrack_expect *expect,
+			     const struct ip_conntrack_manip *manip,
+			     const u_int16_t proto,
+			     const struct ip_conntrack_manip *peer)
+{
+	struct ip_nat_reserved *res;
+
+	DEBUGP("__ip_nat_reserved_unregister: unregistering proto %u %u.%u.%u.%u:%u\n",
+			proto, NIPQUAD(manip->ip), ntohs(manip->u.all));
+
+	/* look up and unhash */
+	res = __ip_nat_reserved_unhash(manip, proto, peer);
+	if (!res)
+		return 0;
+
+	/* delete from the per-expectation list */
+	list_del(&res->exp);
+
+	/* free the structure */
+	__ip_nat_reserved_free(res);
+
+	return 1;
+}
+
+int
+ip_nat_reserved_unregister(struct ip_conntrack_expect *expect,
+			   const struct ip_conntrack_manip *manip,
+			   const u_int16_t proto,
+			   const struct ip_conntrack_manip *peer)
+{
+	int ret;
+
+	write_lock_bh(&ip_nat_lock);
+
+	ret = __ip_nat_reserved_unregister(expect, manip, proto, peer);
+
+	write_unlock_bh(&ip_nat_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ip_nat_reserved_unregister);
+
+/* Unregister all reservations for a given expectation */
+void
+ip_nat_reserved_unregister_all(struct ip_conntrack_expect *expect)
+{
+	struct list_head *i;
+	struct ip_nat_reserved *res;
+
+	DEBUGP("ip_nat_reserved_unregister_all: deleting all reservations for expectation %p\n",
+			expect);
+
+	write_lock_bh(&ip_nat_lock);
+
+	i = expect->reserved_list.next;
+	while (i != &expect->reserved_list) {
+		res = list_entry(i, struct ip_nat_reserved, exp);
+		i = i->next;
+
+		/* clear from lists */
+		list_del(&res->hash);
+		list_del(&res->exp);
+
+		kmem_cache_free(ip_nat_reserved_cachep, res);
+	}
+
+	write_unlock_bh(&ip_nat_lock);
+}
+EXPORT_SYMBOL_GPL(ip_nat_reserved_unregister_all);
+
+static void
+ip_nat_reserved_cleanup_expect(struct ip_conntrack_expect *expect)
+{
+	ip_nat_reserved_unregister_all(expect);
+}
+#endif /* CONFIG_IP_NF_NAT_NRES */
+
 /* Is this tuple already taken? (not by us) */
 int
 ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple,
-		  const struct ip_conntrack *ignored_conntrack)
+		  const struct ip_conntrack *ignored_conntrack,
+		  const enum ip_nat_manip_type maniptype,
+		  const unsigned int flags)
 {
 	/* Conntrack tracking doesn't keep track of outgoing tuples; only
 	   incoming ones.  NAT means they don't have a fixed mapping,
@@ -108,9 +538,46 @@
 
 	   We could keep a separate hash if this proves too slow. */
 	struct ip_conntrack_tuple reply;
+	struct ip_conntrack_tuple_hash *h;
+	struct ip_conntrack *ctrack;
+#ifdef CONFIG_IP_NF_NAT_NRES
+	struct ip_nat_reserved *res;
 
+	/* check if the tuple is reserved if there are any reservations */
+	if (atomic_read(&ip_nat_reserved_count)) {
+		read_lock_bh(&ip_nat_lock);
+		res = __ip_nat_reserved_find_tuple(tuple, maniptype);
+		read_unlock_bh(&ip_nat_lock);
+
+		/* If we may not allocate reserved ports, return */
+		if (!(flags & IP_NAT_RANGE_USE_RESERVED) && res)
+			return 1;
+	}
+#endif
+
+	/* check if it's taken by an existing connection */
 	invert_tuplepr(&reply, tuple);
-	return ip_conntrack_tuple_taken(&reply, ignored_conntrack);
+	h = ip_conntrack_find_get(&reply, ignored_conntrack);
+
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+	/* check if that conntrack is marked MAY_DELETE, if so, get rid of it... */
+	if ((h != NULL) &&
+	    (ctrack = tuplehash_to_ctrack(h)) &&
+	    test_bit(IPS_MAY_DELETE_BIT, &ctrack->status)) {
+		DEBUGP("Deleting old conntrack entry for NAT\n");
+		__ip_nat_cleanup_conntrack(ctrack);
+		ctrack->status &= ~IPS_NAT_DONE_MASK;
+		if (del_timer(&ctrack->timeout))
+			ctrack->timeout.function((unsigned long)ctrack);
+		ip_conntrack_put(tuplehash_to_ctrack(h));
+		h = NULL;
+	}
+#endif
+
+	if (h)
+		ip_conntrack_put(tuplehash_to_ctrack(h));
+
+	return h != NULL;
 }
 EXPORT_SYMBOL(ip_nat_used_tuple);
 
@@ -246,7 +713,7 @@
 	if (maniptype == IP_NAT_MANIP_SRC) {
 		if (find_appropriate_src(orig_tuple, tuple, range)) {
 			DEBUGP("get_unique_tuple: Found current src map\n");
-			if (!ip_nat_used_tuple(tuple, conntrack))
+			if (!ip_nat_used_tuple(tuple, conntrack, maniptype, range->flags))
 				return;
 		}
 	}
@@ -264,7 +731,7 @@
 	/* Only bother mapping if it's not already in range and unique */
 	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
 	     || proto->in_range(tuple, maniptype, &range->min, &range->max))
-	    && !ip_nat_used_tuple(tuple, conntrack)) {
+	    && !ip_nat_used_tuple(tuple, conntrack, maniptype, range->flags)) {
 		ip_nat_proto_put(proto);
 		return;
 	}
@@ -336,8 +803,8 @@
 EXPORT_SYMBOL(ip_nat_setup_info);
 
 /* Returns true if succeeded. */
-static int
-manip_pkt(u_int16_t proto,
+int
+ip_nat_manip_pkt(u_int16_t proto,
 	  struct sk_buff **pskb,
 	  unsigned int iphdroff,
 	  const struct ip_conntrack_tuple *target,
@@ -370,6 +837,7 @@
 	}
 	return 1;
 }
+EXPORT_SYMBOL_GPL(ip_nat_manip_pkt);
 
 /* Do packet manipulations according to ip_nat_setup_info. */
 unsigned int ip_nat_packet(struct ip_conntrack *ct,
@@ -397,7 +865,7 @@
 		/* We are aiming to look like inverse of other direction. */
 		invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
 
-		if (!manip_pkt(target.dst.protonum, pskb, 0, &target, mtype))
+		if (!ip_nat_manip_pkt(target.dst.protonum, pskb, 0, &target, mtype))
 			return NF_DROP;
 	}
 	return NF_ACCEPT;
@@ -460,7 +928,7 @@
 	   pass all hooks (locally-generated ICMP).  Consider incoming
 	   packet: PREROUTING (DST manip), routing produces ICMP, goes
 	   through POSTROUTING (which must correct the DST manip). */
-	if (!manip_pkt(inside->ip.protocol, pskb,
+	if (!ip_nat_manip_pkt(inside->ip.protocol, pskb,
 		       (*pskb)->nh.iph->ihl*4
 		       + sizeof(inside->icmp),
 		       &ct->tuplehash[!dir].tuple,
@@ -489,7 +957,7 @@
 
 	if (ct->status & statusbit) {
 		invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
-		if (!manip_pkt(0, pskb, 0, &target, manip))
+		if (!ip_nat_manip_pkt(0, pskb, 0, &target, manip))
 			return 0;
 	}
 
@@ -578,10 +1046,29 @@
 	/* Leave them the same for the moment. */
 	ip_nat_htable_size = ip_conntrack_htable_size;
 
+#ifdef CONFIG_IP_NF_NAT_NRES
+	/* Create nat_reserved slab cache */
+	ip_nat_reserved_cachep = kmem_cache_create("ip_nat_reserved",
+						   sizeof(struct ip_nat_reserved), 0,
+						   SLAB_HWCACHE_ALIGN, NULL, NULL);
+	if (!ip_nat_reserved_cachep) {
+		printk(KERN_ERR "Unable to create ip_nat_reserved slab cache\n");
+		return -ENOMEM;
+	}
+#endif
+
 	/* One vmalloc for both hash tables */
+#ifndef CONFIG_IP_NF_NAT_NRES
 	bysource = vmalloc(sizeof(struct list_head) * ip_nat_htable_size);
+#else
+	bysource = vmalloc(sizeof(struct list_head) * ip_nat_htable_size * 2);
+#endif
 	if (!bysource)
-		return -ENOMEM;
+		goto free_reserved_slab;
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+	natreserved = bysource + ip_nat_htable_size;
+#endif
 
 	/* Sew in builtin protocols. */
 	write_lock_bh(&ip_nat_lock);
@@ -594,15 +1081,28 @@
 
 	for (i = 0; i < ip_nat_htable_size; i++) {
 		INIT_LIST_HEAD(&bysource[i]);
+#ifdef CONFIG_IP_NF_NAT_NRES
+		INIT_LIST_HEAD(&natreserved[i]);
+#endif
 	}
 
 	/* FIXME: Man, this is a hack.  <SIGH> */
 	IP_NF_ASSERT(ip_conntrack_destroyed == NULL);
 	ip_conntrack_destroyed = &ip_nat_cleanup_conntrack;
+#ifdef CONFIG_IP_NF_NAT_NRES
+	IP_NF_ASSERT(ip_conntrack_expect_destroyed == NULL);
+	ip_conntrack_expect_destroyed = &ip_nat_reserved_cleanup_expect;
+#endif
 
 	/* Initialize fake conntrack so that NAT will skip it */
 	ip_conntrack_untracked.status |= IPS_NAT_DONE_MASK;
 	return 0;
+
+free_reserved_slab:
+#ifdef CONFIG_IP_NF_NAT_NRES
+	kmem_cache_destroy(ip_nat_reserved_cachep);
+#endif
+	return -ENOMEM;
 }
 
 /* Clear NAT section of all conntracks, in case we're loaded again. */
@@ -617,6 +1117,10 @@
 {
 	ip_ct_iterate_cleanup(&clean_nat, NULL);
 	ip_conntrack_destroyed = NULL;
+#ifdef CONFIG_IP_NF_NAT_NRES
+	ip_conntrack_expect_destroyed = NULL;
+	kmem_cache_destroy(ip_nat_reserved_cachep);
+#endif
 	vfree(bysource);
 }
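
The reservation layer lets a helper pin a (protocol, address, port) manip, optionally qualified by a peer, before the corresponding connection exists: ip_nat_used_tuple() now rejects reserved tuples unless the caller's range carries IP_NAT_RANGE_USE_RESERVED, reservations hang off an ip_conntrack_expect (reserved_list, initialised in ip_conntrack_expect_alloc()), and they are torn down through the new ip_conntrack_expect_destroyed hook. A sketch of a helper reserving the WAN-side source port it advertised for an expected data connection; the struct ip_conntrack_manip initialisers match the fields used above, the rest of the expectation plumbing and the header providing the declarations are assumed:

/*
 * Illustrative use of the reservation API exported by ip_nat_core.c.
 * exp is a fully set-up ip_conntrack_expect; addresses and ports are in
 * network byte order.  ip_nat_reserved_register() returns 1 on success
 * and 0 when the manip is already reserved or clashes with an existing
 * conntrack entry.
 */
#include <linux/in.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_nat.h>	/* declarations assumed here */

static int my_reserve_data_port(struct ip_conntrack_expect *exp,
				u_int32_t wan_ip, u_int16_t wan_port,
				u_int32_t peer_ip, u_int16_t peer_port)
{
	struct ip_conntrack_manip manip = {
		.ip = wan_ip,
		.u = { .tcp = { .port = wan_port } },
	};
	struct ip_conntrack_manip peer = {
		.ip = peer_ip,
		.u = { .tcp = { .port = peer_port } },
	};

	/* passing a peer enables the finer-grained clash check */
	return ip_nat_reserved_register(exp, &manip, IPPROTO_TCP, &peer);
}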
 
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_nat_proto_gre.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_proto_gre.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_nat_proto_gre.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_proto_gre.c	2010-12-29 19:30:08.921500487 +0100
@@ -93,7 +93,7 @@
 
 	for (i = 0; i < range_size; i++, key++) {
 		*keyptr = htons(min + key % range_size);
-		if (!ip_nat_used_tuple(tuple, conntrack))
+		if (!ip_nat_used_tuple(tuple, conntrack, maniptype, range->flags))
 			return 1;
 	}
 
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_nat_proto_icmp.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_proto_icmp.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_nat_proto_icmp.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_proto_icmp.c	2010-12-29 19:30:08.921500487 +0100
@@ -46,7 +46,7 @@
 	for (i = 0; i < range_size; i++, id++) {
 		tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
 		                             (id % range_size));
-		if (!ip_nat_used_tuple(tuple, conntrack))
+		if (!ip_nat_used_tuple(tuple, conntrack, maniptype, range->flags))
 			return 1;
 	}
 	return 0;
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_nat_proto_tcp.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_proto_tcp.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_nat_proto_tcp.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_proto_tcp.c	2010-12-29 19:30:08.921500487 +0100
@@ -77,7 +77,7 @@
 
 	for (i = 0; i < range_size; i++, port++) {
 		*portptr = htons(min + port % range_size);
-		if (!ip_nat_used_tuple(tuple, conntrack)) {
+		if (!ip_nat_used_tuple(tuple, conntrack, maniptype, range->flags)) {
 			return 1;
 		}
 	}
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_nat_proto_udp.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_proto_udp.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_nat_proto_udp.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_proto_udp.c	2010-12-29 19:30:08.921500487 +0100
@@ -76,7 +76,7 @@
 
 	for (i = 0; i < range_size; i++, port++) {
 		*portptr = htons(min + port % range_size);
-		if (!ip_nat_used_tuple(tuple, conntrack))
+		if (!ip_nat_used_tuple(tuple, conntrack, maniptype, range->flags))
 			return 1;
 	}
 	return 0;
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_nat_standalone.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_standalone.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_nat_standalone.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_nat_standalone.c	2010-12-29 19:30:08.921500487 +0100
@@ -284,6 +284,13 @@
 	return NF_ACCEPT;
 }
 
+/* Modules depending on the NAT hooks but not using symbols from this module
+   should call this */
+void need_nat_hooks(void)
+{
+}
+EXPORT_SYMBOL_GPL(need_nat_hooks);
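+/* Typical usage: a module that only needs the NAT hooks to be registered
+ * calls need_nat_hooks() once from its init path; the call itself does
+ * nothing, but the symbol reference makes module loading pull in (and
+ * pin) this module. */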
+
 /* We must be after connection tracking and before packet filtering. */
 
 static struct nf_hook_ops ip_nat_ops[] = {
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/ip_tables.c linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_tables.c
--- linux-2.6.20.14/net/ipv4/netfilter/ip_tables.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ip_tables.c	2010-12-29 19:30:08.921500487 +0100
@@ -1145,6 +1145,11 @@
 	return ret;
 }
 
+#ifdef CONFIG_IP_FFN
+extern void ip_ffn_flush_all(void);
+#endif
+
 static int
 __do_replace(const char *name, unsigned int valid_hooks,
 		struct xt_table_info *newinfo, unsigned int num_counters,
@@ -1261,6 +1266,10 @@
 			      tmp.counters);
 	if (ret)
 		goto free_newinfo_untrans;
+
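+	/* ip_ffn_flush_all() is provided by the IP_FFN code added elsewhere
+	 * in this patch; flushing on every ruleset replace presumably makes
+	 * sure any cached fast-path decision is re-evaluated against the
+	 * new rules. */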
+#ifdef CONFIG_IP_FFN
+	ip_ffn_flush_all();
+#endif
 	return 0;
 
  free_newinfo_untrans:
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/Kconfig linux-2.6.20.14-fbx/net/ipv4/netfilter/Kconfig
--- linux-2.6.20.14/net/ipv4/netfilter/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/Kconfig	2010-12-29 19:30:08.911437411 +0100
@@ -212,6 +212,33 @@
 
 	  To compile it as a module, choose M here.  If unsure, say Y.
 
+config IP_NF_TPROXY
+	tristate "Transparent proxying"
+	depends on IP_NF_NAT
+	help
+	  Transparent proxying. For more information see
+	  http://www.balabit.com/downloads/tproxy.
+
+	  To compile it as a module, choose M here.  If unsure, say Y.
+
+config IP_NF_MATCH_TPROXY
+	tristate "tproxy match support"
+	depends on IP_NF_TPROXY
+	help
+	  Match transparently proxied connections.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_NF_TARGET_TPROXY
+	tristate "TPROXY target support"
+	depends on IP_NF_TPROXY
+	help
+	  This option adds a `TPROXY' target, which is almost the same as REDIRECT.
+	  It can only be used in the tproxy table, and is useful to redirect
+	  traffic to a transparent proxy.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_NF_QUEUE
 	tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
 	help
@@ -462,6 +489,16 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_NF_NAT_NRES
+	bool "NAT reservations support"
+	depends on IP_NF_NAT
+	help
+	  This option enables support for NAT reservations. This makes
+	  transparent proxying more reliable, but it is not needed unless
+	  you use TProxy support.
+
+	  If unsure, say 'N'.
+
 config IP_NF_NAT_SNMP_BASIC
 	tristate "Basic SNMP-ALG support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && IP_NF_NAT
diff -ruw linux-2.6.20.14/net/ipv4/netfilter/Makefile linux-2.6.20.14-fbx/net/ipv4/netfilter/Makefile
--- linux-2.6.20.14/net/ipv4/netfilter/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/Makefile	2010-12-29 19:30:08.911437411 +0100
@@ -81,6 +81,7 @@
 obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_NF_NAT) += iptable_nat.o
 obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
+obj-$(CONFIG_IP_NF_TPROXY) += iptable_tproxy.o
 
 # matches
 obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
@@ -91,6 +92,7 @@
 obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
 obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+obj-$(CONFIG_IP_NF_MATCH_TPROXY) += ipt_tproxy.o
 
 # targets
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
@@ -106,6 +108,7 @@
 obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
 obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
+obj-$(CONFIG_IP_NF_TARGET_TPROXY) += ipt_TPROXY.o
 
 # generic ARP tables
 obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff -ruw linux-2.6.20.14/net/ipv4/route.c linux-2.6.20.14-fbx/net/ipv4/route.c
--- linux-2.6.20.14/net/ipv4/route.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/route.c	2010-12-29 19:30:08.921500487 +0100
@@ -3199,3 +3199,4 @@
 EXPORT_SYMBOL(__ip_select_ident);
 EXPORT_SYMBOL(ip_route_input);
 EXPORT_SYMBOL(ip_route_output_key);
+EXPORT_SYMBOL(ip_tos2prio);
diff -ruw linux-2.6.20.14/net/ipv4/tcp_ipv4.c linux-2.6.20.14-fbx/net/ipv4/tcp_ipv4.c
--- linux-2.6.20.14/net/ipv4/tcp_ipv4.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/tcp_ipv4.c	2010-12-29 19:30:08.931441367 +0100
@@ -2469,6 +2469,10 @@
 EXPORT_SYMBOL(tcp_v4_send_check);
 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
 
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+EXPORT_SYMBOL(ip_tproxy_tcp_unhashed);
+#endif
+
 #ifdef CONFIG_PROC_FS
 EXPORT_SYMBOL(tcp_proc_register);
 EXPORT_SYMBOL(tcp_proc_unregister);
diff -ruw linux-2.6.20.14/net/ipv4/tcp_minisocks.c linux-2.6.20.14-fbx/net/ipv4/tcp_minisocks.c
--- linux-2.6.20.14/net/ipv4/tcp_minisocks.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/tcp_minisocks.c	2010-12-29 19:30:08.931441367 +0100
@@ -27,6 +27,7 @@
 #include <net/tcp.h>
 #include <net/inet_common.h>
 #include <net/xfrm.h>
+#include <linux/net.h>
 
 #ifdef CONFIG_SYSCTL
 #define SYNC_INIT 0 /* let the user enable it */
diff -ruw linux-2.6.20.14/net/ipv4/udp.c linux-2.6.20.14-fbx/net/ipv4/udp.c
--- linux-2.6.20.14/net/ipv4/udp.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/udp.c	2010-12-29 19:30:08.931441367 +0100
@@ -1506,6 +1506,21 @@
 	
 }
 
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+void (*ip_tproxy_udp_unhashed)(struct sock *sk, int proto) = NULL;
+#endif
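+/* the tproxy module is expected to install its handler here at load time
+ * and reset the pointer on unload; ip_tproxy_udp_unhashed is exported
+ * below for that purpose. */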
+
+static void udp_v4_unhash(struct sock *sk)
+{
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+	/* ugly tproxy unassign hook */
+	if (ip_tproxy_udp_unhashed)
+		ip_tproxy_udp_unhashed(sk, IPPROTO_UDP);
+#endif
+
+	udp_lib_unhash(sk);
+}
+
 struct proto udp_prot = {
  	.name		   = "UDP",
 	.owner		   = THIS_MODULE,
@@ -1521,7 +1536,7 @@
 	.sendpage	   = udp_sendpage,
 	.backlog_rcv	   = udp_queue_rcv_skb,
 	.hash		   = udp_lib_hash,
-	.unhash		   = udp_lib_unhash,
+	.unhash		   = udp_v4_unhash,
 	.get_port	   = udp_v4_get_port,
 	.obj_size	   = sizeof(struct udp_sock),
 #ifdef CONFIG_COMPAT
@@ -1728,6 +1743,10 @@
 EXPORT_SYMBOL(udp_lib_setsockopt);
 EXPORT_SYMBOL(udp_poll);
 
+#if defined(CONFIG_IP_NF_TPROXY) || defined (CONFIG_IP_NF_TPROXY_MODULE)
+EXPORT_SYMBOL(ip_tproxy_udp_unhashed);
+#endif
+
 #ifdef CONFIG_PROC_FS
 EXPORT_SYMBOL(udp_proc_register);
 EXPORT_SYMBOL(udp_proc_unregister);
diff -ruw linux-2.6.20.14/net/ipv4/xfrm4_policy.c linux-2.6.20.14-fbx/net/ipv4/xfrm4_policy.c
--- linux-2.6.20.14/net/ipv4/xfrm4_policy.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/xfrm4_policy.c	2010-12-29 19:30:08.931441367 +0100
@@ -312,7 +312,7 @@
 	.update_pmtu =		xfrm4_update_pmtu,
 	.destroy =		xfrm4_dst_destroy,
 	.ifdown =		xfrm4_dst_ifdown,
-	.gc_thresh =		1024,
+	.gc_thresh =		CONFIG_INET_XFRM_GC_THRESH,
 	.entry_size =		sizeof(struct xfrm_dst),
 };
 
diff -ruw linux-2.6.20.14/net/ipv6/Kconfig linux-2.6.20.14-fbx/net/ipv6/Kconfig
--- linux-2.6.20.14/net/ipv6/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv6/Kconfig	2010-12-29 19:30:08.931441367 +0100
@@ -166,6 +166,11 @@
 
 	  Saying M here will produce a module called sit.ko. If unsure, say Y.
 
+config IPV6_SIT_FBX6TO4
+	bool "sit support Freebox 6to4 scheme"
+	depends on IPV6_SIT
+	default n
+
 config IPV6_TUNNEL
 	tristate "IPv6: IPv6-in-IPv6 tunnel"
 	select INET6_TUNNEL
diff -ruw linux-2.6.20.14/net/ipv6/sit.c linux-2.6.20.14-fbx/net/ipv6/sit.c
--- linux-2.6.20.14/net/ipv6/sit.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/ipv6/sit.c	2010-12-29 19:30:08.941440988 +0100
@@ -162,6 +162,12 @@
 	}
 	for (tp = &tunnels[prio][h]; (t = *tp) != NULL; tp = &t->next) {
 		if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
+#ifdef CONFIG_IPV6_SIT_FBX6TO4
+			if (!memcmp(&parms->fbx6to4_zone,
+				    &t->parms.fbx6to4_zone,
+				    sizeof (parms->fbx6to4_zone)) &&
+			    parms->fbx6to4_prefix == t->parms.fbx6to4_prefix)
+#endif
 			return t;
 	}
 	if (!create)
@@ -396,9 +402,9 @@
 	}
 
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
-	kfree_skb(skb);
 	read_unlock(&ipip6_lock);
 out:
+	kfree_skb(skb);
 	return 0;
 }
 
@@ -416,6 +422,31 @@
 	return dst;
 }
 
+#ifdef CONFIG_IPV6_SIT_FBX6TO4
+/* Returns the embedded IPv4 address if the IPv6 address comes from
+   the Freebox 6to4 rule */
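+/* For instance, with a /40 zone: d32_off = 40 >> 5 = 1 and bits = 40 & 0x1f
+   = 8, so the embedded address is rebuilt from the low 24 bits of
+   s6_addr32[1] and the top 8 bits of s6_addr32[2], i.e. bits 40..71 of the
+   IPv6 destination. */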
+static inline __be32 try_fbx6to4(struct in6_addr *fbx6to4_zone,
+				 u8 fbx6to4_prefix, struct in6_addr *v6dst)
+{
+	__be32 dst = 0;
+
+	/* isolate zone according to mask */
+	if (ipv6_prefix_equal(v6dst, fbx6to4_zone, fbx6to4_prefix)) {
+		unsigned int d32_off, bits;
+
+		d32_off = fbx6to4_prefix >> 5;
+		bits = (fbx6to4_prefix & 0x1f);
+
+		dst = (ntohl(v6dst->s6_addr32[d32_off]) << bits);
+		if (bits)
+			dst |= ntohl(v6dst->s6_addr32[d32_off + 1]) >>
+				(32 - bits);
+		dst = htonl(dst);
+	}
+	return dst;
+}
+#endif
+
 /*
  *	This function assumes it is being called from dev_queue_xmit()
  *	and that skb is filled properly by that function.
@@ -445,6 +476,13 @@
 	if (skb->protocol != htons(ETH_P_IPV6))
 		goto tx_error;
 
+#ifdef CONFIG_IPV6_SIT_FBX6TO4
+	if (!dst && tunnel->parms.fbx6to4_prefix)
+		dst = try_fbx6to4(&tunnel->parms.fbx6to4_zone,
+				  tunnel->parms.fbx6to4_prefix,
+				  &iph6->daddr);
+	else
+#endif
 	if (!dst)
 		dst = try_6to4(&iph6->daddr);
 
@@ -632,6 +670,12 @@
 		if (p.iph.ttl)
 			p.iph.frag_off |= htons(IP_DF);
 
+#ifdef CONFIG_IPV6_SIT_FBX6TO4
+		/* prefix must be smaller than 95 bits since we fetch
+		 * an IPv4 address after it */
+		if (p.fbx6to4_prefix >= 95)
+			goto done;
+#endif
 		t = ipip6_tunnel_locate(&p, cmd == SIOCADDTUNNEL);
 
 		if (dev != ipip6_fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
@@ -650,6 +694,10 @@
 				ipip6_tunnel_unlink(t);
 				t->parms.iph.saddr = p.iph.saddr;
 				t->parms.iph.daddr = p.iph.daddr;
+#ifdef CONFIG_IPV6_SIT_FBX6TO4
+				t->parms.fbx6to4_zone = p.fbx6to4_zone;
+				t->parms.fbx6to4_prefix = p.fbx6to4_prefix;
+#endif
 				memcpy(dev->dev_addr, &p.iph.saddr, 4);
 				memcpy(dev->broadcast, &p.iph.daddr, 4);
 				ipip6_tunnel_link(t);
diff -ruw linux-2.6.20.14/net/Kconfig linux-2.6.20.14-fbx/net/Kconfig
--- linux-2.6.20.14/net/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/Kconfig	2010-12-29 19:30:08.871437089 +0100
@@ -27,6 +27,10 @@
 
 menu "Networking options"
 
+config NETSKBPAD
+	int "Size reserved by dev_alloc_skb"
+	default 16
+
 config NETDEBUG
 	bool "Network packet debugging"
 	help
@@ -34,6 +38,22 @@
 	  debugging bad packets, but can overwhelm logs under denial of service
 	  attacks.
 
+config NETRXTHREAD
+	bool "Do rx network processing in kernel thread"
+
+config NETRXTHREAD_RX_QUEUE
+	int "Number of rx queues"
+	default 1
+	depends on NETRXTHREAD
+
+config NETRXTHREAD_MAX_PROCESS
+	int "Maximum number of packet to process before schedule"
+	default 4
+	depends on NETRXTHREAD
+
+config SKB_RECYCLE
+	bool "Skb recycling support"
+
 source "net/packet/Kconfig"
 source "net/unix/Kconfig"
 source "net/xfrm/Kconfig"
@@ -178,6 +198,7 @@
 source "net/econet/Kconfig"
 source "net/wanrouter/Kconfig"
 source "net/sched/Kconfig"
+source "net/fbxatm/Kconfig"
 
 menu "Network testing"
 
diff -ruw linux-2.6.20.14/net/Makefile linux-2.6.20.14-fbx/net/Makefile
--- linux-2.6.20.14/net/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/Makefile	2010-12-29 19:30:08.871437089 +0100
@@ -51,3 +51,5 @@
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)		+= sysctl_net.o
 endif
+
+obj-$(CONFIG_FBXATM)		+= fbxatm/
diff -ruw linux-2.6.20.14/net/socket.c linux-2.6.20.14-fbx/net/socket.c
--- linux-2.6.20.14/net/socket.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/net/socket.c	2010-12-29 19:30:08.981441285 +0100
@@ -807,6 +807,68 @@
 
 EXPORT_SYMBOL(dlci_ioctl_set);
 
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_DIVERTER) || defined(CONFIG_FREEBOX_DIVERTER_MODULE)
+static DECLARE_MUTEX(fbxdiverter_ioctl_mutex);
+static int (*fbxdiverter_ioctl_hook)(unsigned int cmd, void __user *arg) = NULL;
+
+void fbxdiverter_set(int (*hook)(unsigned int, void __user *))
+{
+	down(&fbxdiverter_ioctl_mutex);
+	fbxdiverter_ioctl_hook = hook;
+	up(&fbxdiverter_ioctl_mutex);
+}
+EXPORT_SYMBOL(fbxdiverter_set);
+#endif
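+/* the fbxdiverter module is expected to register its handler with
+ * fbxdiverter_set() when it loads and to clear it with fbxdiverter_set(NULL)
+ * on unload, mirroring the dlci_ioctl_set() mechanism above. */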
+/* End Freebox added code */
+
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+static DECLARE_MUTEX(fbxbridge_ioctl_mutex);
+static int (*fbxbridge_ioctl_hook)(unsigned int cmd, void __user *arg) = NULL;
+
+void fbxbridge_set(int (*hook)(unsigned int, void __user *))
+{
+	down(&fbxbridge_ioctl_mutex);
+	fbxbridge_ioctl_hook = hook;
+	up(&fbxbridge_ioctl_mutex);
+}
+EXPORT_SYMBOL(fbxbridge_set);
+#endif
+/* End Freebox added code */
+
+
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_MVDSA) || defined(CONFIG_FREEBOX_MVDSA_MODULE)
+static DECLARE_MUTEX(fbxmvdsa_ioctl_mutex);
+static int (*fbxmvdsa_ioctl_hook)(unsigned int cmd, void __user *arg) = NULL;
+
+void fbxmvdsa_set(int (*hook)(unsigned int, void __user *))
+{
+	down(&fbxmvdsa_ioctl_mutex);
+	fbxmvdsa_ioctl_hook = hook;
+	up(&fbxmvdsa_ioctl_mutex);
+}
+EXPORT_SYMBOL(fbxmvdsa_set);
+#endif
+/* End Freebox added code */
+
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_L2BR) || defined(CONFIG_FREEBOX_L2BR_MODULE)
+static DECLARE_MUTEX(fbxl2br_ioctl_mutex);
+static int (*fbxl2br_ioctl_hook)(unsigned int cmd, void __user *arg) = NULL;
+
+void fbxl2br_set(int (*hook)(unsigned int, void __user *))
+{
+	down(&fbxl2br_ioctl_mutex);
+	fbxl2br_ioctl_hook = hook;
+	up(&fbxl2br_ioctl_mutex);
+}
+EXPORT_SYMBOL(fbxl2br_set);
+#endif
+/* End Freebox added code */
+
+
 /*
  *	With an ioctl, arg may well be a user mode pointer, but we don't know
  *	what to do with it - that's up to the protocol still.
@@ -876,6 +938,71 @@
 				mutex_unlock(&dlci_ioctl_mutex);
 			}
 			break;
+
+/* Start Freebox added code */
+		case SIOCGFBXDIVERT:
+		case SIOCSFBXDIVERT:
+#if defined(CONFIG_FREEBOX_DIVERTER) || defined(CONFIG_FREEBOX_DIVERTER_MODULE)
+			err = -ENOPKG;
+			if (!fbxdiverter_ioctl_hook)
+				request_module("fbxdiverter");
+
+			down(&fbxdiverter_ioctl_mutex);
+			if (fbxdiverter_ioctl_hook)
+				err = fbxdiverter_ioctl_hook(cmd, argp);
+			up(&fbxdiverter_ioctl_mutex);
+			break;
+#endif
+/* End Freebox added code */
+
+/* Start Freebox added code */
+		case SIOCGFBXBRIDGE:
+		case SIOCSFBXBRIDGE:
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+			err = -ENOPKG;
+			if (!fbxbridge_ioctl_hook)
+				request_module("fbxbridge");
+
+			down(&fbxbridge_ioctl_mutex);
+			if (fbxbridge_ioctl_hook)
+				err = fbxbridge_ioctl_hook(cmd, argp);
+			up(&fbxbridge_ioctl_mutex);
+			break;
+#endif
+/* End Freebox added code */
+
+/* Start Freebox added code */
+		case SIOCFBXMVDSA:
+#if defined(CONFIG_FREEBOX_MVDSA) || defined(CONFIG_FREEBOX_MVDSA_MODULE)
+			err = -ENOPKG;
+			if (!fbxmvdsa_ioctl_hook)
+				request_module("fbxmvdsa");
+
+			down(&fbxmvdsa_ioctl_mutex);
+			if (fbxmvdsa_ioctl_hook)
+				err = fbxmvdsa_ioctl_hook(cmd, argp);
+			up(&fbxmvdsa_ioctl_mutex);
+			break;
+#endif
+/* End Freebox added code */
+
+/* Start Freebox added code */
+		case SIOCFBXL2BR:
+#if defined(CONFIG_FREEBOX_L2BR) || defined(CONFIG_FREEBOX_L2BR_MODULE)
+			err = -ENOPKG;
+			if (!fbxl2br_ioctl_hook)
+				request_module("fbxl2br");
+
+			down(&fbxl2br_ioctl_mutex);
+			if (fbxl2br_ioctl_hook)
+				err = fbxl2br_ioctl_hook(cmd, argp);
+			up(&fbxl2br_ioctl_mutex);
+			break;
+#endif
+/* End Freebox added code */
+
+/* */
+
 		default:
 			err = sock->ops->ioctl(sock, cmd, arg);
 
diff -ruw linux-2.6.20.14/scripts/gen_initramfs_list.sh linux-2.6.20.14-fbx/scripts/gen_initramfs_list.sh
--- linux-2.6.20.14/scripts/gen_initramfs_list.sh	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/scripts/gen_initramfs_list.sh	2010-12-29 19:30:08.991449299 +0100
@@ -15,9 +15,9 @@
 usage() {
 cat << EOF
 Usage:
-$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ...
-	-o <file>      Create gzipped initramfs file named <file> using
-	               gen_init_cpio and gzip
+$0 [-o <file>] [-u <uid>] [-g <gid>] [-z] {-d | <cpio_source>} ...
+	-o <file>      Create initramfs file named <file> using
+	               gen_init_cpio
 	-u <uid>       User ID to map to user ID 0 (root).
 	               <uid> is only meaningful if <cpio_source>
 	               is a directory.
@@ -28,6 +28,7 @@
 	               If <cpio_source> is a .cpio file it will be used
 		       as direct input to initramfs.
 	-d             Output the default cpio list.
+	-z             gzip the output file
 
 All options except -o and -l may be repeated and are interpreted
 sequentially and immediately.  -u and -g states are preserved across
@@ -91,7 +92,7 @@
 }
 
 list_parse() {
-	echo "$1 \\"
+	[ ! -h "$1" ] && echo "$1 \\" || :
 }
 
 # for each file print a line in following format
@@ -217,6 +218,7 @@
 prog=$0
 root_uid=0
 root_gid=0
+gzipped=0
 dep_list=
 cpio_file=
 cpio_list=
@@ -230,7 +232,7 @@
 		echo "deps_initramfs := \\"
 		shift
 		;;
-	"-o")	# generate gzipped cpio image named $1
+	"-o")	# generate cpio image named $1
 		shift
 		output_file="$1"
 		cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
@@ -258,6 +260,9 @@
 			usage
 			exit 0
 			;;
+		"-z")	# gzip the cpio image
+			gzipped=1
+			;;
 		*)
 			case "$arg" in
 				"-"*)
@@ -281,7 +286,11 @@
 		cpio_tfile=${cpio_file}
 	fi
 	rm ${cpio_list}
+	if [ $gzipped -eq 1 ]; then
 	cat ${cpio_tfile} | gzip -f -9 - > ${output_file}
+	else
+	    cp ${cpio_tfile} ${output_file}
+	fi
 	[ -z ${cpio_file} ] && rm ${cpio_tfile}
 fi
 exit 0
diff -ruw linux-2.6.20.14/scripts/mod/sumversion.c linux-2.6.20.14-fbx/scripts/mod/sumversion.c
--- linux-2.6.20.14/scripts/mod/sumversion.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/scripts/mod/sumversion.c	2010-12-29 19:30:09.001445416 +0100
@@ -7,6 +7,7 @@
 #include <ctype.h>
 #include <errno.h>
 #include <string.h>
+#include <limits.h>
 #include "modpost.h"
 
 /*
diff -ruw linux-2.6.20.14/scripts/unifdef.c linux-2.6.20.14-fbx/scripts/unifdef.c
--- linux-2.6.20.14/scripts/unifdef.c	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/scripts/unifdef.c	2010-12-29 19:30:09.001445416 +0100
@@ -206,7 +206,7 @@
 static void             error(const char *);
 static int              findsym(const char *);
 static void             flushline(bool);
-static Linetype         getline(void);
+static Linetype         __getline(void);
 static Linetype         ifeval(const char **);
 static void             ignoreoff(void);
 static void             ignoreon(void);
@@ -512,7 +512,7 @@
 
 	for (;;) {
 		linenum++;
-		lineval = getline();
+		lineval = __getline();
 		trans_table[ifstate[depth]][lineval]();
 		debug("process %s -> %s depth %d",
 		    linetype_name[lineval],
@@ -526,7 +526,7 @@
  * help from skipcomment().
  */
 static Linetype
-getline(void)
+__getline(void)
 {
 	const char *cp;
 	int cursym;
diff -ruw linux-2.6.20.14/sound/core/Kconfig linux-2.6.20.14-fbx/sound/core/Kconfig
--- linux-2.6.20.14/sound/core/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/sound/core/Kconfig	2010-12-29 19:30:09.011441469 +0100
@@ -4,7 +4,7 @@
 	depends on SND
 
 config SND_PCM
-	tristate
+	tristate "PCM API"
 	select SND_TIMER
 	depends on SND
 
diff -ruw linux-2.6.20.14/sound/oss/Kconfig linux-2.6.20.14-fbx/sound/oss/Kconfig
--- linux-2.6.20.14/sound/oss/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/sound/oss/Kconfig	2010-12-29 19:30:09.041437319 +0100
@@ -80,6 +80,13 @@
 	select SND_AC97_CODEC
 	depends on SOUND_PRIME && (SOC_AU1550 || SOC_AU1200)
 
+config SOUND_AU1550_I2S
+	tristate "Au1550 I2S Sound"
+	depends on SOUND_PRIME && SOC_AU1550
+	# Weird I2S driver needs I2C driver to talk to the codec...
+	select I2C
+	select I2C_AU1550
+
 config SOUND_TRIDENT
 	tristate "Trident 4DWave DX/NX, SiS 7018 or ALi 5451 PCI Audio Core"
 	depends on SOUND_PRIME && PCI
diff -ruw linux-2.6.20.14/sound/oss/Makefile linux-2.6.20.14-fbx/sound/oss/Makefile
--- linux-2.6.20.14/sound/oss/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/sound/oss/Makefile	2010-12-29 19:30:09.041437319 +0100
@@ -45,6 +45,7 @@
 obj-$(CONFIG_SOUND_ES1371)	+= es1371.o ac97_codec.o
 obj-$(CONFIG_SOUND_VRC5477)	+= nec_vrc5477.o ac97_codec.o
 obj-$(CONFIG_SOUND_AU1550_AC97)	+= au1550_ac97.o ac97_codec.o
+obj-$(CONFIG_SOUND_AU1550_I2S)	+= au1550_i2s.o
 obj-$(CONFIG_SOUND_FUSION)	+= cs46xx.o ac97_codec.o
 obj-$(CONFIG_SOUND_TRIDENT)	+= trident.o ac97_codec.o
 obj-$(CONFIG_SOUND_EMU10K1)	+= ac97_codec.o
diff -ruw linux-2.6.20.14/usr/Kconfig linux-2.6.20.14-fbx/usr/Kconfig
--- linux-2.6.20.14/usr/Kconfig	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/usr/Kconfig	2010-12-29 19:30:09.111441067 +0100
@@ -2,6 +2,10 @@
 # Configuration for initramfs
 #
 
+config INITRAMFS_USE_GZIP
+	bool "Initramfs file is gzipped"
+	default y
+
 config INITRAMFS_SOURCE
 	string "Initramfs source file(s)"
 	default ""
diff -ruw linux-2.6.20.14/usr/Makefile linux-2.6.20.14-fbx/usr/Makefile
--- linux-2.6.20.14/usr/Makefile	2007-06-11 20:37:31.000000000 +0200
+++ linux-2.6.20.14-fbx/usr/Makefile	2010-12-29 19:30:09.121437439 +0100
@@ -22,6 +22,7 @@
 ramfs-input := $(if $(filter-out "",$(CONFIG_INITRAMFS_SOURCE)), \
 			$(shell echo $(CONFIG_INITRAMFS_SOURCE)),-d)
 ramfs-args  := \
+        $(if $(CONFIG_INITRAMFS_USE_GZIP), -z) \
         $(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \
         $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID))
 
@@ -49,4 +50,3 @@
 $(obj)/initramfs_data.cpio.gz: $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
 	$(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.gz.d
 	$(call if_changed,initfs)
-
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./addrmap.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/addrmap.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./addrmap.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/addrmap.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,757 @@
+
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+#include <asm/arch/io.h>
+#include <asm/arch/regs.h>
+#include "common.h"
+
+/*
+ * target id/attr/name
+ */
+enum e_target {
+	E_SDRAM_CS0 = 0,
+	E_SDRAM_CS1,
+	E_SDRAM_CS2,
+	E_SDRAM_CS3,
+	E_DEVICE_CS0,
+	E_DEVICE_CS1,
+	E_DEVICE_CS2,
+	E_FLASH_BOOT_CS,
+	E_PCI_IO,
+	E_PCI_MEMORY,
+	E_PCI_EXPRESS_IO,
+	E_PCI_EXPRESS_MEMORY,
+	E_SECURITY_ACCELERATOR,
+	E_LAST_TARGET,
+};
+
+static const struct target {
+	u8 target;
+	u8 attr;
+	char *name;
+} targets[] = {
+	[E_SDRAM_CS0] = { 0x00, 0x0e, "DDR SDRAM CS0" },
+	[E_SDRAM_CS1] = { 0x00, 0x0d, "DDR SDRAM CS1" },
+	[E_SDRAM_CS2] = { 0x00, 0x0b, "DDR SDRAM CS2" },
+	[E_SDRAM_CS3] = { 0x00, 0x07, "DDR SDRAM CS3" },
+	[E_DEVICE_CS0] = { 0x01, 0x1e, "Device CS0" },
+	[E_DEVICE_CS1] = { 0x01, 0x1d, "Device CS1" },
+	[E_DEVICE_CS2] = { 0x01, 0x1b, "Device CS2" },
+	[E_FLASH_BOOT_CS] = { 0x01, 0x0f, "Flash boot CS" },
+	[E_PCI_IO] = { 0x03, 0x51, "PCI I/O" },
+	[E_PCI_MEMORY] = { 0x03, 0x59, "PCI Memory" },
+	[E_PCI_EXPRESS_IO] = { 0x04, 0x51, "PCI Express I/O" },
+	[E_PCI_EXPRESS_MEMORY] = { 0x04, 0x59, "PCI Express Memory" },
+	[E_SECURITY_ACCELERATOR] = { 0x09, 0x00, "Security Accelerator" },
+};
+
+static const struct target *find_target_by_id(u8 target, u8 attr)
+{
+	int i;
+
+	for (i = 0; i < E_LAST_TARGET; i++) {
+		if (targets[i].target == target &&
+		    targets[i].attr == attr)
+			return &targets[i];
+	}
+	return NULL;
+}
+
+struct window
+{
+	unsigned int base;
+	unsigned int size;
+};
+
+/* handy macro to fetch/write window size */
+#define TO_CPU_SIZE(x, shift)	((((x) >> (shift)) + 1) << (shift))
+#define TO_HW_SIZE(x, shift)	((((x - 1) >> (shift))) << (shift))
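+/* e.g. with a 16-bit shift, a 16 MB (0x01000000) window is written to the
+ * hardware as 0x00ff0000 and read back as 0x01000000 by TO_CPU_SIZE: sizes
+ * are stored as "size - 1" truncated to the shift granularity. */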
+
+/*
+ * set cpu window to given target
+ */
+static void mv_addrmap_cpuwin_set(int win_idx, enum e_target target_id,
+				  unsigned int base, unsigned int size)
+{
+	const struct target *t;
+	unsigned int val;
+
+	mv_writel(base & MV_CPU_WIN_BASE_MASK, MV_CPU_WIN_BASE_REG(win_idx));
+	t = &targets[target_id];
+	val = TO_HW_SIZE(size, MV_CPU_WIN_SIZE_SHIFT);
+	val |= t->target << MV_CPU_WIN_TARGET_SHIFT;
+	val |= t->attr << MV_CPU_WIN_ATTR_SHIFT;
+	val |= MV_CPU_WIN_ENABLED;
+	mv_writel(val, MV_CPU_WIN_CTL_REG(win_idx));
+}
+
+/*
+ * dump all cpu window content
+ */
+static int mv_addrmap_cpu_dump(char *page, char **start, off_t offset,
+			       int count, int *eof, void *data)
+{
+	int i, len;
+
+	if (offset != 0)
+		return 0;
+
+	len = 0;
+	for (i = 0; i < 8; i++) {
+		const struct target *t;
+		unsigned int ctl, base, target, attr, size;
+
+		base = mv_readl(MV_CPU_WIN_BASE_REG(i));
+		base &= MV_CPU_WIN_BASE_MASK;
+
+		ctl = mv_readl(MV_CPU_WIN_CTL_REG(i));
+		size = TO_CPU_SIZE(ctl, MV_CPU_WIN_SIZE_SHIFT);
+		target = (ctl & MV_CPU_WIN_TARGET_MASK) >>
+			MV_CPU_WIN_TARGET_SHIFT;
+		attr = (ctl & MV_CPU_WIN_ATTR_MASK) >>
+			MV_CPU_WIN_ATTR_SHIFT;
+
+		t = find_target_by_id(target, attr);
+
+		len += sprintf(page + len,
+			       "# Window %d:\n"
+			       " - Base: 0x%08x - Size: %d kB\n"
+			       " - Enabled: %s\n"
+			       " - Write Protect: %s\n"
+			       " - Target: 0x%01x/0x%02x (%s)\n",
+			       i,
+			       base, size / 1024,
+			       (ctl & MV_CPU_WIN_ENABLED) ? "Yes" : "No",
+			       (ctl & MV_CPU_WIN_WRPROTECT) ? "Yes" : "No",
+			       target, attr,
+			       t ? t->name : "unknown");
+
+		if (i == 0 || i == 1) {
+			unsigned int remap;
+
+			remap = mv_readl(MV_CPU_WIN_REMAPL_REG(i));
+			len += sprintf(page + len,
+				       " - Remap: 0x%08x\n", remap);
+		}
+	}
+
+	len += sprintf(page + len,
+		       "# Internal reg:\n"
+		       " - Base: %08x - Size: 1024 kB\n",
+		       mv_readl(MV_CPU_IREG_ADDR_REG));
+
+	*eof = 1;
+	return len;
+}
+
+/*
+ * dump all dram window content
+ */
+static int mv_addrmap_dram_dump(char *page, char **start, off_t offset,
+				int count, int *eof, void *data)
+{
+	int i, len;
+
+	if (offset != 0)
+		return 0;
+
+	len = 0;
+	for (i = 0; i < 4; i++) {
+		unsigned int ctl, base, size;
+
+		base = mv_readl(MV_DRAM_CS_BASE_REG(i));
+		base &= MV_DRAM_CS_BASE_MASK;
+
+		ctl = mv_readl(MV_DRAM_CS_SIZE_REG(i));
+		size = TO_CPU_SIZE(ctl, MV_DRAM_CS_SIZE_SHIFT);
+
+		len += sprintf(page + len,
+			       "# %s:\n"
+			       " - Base: 0x%08x - Size: %d kB\n"
+			       " - Enabled: %s\n",
+			       targets[E_SDRAM_CS0 + i].name,
+			       base, size / 1024,
+			       (ctl & MV_DRAM_CS_SIZE_ENABLED) ? "Yes" : "No");
+	}
+
+	*eof = 1;
+	return len;
+}
+
+
+#if defined(CONFIG_MV88FXX81_ETH) || defined(CONFIG_MV88FXX81_ETH_MODULE)
+/*
+ * set eth window to given target with RW access
+ */
+static void mv_addrmap_ethwin_set(int win_idx, enum e_target target_id,
+				  unsigned int base, unsigned int size)
+{
+	const struct target *t;
+	unsigned int val;
+
+	t = &targets[target_id];
+	val = (base & MV_ETH_WIN_BASE_MASK);
+	val |= t->target << MV_ETH_WIN_TARGET_SHIFT;
+	val |= t->attr << MV_ETH_WIN_ATTR_SHIFT;
+	mv_writel(val, MV_ETH_WIN_BASE_REG(win_idx));
+
+	val = TO_HW_SIZE(size, MV_ETH_WIN_SIZE_SHIFT);
+	val &= MV_ETH_WIN_SIZE_MASK;
+	mv_writel(val, MV_ETH_WIN_SIZE_REG(win_idx));
+
+	val = mv_readl(MV_ETH_WIN_BARE_REG);
+	val &= ~MV_ETH_WIN_BARE_WIN_ENABLED(win_idx);
+	mv_writel(val, MV_ETH_WIN_BARE_REG);
+
+	val = mv_readl(MV_ETH_WIN_EPAP_REG);
+	val |= MV_ETH_WIN_EPAP_RW << MV_ETH_WIN_EPAP_WINSHIFT(win_idx);
+	mv_writel(val, MV_ETH_WIN_EPAP_REG);
+}
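+/* note that MV_ETH_WIN_BARE_REG holds per-window disable bits: clearing a
+ * window's bit above is what enables it, which is why the dump below
+ * prints "Yes" when the bit is clear. */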
+
+static const char *eth_epap_to_name[] = {
+	[MV_ETH_WIN_EPAP_NOACCESS] = "No access",
+	[MV_ETH_WIN_EPAP_RO] = "Read Only",
+	[MV_ETH_WIN_EPAP_RESERVED] = "Reserved",
+	[MV_ETH_WIN_EPAP_RW] = "Read Write",
+};
+
+/*
+ * dump all gigabit ethernet window content
+ */
+static int mv_addrmap_eth_dump(char *page, char **start, off_t offset,
+			       int count, int *eof, void *data)
+{
+	unsigned int bare, epap;
+	int i, len;
+
+	if (offset != 0)
+		return 0;
+
+	bare = mv_readl(MV_ETH_WIN_BARE_REG);
+	epap = mv_readl(MV_ETH_WIN_EPAP_REG);
+
+	len = 0;
+	for (i = 0; i < 6; i++) {
+		const struct target *t;
+		unsigned int base, target, attr, size, access;
+
+		base = mv_readl(MV_ETH_WIN_BASE_REG(i));
+		target = (base & MV_ETH_WIN_TARGET_MASK) >>
+			MV_ETH_WIN_TARGET_SHIFT;
+		attr = (base & MV_CPU_WIN_ATTR_MASK) >>
+			MV_CPU_WIN_ATTR_SHIFT;
+		base &= MV_ETH_WIN_BASE_MASK;
+
+		size = mv_readl(MV_ETH_WIN_SIZE_REG(i));
+		size = TO_CPU_SIZE(size, MV_ETH_WIN_SIZE_SHIFT);
+
+		access = (epap & MV_ETH_WIN_EPAP_WINMASK(i)) >>
+			MV_ETH_WIN_EPAP_WINSHIFT(i);
+
+		t = find_target_by_id(target, attr);
+
+		len += sprintf(page + len,
+			       "# Window %d:\n"
+			       " - Base: 0x%08x - Size: %d kB\n"
+			       " - Enabled: %s\n"
+			       " - Access type: %s\n"
+			       " - Target: 0x%01x/0x%02x (%s)\n",
+			       i,
+			       base, size / 1024,
+			       (bare & MV_ETH_WIN_BARE_WIN_ENABLED(i)) ?
+			       "No" : "Yes",
+			       eth_epap_to_name[access],
+			       target, attr,
+			       t ? t->name : "unknown");
+
+		if (i < 4) {
+			unsigned int remap;
+
+			remap = mv_readl(MV_ETH_WIN_REMAP_REG(i));
+			len += sprintf(page + len,
+				       " - High Remap: 0x%08x\n", remap);
+		}
+	}
+
+	return len;
+}
+#endif /* ETH */
+
+#if defined(CONFIG_MV88FXX81_DMAMUX) || defined(CONFIG_MV88FXX81_DMAMUX_MODULE)
+/*
+ * set idma window to given target with RW access for all chan
+ */
+static void mv_addrmap_idmawin_set(int win_idx, enum e_target target_id,
+				   unsigned int base, unsigned int size)
+{
+	const struct target *t;
+	unsigned int val;
+	int i;
+
+	t = &targets[target_id];
+	val = (base & MV_IDMA_WIN_BASE_MASK);
+	val |= t->target << MV_IDMA_WIN_TARGET_SHIFT;
+	val |= t->attr << MV_IDMA_WIN_ATTR_SHIFT;
+	mv_writel(val, MV_IDMA_WIN_BASE_REG(win_idx));
+
+	val = TO_HW_SIZE(size, MV_IDMA_WIN_SIZE_SHIFT);
+	val &= MV_IDMA_WIN_SIZE_MASK;
+	mv_writel(val, MV_IDMA_WIN_SIZE_REG(win_idx));
+
+	val = mv_readl(MV_IDMA_WIN_BARE_REG);
+	val &= ~MV_IDMA_WIN_BARE_WIN_ENABLED(win_idx);
+	mv_writel(val, MV_IDMA_WIN_BARE_REG);
+
+	val = mv_readl(MV_IDMA_WIN_CHAP_REG(win_idx));
+	/* give full access to 4 DMA channels to this window */
+	for (i = 0; i < 4; i++)
+		val |= MV_IDMA_WIN_CHAP_RW << MV_IDMA_WIN_CHAP_CHANSHIFT(i);
+	mv_writel(val, MV_IDMA_WIN_CHAP_REG(win_idx));
+}
+
+static const char *idma_chap_to_name[] = {
+	[MV_IDMA_WIN_CHAP_NOACCESS] = "No access",
+	[MV_IDMA_WIN_CHAP_RO] = "Read Only",
+	[MV_IDMA_WIN_CHAP_RESERVED] = "Reserved",
+	[MV_IDMA_WIN_CHAP_RW] = "Read Write",
+};
+
+/*
+ * dump all idma window content
+ */
+static int mv_addrmap_idma_dump(char *page, char **start, off_t offset,
+				int count, int *eof, void *data)
+{
+	unsigned int bare;
+	int i, len;
+
+	if (offset != 0)
+		return 0;
+
+	bare = mv_readl(MV_IDMA_WIN_BARE_REG);
+
+	len = 0;
+	for (i = 0; i < 8; i++) {
+		const struct target *t;
+		unsigned int base, target, attr, size, access, epap;
+		int j;
+
+		base = mv_readl(MV_IDMA_WIN_BASE_REG(i));
+		target = (base & MV_IDMA_WIN_TARGET_MASK) >>
+			MV_IDMA_WIN_TARGET_SHIFT;
+		attr = (base & MV_IDMA_WIN_ATTR_MASK) >>
+			MV_IDMA_WIN_ATTR_SHIFT;
+		base &= MV_IDMA_WIN_BASE_MASK;
+
+		size = mv_readl(MV_IDMA_WIN_SIZE_REG(i));
+		size = TO_CPU_SIZE(size, MV_IDMA_WIN_SIZE_SHIFT);
+
+
+		t = find_target_by_id(target, attr);
+
+		len += sprintf(page + len,
+			       "# Window %d:\n"
+			       " - Base: 0x%08x - Size: %d kB\n"
+			       " - Enabled: %s\n"
+			       " - Access: ",
+			       i,
+			       base, size / 1024,
+			       (bare & MV_IDMA_WIN_BARE_WIN_ENABLED(i)) ?
+			       "No" : "Yes");
+
+		for (j = 0; j < 4; j++) {
+			epap = mv_readl(MV_IDMA_WIN_CHAP_REG(j));
+			access = (epap & MV_IDMA_WIN_CHAP_CHANMASK(i)) >>
+				MV_IDMA_WIN_CHAP_CHANSHIFT(i);
+
+			len += sprintf(page + len, "C%d:[%s] ",
+				       j, idma_chap_to_name[access]);
+		}
+
+		len += sprintf(page + len,
+			       "\n - Target: 0x%01x/0x%02x (%s)\n",
+			       target, attr, t ? t->name : "unknown");
+
+		if (i < 4) {
+			unsigned int remap;
+
+			remap = mv_readl(MV_IDMA_WIN_REMAP_REG(i));
+			len += sprintf(page + len,
+				       " - High Remap: 0x%08x\n", remap);
+		}
+	}
+
+	return len;
+}
+#endif /* DMA */
+
+#ifdef CONFIG_PCI
+static const struct {
+	unsigned int bare_bit;
+	unsigned long pci_func;
+	unsigned long pci_base_reg;
+	unsigned long remap_reg;
+	unsigned long size_reg;
+	char *name;
+	int sdram_bank_number;
+} pci_wins[] = {
+
+	{ MV_PCI_CS0_BAR_DISABLE, 0, PCI_BASE_ADDRESS_0,
+	  MV_PCI_CS0_BAR_REMAP_REG, MV_PCI_CS0_BAR_SIZE_REG,
+	  "SDRAM CS0", 0 },
+
+	{ MV_PCI_CS1_BAR_DISABLE, 0, PCI_BASE_ADDRESS_2,
+	  MV_PCI_CS1_BAR_REMAP_REG, MV_PCI_CS1_BAR_SIZE_REG,
+	  "SDRAM CS1", 1 },
+
+	{ MV_PCI_CS2_BAR_DISABLE, 1, PCI_BASE_ADDRESS_0,
+	  MV_PCI_CS2_BAR_REMAP_REG, MV_PCI_CS2_BAR_SIZE_REG,
+	  "SDRAM CS2", 2 },
+
+	{ MV_PCI_CS3_BAR_DISABLE, 1, PCI_BASE_ADDRESS_2,
+	  MV_PCI_CS3_BAR_REMAP_REG, MV_PCI_CS3_BAR_SIZE_REG,
+	  "SDRAM CS3", 3 },
+
+	{ MV_PCI_DEV_CS0_BAR_DISABLE, 2, PCI_BASE_ADDRESS_0,
+	  MV_PCI_DEV_CS0_BAR_REMAP_REG, MV_PCI_DEV_CS0_BAR_SIZE_REG,
+	  "DEVICE CS0", -1 },
+
+	{ MV_PCI_DEV_CS1_BAR_DISABLE, 2, PCI_BASE_ADDRESS_2,
+	  MV_PCI_DEV_CS1_BAR_REMAP_REG, MV_PCI_DEV_CS1_BAR_SIZE_REG,
+	  "DEVICE CS1", -1 },
+
+	{ MV_PCI_DEV_CS2_BAR_DISABLE, 2, PCI_BASE_ADDRESS_4,
+	  MV_PCI_DEV_CS2_BAR_REMAP_REG, MV_PCI_DEV_CS2_BAR_SIZE_REG,
+	  "DEVICE CS2", -1 },
+
+	{ MV_PCI_BOOT_CS_BAR_DISABLE, 3, PCI_BASE_ADDRESS_0,
+	  MV_PCI_BOOT_CS_BAR_REMAP_REG, MV_PCI_BOOT_CS_BAR_SIZE_REG,
+	  "BOOT CS", -1 },
+
+	{ MV_PCI_P2P_MEM0_BAR_DISABLE, 4, PCI_BASE_ADDRESS_0,
+	  MV_PCI_P2P_MEM0_BAR_REMAP_REG, MV_PCI_P2P_MEM0_BAR_SIZE_REG,
+	  "P2P MEM0", -1 },
+
+	{ MV_PCI_P2P_IO_BAR_DISABLE, 4, PCI_BASE_ADDRESS_4,
+	  MV_PCI_P2P_IO_BAR_REMAP_REG, MV_PCI_P2P_IO_BAR_SIZE_REG,
+	  "P2P IO", -1 },
+
+	{ 0, 0, 0, 0, 0, NULL, -1 },
+};
+
+/*
+ * dump all pci window content
+ */
+static int mv_addrmap_pci_dump(char *page, char **start, off_t offset,
+			       int count, int *eof, void *data)
+{
+	unsigned int bare;
+	int i, len;
+
+	if (offset != 0)
+		return 0;
+
+	len = 0;
+	bare = mv_readl(MV_PCI_BAR_ENABLE_REG);
+
+	for (i = 0; pci_wins[i].name; i++) {
+		unsigned int base, remap, size;
+
+		/* base is fetched from pci config reg */
+		mv_pci_local_read_config(pci_wins[i].pci_func,
+					 pci_wins[i].pci_base_reg,
+					 4, &base);
+		base &= ~(0xfff);
+		remap = mv_readl(pci_wins[i].remap_reg);
+		size = mv_readl(pci_wins[i].size_reg);
+		size = TO_CPU_SIZE(size, MV_PCI_BAR_SIZE_SHIFT);
+
+		len += sprintf(page + len,
+			       "# %s:\n"
+			       " - Base at : 0x%08x - Size: %d kB\n"
+			       " - Remap at: 0x%08x"
+			       " - Enabled: %s\n",
+			       pci_wins[i].name,
+			       base, size / 1024, remap,
+			       (bare & pci_wins[i].bare_bit) ?  "No" : "Yes");
+	}
+
+	return len;
+}
+
+/*
+ * Called by the PCI setup code to set up slave decoding. As configuration
+ * cycles are needed, the local PCI config read/write helpers are used.
+ */
+void __init mv_common_addrmap_pci_init(void)
+{
+	const struct target *t;
+	u32 val;
+	int i;
+
+	/* unmap all pci windows and disable all bars */
+	mv_writel(MV_PCI_RESERVED_BAR_ENABLE, MV_PCI_BAR_ENABLE_REG);
+
+	mv_writel(0, MV_PCI_CS0_BAR_SIZE_REG);
+	mv_writel(0, MV_PCI_CS1_BAR_SIZE_REG);
+	mv_writel(0, MV_PCI_CS2_BAR_SIZE_REG);
+	mv_writel(0, MV_PCI_CS3_BAR_SIZE_REG);
+	mv_writel(0, MV_PCI_DEV_CS0_BAR_SIZE_REG);
+	mv_writel(0, MV_PCI_DEV_CS1_BAR_SIZE_REG);
+	mv_writel(0, MV_PCI_DEV_CS2_BAR_SIZE_REG);
+	mv_writel(0, MV_PCI_BOOT_CS_BAR_SIZE_REG);
+	mv_writel(0, MV_PCI_P2P_MEM0_BAR_SIZE_REG);
+	mv_writel(0, MV_PCI_P2P_IO_BAR_SIZE_REG);
+	mv_writel(0, MV_PCI_EXP_ROM_BAR_SIZE_REG);
+
+	mv_writel(0, MV_PCI_CS0_BAR_REMAP_REG);
+	mv_writel(0, MV_PCI_CS1_BAR_REMAP_REG);
+	mv_writel(0, MV_PCI_CS2_BAR_REMAP_REG);
+	mv_writel(0, MV_PCI_CS3_BAR_REMAP_REG);
+	mv_writel(0, MV_PCI_DEV_CS0_BAR_REMAP_REG);
+	mv_writel(0, MV_PCI_DEV_CS1_BAR_REMAP_REG);
+	mv_writel(0, MV_PCI_DEV_CS2_BAR_REMAP_REG);
+	mv_writel(0, MV_PCI_BOOT_CS_BAR_REMAP_REG);
+	mv_writel(0, MV_PCI_P2P_MEM0_BAR_REMAP_REG);
+	mv_writel(0, MV_PCI_P2P_IO_BAR_REMAP_REG);
+	mv_writel(0, MV_PCI_EXP_ROM_BAR_REMAP_REG);
+
+	/* map PCI mem & PCI io space from the CPU. We use window 0 for
+	 * PCI mem since it's the only window that has a remap, and we
+	 * may want to use this later. Leave window 1 free for PCI
+	 * express in case we want to use it one day */
+	val = PCI1_MEM_BASE & MV_CPU_WIN_BASE_MASK;
+	mv_writel(val, MV_CPU_WIN_BASE_REG(0));
+	mv_writel(val, MV_CPU_WIN_REMAPL_REG(0));
+
+	t = &targets[E_PCI_MEMORY];
+	val = TO_HW_SIZE(PCI1_MEM_SIZE, MV_CPU_WIN_SIZE_SHIFT);
+	val |= t->target << MV_CPU_WIN_TARGET_SHIFT;
+	val |= t->attr << MV_CPU_WIN_ATTR_SHIFT;
+	val |= MV_CPU_WIN_ENABLED;
+	mv_writel(val, MV_CPU_WIN_CTL_REG(0));
+
+	val = PCI1_IO_BASE & MV_CPU_WIN_BASE_MASK;
+	mv_writel(val, MV_CPU_WIN_BASE_REG(2));
+
+	t = &targets[E_PCI_IO];
+	val = TO_HW_SIZE(PCI1_IO_SIZE, MV_CPU_WIN_SIZE_SHIFT);
+	val |= t->target << MV_CPU_WIN_TARGET_SHIFT;
+	val |= t->attr << MV_CPU_WIN_ATTR_SHIFT;
+	val |= MV_CPU_WIN_ENABLED;
+	mv_writel(val, MV_CPU_WIN_CTL_REG(2));
+
+	/* map each DRAM 'n' BAR to chip select 'n' */
+	val = (MV_PCI_DB_SELECT_CS0 << MV_PCI_DB_SELECT_DB0_SHIFT) |
+		(MV_PCI_DB_SELECT_CS1 << MV_PCI_DB_SELECT_DB1_SHIFT) |
+		(MV_PCI_DB_SELECT_CS2 << MV_PCI_DB_SELECT_DB2_SHIFT) |
+		(MV_PCI_DB_SELECT_CS3 << MV_PCI_DB_SELECT_DB3_SHIFT);
+	mv_writel(val, MV_PCI_DB_SELECT_REG);
+
+	/* fetch dram mapping and enable needed dram BARs to match
+	 * dram mapping */
+	for (i = 0; i < 4; i++) {
+		int j;
+		u32 dram_base, pci_base, size, ctl, val;
+
+		ctl = mv_readl(MV_DRAM_CS_SIZE_REG(i));
+		/* skip disabled DRAM windows */
+		if (!(ctl & MV_DRAM_CS_SIZE_ENABLED))
+			continue;
+
+		/* find corresponding PCI bar */
+		for (j = 0; pci_wins[j].name; j++) {
+			if (pci_wins[j].sdram_bank_number == i)
+				break;
+		}
+
+		if (!pci_wins[j].name) {
+			/* not found, should not happen */
+			continue;
+		}
+
+		/* set BAR size */
+		size = TO_CPU_SIZE(ctl, MV_DRAM_CS_SIZE_SHIFT);
+		size = TO_HW_SIZE(size, MV_PCI_BAR_SIZE_SHIFT);
+		mv_writel(size, pci_wins[j].size_reg);
+
+		/* set BAR base */
+		dram_base = mv_readl(MV_DRAM_CS_BASE_REG(i));
+		dram_base &= MV_DRAM_CS_BASE_MASK;
+		mv_pci_local_read_config(pci_wins[j].pci_func,
+					 pci_wins[j].pci_base_reg,
+					 4, &pci_base);
+		pci_base &= (0xfff);
+		pci_base |= (dram_base & ~(0xfff));
+		mv_pci_local_write_config(pci_wins[j].pci_func,
+					  pci_wins[j].pci_base_reg,
+					  4, pci_base);
+
+		/* enable BAR */
+		val = mv_readl(MV_PCI_BAR_ENABLE_REG);
+		val &= ~(pci_wins[j].bare_bit);
+		mv_writel(val, MV_PCI_BAR_ENABLE_REG);
+	}
+}
+#endif /* CONFIG_PCI */
+
+/*
+ * add procfs entry to dump cpu address map
+ */
+static const struct {
+	char *name;
+	int (*cb)(char *, char **, off_t, int, int *, void *);
+} proc_entries[] = {
+	{ "cpu_addrmap", mv_addrmap_cpu_dump },
+	{ "dram_addrmap", mv_addrmap_dram_dump },
+#if defined(CONFIG_MV88FXX81_ETH) || defined(CONFIG_MV88FXX81_ETH_MODULE)
+	{ "eth_addrmap", mv_addrmap_eth_dump },
+#endif
+#if defined(CONFIG_MV88FXX81_DMAMUX) || defined(CONFIG_MV88FXX81_DMAMUX_MODULE)
+	{ "idma_addrmap", mv_addrmap_idma_dump },
+#endif
+#ifdef CONFIG_PCI
+	{ "pci_addrmap", mv_addrmap_pci_dump },
+#endif
+	{ NULL },
+};
+
+static __init int mv_addrmap_proc_init(void)
+{
+	struct proc_dir_entry *dir_proc;
+	int i;
+
+	/* create directory */
+	dir_proc = create_proc_entry("mv88fxx81", S_IFDIR | S_IRUGO, NULL);
+	if (!dir_proc) {
+		printk(KERN_ERR "Can't create mv88fxx81 proc dir\n");
+		return 1;
+	}
+
+	for (i = 0; proc_entries[i].name; i++) {
+		struct proc_dir_entry *entry;
+
+		entry = create_proc_read_entry(proc_entries[i].name,
+					       S_IFREG | S_IRUGO,
+					       dir_proc, proc_entries[i].cb,
+					       NULL);
+		if (!entry) {
+			printk(KERN_ERR "Can't create proc %s entry\n",
+			       proc_entries[i].name);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Called early at boot to set up the whole chip address map. Unset anything
+ * previously set by the bios/bootloader, and build a minimal address map
+ * for all mv88fxx81 devices that linux knows about.
+ *
+ * PCI express, USB and TDM windows are not configured.
+ */
+void __init mv_common_addrmap_init(void)
+{
+	struct window dram_map[4];
+	u32 val;
+	int i, win;
+
+	/* unmap all CPU windows besides internal reg */
+	for (i = 0; i < 8; i++) {
+		mv_writel(0, MV_CPU_WIN_CTL_REG(i));
+		mv_writel(0, MV_CPU_WIN_BASE_REG(i));
+		if (i == 0 || i == 1) {
+			mv_writel(0, MV_CPU_WIN_REMAPL_REG(i));
+			mv_writel(0, MV_CPU_WIN_REMAPH_REG(i));
+		}
+	}
+
+	/* unmap all gigabit ethernet windows */
+	mv_writel(0x3f, MV_ETH_WIN_BARE_REG);
+	mv_writel(0x0, MV_ETH_WIN_EPAP_REG);
+	for (i = 0; i < 6; i++) {
+		mv_writel(0, MV_ETH_WIN_BASE_REG(i));
+		mv_writel(0, MV_ETH_WIN_SIZE_REG(i));
+		if (i < 4)
+			mv_writel(0, MV_ETH_WIN_REMAP_REG(i));
+	}
+
+	/* unmap all idma windows */
+	mv_writel(0xff, MV_IDMA_WIN_BARE_REG);
+	for (i = 0; i < 8; i++) {
+		mv_writel(0, MV_IDMA_WIN_BASE_REG(i));
+		mv_writel(0, MV_IDMA_WIN_SIZE_REG(i));
+		if (i < 4)
+			mv_writel(0, MV_IDMA_WIN_REMAP_REG(i));
+	}
+	for (i = 0; i < 4; i++)
+		mv_writel(0x0, MV_IDMA_WIN_CHAP_REG(i));
+
+
+	/* fetch dram mapping (set by bootloader) */
+	for (i = 0; i < 4; i++) {
+		u32 ctl;
+
+		dram_map[i].base = mv_readl(MV_DRAM_CS_BASE_REG(i));
+		dram_map[i].base &= MV_DRAM_CS_BASE_MASK;
+
+		ctl = mv_readl(MV_DRAM_CS_SIZE_REG(i));
+		if (!(ctl & MV_DRAM_CS_SIZE_ENABLED)) {
+			dram_map[i].size = 0;
+			continue;
+		}
+		dram_map[i].size = TO_CPU_SIZE(ctl, MV_DRAM_CS_SIZE_SHIFT);
+	}
+
+#if defined(CONFIG_MV88FXX81_ETH) || defined(CONFIG_MV88FXX81_ETH_MODULE)
+	/* configure gigabit ethernet default target & attr in case of
+	 * address decode error */
+	val = (targets[E_SDRAM_CS0].target << MV_ETH_UNIT_EUDID_TARGET_SHIFT) |
+		(targets[E_SDRAM_CS0].attr << MV_ETH_UNIT_EUDID_ATTR_SHIFT);
+	mv_writel(val, MV_ETH_UNIT_EUDID_REG);
+
+	/* give gigabit ethernet full access to sdram */
+	for (i = win = 0; i < 4; i++) {
+		if (!dram_map[i].size)
+			continue;
+
+		mv_addrmap_ethwin_set(win++, E_SDRAM_CS0 + i,
+				      dram_map[i].base, dram_map[i].size);
+	}
+#endif
+
+#if defined(CONFIG_MV88FXX81_DMAMUX) || defined(CONFIG_MV88FXX81_DMAMUX_MODULE)
+	/* give idma full access to sdram */
+	for (i = win = 0; i < 4; i++) {
+		if (!dram_map[i].size)
+			continue;
+
+		mv_addrmap_idmawin_set(win++, E_SDRAM_CS0 + i,
+				       dram_map[i].base, dram_map[i].size);
+	}
+
+	/* give idma access to flash */
+	mv_addrmap_idmawin_set(win++, E_FLASH_BOOT_CS, FLASH_BASE, FLASH_SIZE);
+
+#ifdef CONFIG_PCI
+	/* give idma access to PCI memory */
+	mv_addrmap_idmawin_set(win++, E_PCI_MEMORY,
+			       PCI1_MEM_BASE, PCI1_MEM_SIZE);
+#endif
+
+#endif
+	/* map flash from CPU */
+	mv_addrmap_cpuwin_set(4, E_FLASH_BOOT_CS, FLASH_BASE, FLASH_SIZE);
+
+	/* map devCS0 from CPU */
+	mv_addrmap_cpuwin_set(5, E_DEVICE_CS0, DEVCS0_BASE, DEVCS0_SIZE);
+
+	/* map devCS1 from CPU */
+	mv_addrmap_cpuwin_set(6, E_DEVICE_CS1, DEVCS1_BASE, DEVCS1_SIZE);
+}
+
+__initcall(mv_addrmap_proc_init);
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./board-fbxo1_a.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/board-fbxo1_a.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./board-fbxo1_a.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/board-fbxo1_a.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,510 @@
+/*
+ * board-fbx6-a.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Fri Dec 29 18:58:59 2006
+ * Freebox SA
+ */
+
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/sizes.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/io.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/timer.h>
+#include <asm/mach/time.h>
+#include <asm/mach/arch.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/tdm.h>
+#include <asm/arch/irqs.h>
+
+#include <linux/fbxmtd.h>
+#include <linux/fbxserial.h>
+#include <linux/fbxserialinfo.h>
+#include <linux/fbxspi.h>
+#include <linux/fbxgpio_core.h>
+#include <linux/input.h>
+#include <linux/crash_zone.h>
+
+#include "common.h"
+
+char loader_version_str[128] = { 0 };
+EXPORT_SYMBOL(loader_version_str);
+
+static struct mv_gpio_data __initdata fbxo1a_gpio_data =
+{
+	.direction	= 0x99fffbaf,
+	.polarity	= 0x08000800,
+	.dataout	= 0x06000010,
+};
+
+static struct mv_mux_data __initdata fbxo1a_mux_data =
+{
+	.mpp_0_7	= 0x00000022,
+	.mpp_8_15	= 0x11110010,
+	.mpp_16_19	= 0x00001111,
+	.dev_mux	= 0xfff00000,
+};
+
+static struct mv_tdm_data __initdata fbxo1a_tdm_data =
+{
+	.tdm_dev_mux	= 0x000f0000,
+};
+
+/*
+ * MTD mapping stuff
+ */
+#define NVRAM_SIZE	SZ_64K
+#define BANK0_SIZE	(SZ_1K * 640)
+#define SERIAL_SIZE	SZ_8K
+
+
+#define BANK0_OFFSET	(0)
+#define NVRAM_OFFSET	(BANK0_SIZE)
+#define BANK1_OFFSET	(NVRAM_SIZE + BANK0_SIZE)
+
+
+#ifdef CONFIG_FBXO1_A_FBXMTD_MAP_UBOOT
+# define LOADER_SIZE	(SZ_128K + SZ_64K + SZ_32K)
+# define LOADER_ROFFSET	(SZ_256K)
+# define LOADER_NAME	"u-boot"
+#else
+# define LOADER_SIZE	(SZ_32K + SZ_16K + SZ_8K) /* 56 K */
+# define LOADER_ROFFSET (SZ_64K)
+# define LOADER_NAME	"ipl"
+#endif
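+/* the u-boot variant thus reserves 128K + 64K + 32K = 224 kB and the ipl
+ * variant 32K + 16K + 8K = 56 kB; roffset values appear to be counted back
+ * from the top of the flash (see SERIAL_ADDR below). */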
+
+
+#define NEWBANK0_ROFFSET	(BANK0_SIZE + LOADER_ROFFSET)
+#define SERIAL_ROFFSET	(SZ_8K)
+
+#define SERIAL_ADDR	(0xffffffff - SERIAL_ROFFSET + 1)
+
+#define RESET_VECTOR_ADDR	(0xffff0000)
+
+/*
+ * please note that partitions must be listed with the lower offset
+ * partition first (or the higher roffset).
+ */
+
+#define PART_ALL	0
+#define PART_BANK0	1
+#define PART_NVRAM	2
+#define PART_BANK1	3
+#define PART_LOADER	4
+#define PART_SERIAL	5
+
+static struct fbxmtd_platform_part fbxo1a_mtd_parts[] =
+{
+	{
+		.name		= "all",
+		.offset		= 0x0,
+		.size		= 0x0,
+		.flags		= FBXMTD_PART_MAP_ALL,
+	},
+	{
+		.name		= "bank0",
+		.offset		= BANK0_OFFSET,
+		.size		= BANK0_SIZE,
+		.flags		= FBXMTD_PART_HAS_FS,
+	},
+	{
+		.name		= "nvram",
+		.offset		= NVRAM_OFFSET,
+		.size		= NVRAM_SIZE,
+		.flags		= FBXMTD_PART_RW,
+	},
+	{
+		.name		= "bank1",
+		.offset		= BANK1_OFFSET,
+		.size		= 0,
+		.flags		= FBXMTD_PART_HAS_FS | FBXMTD_PART_RW |
+					FBXMTD_PART_AUTOSIZE,
+		.align_part	= LOADER_NAME,
+	},
+	{
+		.name		= "new_bank0",
+		.roffset	= NEWBANK0_ROFFSET,
+		.size		= BANK0_SIZE,
+		.flags		= FBXMTD_PART_RW,
+	},
+	{
+		.name		= LOADER_NAME,
+		.roffset	= LOADER_ROFFSET,
+		.size		= LOADER_SIZE,
+		.flags		= 0,
+	},
+	{
+		.name		= "serial",
+		.roffset	= SERIAL_ROFFSET,
+		.size		= SERIAL_SIZE,
+		.flags		= 0,
+	},
+};
+
+static struct fbxmtd_platform_data fbxo1a_mtd_data =
+{
+	.name	= "flash0",
+	.base	= 0xfe000000,
+	.width	= CONFIG_FBXO1_A_FLASH_BUS_WIDTH,
+	.parts	= fbxo1a_mtd_parts,
+	.num_parts	= ARRAY_SIZE(fbxo1a_mtd_parts),
+};
+
+static struct platform_device fbxo1a_mtd_device =
+{
+	.name	= "fbxmtd_map_drv",
+	.id	= 0,
+	.dev	= {
+		.platform_data = &fbxo1a_mtd_data,
+	},
+};
+
+static void __init
+fbxo1a_map_io(void)
+{
+	mv_common_map_io();
+#ifdef CONFIG_CRASHZONE
+	reserve_bootmem(SZ_32M - SZ_4K, SZ_4K);
+#endif
+}
+
+#ifdef CONFIG_CRASHZONE
+int __init
+fbxo1_crashzone_init(void)
+{
+	void __iomem *dead_zone;
+
+	dead_zone = ioremap_nocache(SZ_32M - SZ_4K, SZ_4K);
+	if (!dead_zone) {
+		printk(KERN_ERR "unable to ioremap dead zone.\n");
+		return -ENOMEM;
+	}
+	crash_zone_set_param(dead_zone, SZ_4K);
+	return 0;
+}
+arch_initcall(fbxo1_crashzone_init);
+#endif
+
+static void __init
+fbxo1a_init_irq(void)
+{
+	mv_common_init_irq();
+	mv_gpio_init(&fbxo1a_gpio_data);
+	mv_mux_init(&fbxo1a_mux_data);
+
+	/*
+	 *	set correct flow types for external
+	 *	interrupts. the smp8634 pci interrupt is implemented as
+	 *	a gpio on the sigma side and the default state of the
+	 *	gpio is low, so set it up to trigger high.
+	 */
+	set_irq_type(IRQ_GPIO_27, IRQF_TRIGGER_LOW); /* minipci slot */
+	set_irq_type(IRQ_GPIO_2, IRQF_TRIGGER_FALLING); /* smp8634 pci */
+	set_irq_type(IRQ_GPIO_11, IRQF_TRIGGER_FALLING); /* slac/slic */
+}
+
+/*
+ * fbx serial stuff.
+ */
+static struct fbx_serial serial;
+static uint8_t bootldr_random_data[32];
+
+const struct fbx_serial *
+arch_get_serial(void)
+{
+	/*
+	 * patch serial structure with random data retrieved from
+	 * bootloader tag list.
+	 */
+	memcpy(serial.random_data, bootldr_random_data, 32);
+	return &serial;
+}
+EXPORT_SYMBOL(arch_get_serial);
+
+/*
+ * since the random data is store in the sigma side, it is passed to
+ * the linux kernel using the bootloader tag list.
+ */
+static int __init
+parse_tag_random_data(const struct tag *tag)
+{
+	memcpy(bootldr_random_data, &tag->u.random_data, 32);
+	return 0;
+}
+__tagtable(ATAG_RANDOM_DATA, parse_tag_random_data);
+
+/*
+ * also get the sigma random seed from the bootloader tag list.
+ */
+static uint8_t bootldr_random_seed[4];
+static int __init
+parse_tag_random_seed(const struct tag *tag)
+{
+	memcpy(bootldr_random_seed, tag->u.random_seed.seed, 4);
+	return 0;
+}
+__tagtable(ATAG_RANDOM_SEED, parse_tag_random_seed);
+
+/*
+ * xor the random seed with the beginning of the box random data to
+ * have a unique seed value for each box for a given initial seed.
+ * add this seed to the entropy pool 512 times.
+ */
+static int __init
+seed_random(void)
+{
+	int i;
+
+	for (i = 0; i < 4; ++i)
+		bootldr_random_seed[i] ^= bootldr_random_data[i];
+
+	for (i = 0; i < 512; ++i)
+		add_raw_randomness(bootldr_random_seed, 4);
+
+	return 0;
+}
+late_initcall(seed_random);
+
+
+/*
+ * fbxgpio stuff.
+ */
+static struct fbxgpio_operations mv88f5181_gpio_ops =
+{
+	.get_datain = mv_get_gpio_datain,
+	.set_dataout = mv_set_gpio_dataout,
+	.get_dataout = mv_get_gpio_dataout,
+	.set_direction = mv_set_gpio_direction,
+	.get_direction = mv_get_gpio_direction,
+};
+
+static struct fbxgpio_pin fbxo1a_gpio_pins[] =
+{
+	{
+		.pin_name	= "switch-reset",
+		.direction	= GPIO_DIR_OUT,
+		.pin_num	= 6,
+	},
+
+	/* SFP gpios */
+	{
+		.pin_name	= "sfp-pwren",
+		.direction	= GPIO_DIR_OUT,
+		.pin_num	= 30,
+	},
+	{
+		.pin_name	= "sfp-pwrmon",
+		.direction	= GPIO_DIR_IN,
+		.pin_num	= 31,
+	},
+	{
+		.pin_name	= "sfp-rxloss",
+		.direction	= GPIO_DIR_IN,
+		.pin_num	= 3,
+	},
+	{
+		.pin_name	= "sfp-txdisable",
+		.direction	= GPIO_DIR_OUT,
+		.pin_num	= 26,
+	},
+	{
+		.pin_name	= "sfp-notpresent",
+		.direction	= GPIO_DIR_IN,
+		.pin_num	= 24,
+	},
+	/* SFF gpios */
+	{
+		.pin_name	= "sff-txdisable",
+		.direction	= GPIO_DIR_OUT,
+		.pin_num	= 4,
+	},
+	{
+		.pin_name	= "sff-signaldetect",
+		.direction	= GPIO_DIR_IN,
+		.pin_num	= 5,
+	},
+	/* Phone GPIOS */
+	{
+		.pin_name	= "phone-reset",
+		.direction	= GPIO_DIR_OUT,
+		.pin_num	= 10,
+	},
+	/* terminating element */
+	{  },
+};
+
+static struct platform_device mv88f5181_gpio_device =
+{
+	.name	= "fbxgpio",
+	.id	= -1,
+	.dev	= {
+		.platform_data = &fbxo1a_gpio_pins,
+	},
+};
+
+/*
+ * drive the spi chip select ourself for princeton devices since
+ * marvell spi hw cannot keep the chip select for more than 8 clock
+ * cycles.
+ *
+ * using our own GPIO we ensure that the device stays selected for as
+ * long as the SPI transfer needs to complete.
+ */
+static void
+pt_select_cb(struct fbxspi_device *dev, int sel)
+{
+	if (sel)
+		mv_set_gpio_dataout(25, 0);
+	else
+		mv_set_gpio_dataout(25, 1);
+}
+
+/*
+ * final device
+ */
+static struct fbxspi_device pt6959_device =
+{
+	.name		= "pt6959",
+	.cs		= 1,
+	.max_speed_hz	= 2500000,
+	.lsb_first	= 1,
+	.chip_select_cb	= pt_select_cb,
+};
+
+
+/*
+ * read serial info first as some devices (eth for instance) might rely
+ * on fbxserialinfo to be there and available.
+ */
+static void __init
+fbxo1a_init(void)
+{
+	void __iomem *data;
+	void __iomem *reset_vector;
+	int i;
+
+	/* warn about the deprecated use of the old machine number */
+	if (machine_arch_type == MACH_TYPE_FBXO1_A_LEGACY) {
+		printk(KERN_WARNING "You are using the old (and deprecated) "
+		       "ARM machine number for fbxo1.\n"
+		       "You are strongly advised to update your bootloader "
+		       "with the new and definitive official machine number.\n");
+	}
+
+	/* read serial info */
+	data = ioremap(SERIAL_ADDR, SZ_2K);
+	if (data != NULL) {
+		fbxserialinfo_read(data, &serial);
+		iounmap(data);
+	} else {
+		printk(KERN_CRIT "unable to ioremap serial info.\n");
+	}
+
+	/* read uboot/ipl version at reset_vector + 4 */
+	reset_vector = ioremap(RESET_VECTOR_ADDR, SZ_1K);
+	if (reset_vector != NULL) {
+		memcpy_fromio(loader_version_str, reset_vector + 4,
+			      sizeof (loader_version_str));
+		iounmap(reset_vector);
+	}
+	if (strncmp(loader_version_str, "u-boot", 6) &&
+	    strncmp(loader_version_str, "ipl", 3))
+		strcpy(loader_version_str, "UNKNOWN");
+	printk(KERN_INFO "loader version string: %s\n", loader_version_str);
+
+	mv_common_serial_init();
+	mv_common_addrmap_init();
+	mv_common_eth_init();
+	mv_common_dma_init();
+	mv_common_i2c_init();
+	mv_common_spi_init();
+	mv_common_wdt_init();
+	mv_tdm_init(&fbxo1a_tdm_data);
+
+	/* rework partition flags depending on kernel config */
+#ifdef CONFIG_FBXO1_A_FBXMTD_ALL_RW
+	printk(KERN_INFO "forcing R/W on all mtd partitions.\n");
+	{
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(fbxo1a_mtd_parts); ++i) {
+			fbxo1a_mtd_parts[i].flags |= FBXMTD_PART_RW;
+		}
+	}
+#endif
+
+#ifdef CONFIG_FBXO1_A_FBXMTD_NO_CRC
+	printk(KERN_INFO "disabling CRC check on partitions:");
+	{
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(fbxo1a_mtd_parts); ++i) {
+			if (fbxo1a_mtd_parts[i].flags & FBXMTD_PART_HAS_FS) {
+				fbxo1a_mtd_parts[i].flags |= FBXMTD_PART_NOCRC;
+				printk(" %s", fbxo1a_mtd_parts[i].name);
+			}
+		}
+	}
+	printk("\n");
+#endif
+
+#ifndef CONFIG_FBXO1_A_FBXMTD_READ_BANK1_TAG
+	printk(KERN_INFO "not reading image tag on partition %s\n",
+	       fbxo1a_mtd_parts[PART_BANK1].name);
+	fbxo1a_mtd_parts[PART_BANK1].flags |= FBXMTD_PART_IGNORE_TAG;
+#endif
+
+	/* register mtd & serialinfo device */
+	platform_device_register(&fbxo1a_mtd_device);
+
+	/* register gpio device with the correct gpio operations. */
+	for (i = 0; fbxo1a_gpio_pins[i].pin_name; ++i) {
+		fbxo1a_gpio_pins[i].ops = &mv88f5181_gpio_ops;
+	}
+	platform_device_register(&mv88f5181_gpio_device);
+
+	fbxspi_register_device(&pt6959_device);
+
+	panic_timeout = 10;
+	panic_on_oops = 1;
+}
+
+struct sys_timer fbxo1a_timer =
+{
+	.init	= mv_common_timer_init,
+	.offset	= mv_gettimeoffset,
+};
+
+MACHINE_START(FBXO1_A_LEGACY, "Optical Freebox version 1, part A, Legacy")
+	/* Maintainer: Freebox SA */
+	.phys_io	= MV_INTER_REGS_BASE_PA,
+	.io_pg_offst	= (MV_INTER_REGS_BASE_PA >> 18) & 0xFFFC,
+	.boot_params	= 0x00000100,
+	.map_io		= fbxo1a_map_io,
+	.init_irq	= fbxo1a_init_irq,
+	.init_machine	= fbxo1a_init,
+	.timer		= &fbxo1a_timer,
+MACHINE_END
+
+MACHINE_START(FBXO1_A, "Optical Freebox version 1, part A")
+	/* Maintainer: Freebox SA */
+	.phys_io	= MV_INTER_REGS_BASE_PA,
+	.io_pg_offst	= (MV_INTER_REGS_BASE_PA >> 18) & 0xFFFC,
+	.boot_params	= 0x00000100,
+	.map_io		= fbxo1a_map_io,
+	.init_irq	= fbxo1a_init_irq,
+	.init_machine	= fbxo1a_init,
+	.timer		= &fbxo1a_timer,
+MACHINE_END
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./common.h linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/common.h
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./common.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/common.h	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,29 @@
+/*
+ * common.h for linux
+ * Created by <nschichan@corp.free.fr> on Thu Sep 21 14:42:03 2006
+ * Freebox SA
+ */
+
+#ifndef __COMMON_H__
+# define __COMMON_H__
+
+extern void mv_common_addrmap_pci_init(void);
+extern void mv_common_addrmap_init(void);
+extern void mv_common_init_irq(void);
+extern void mv_common_serial_init(void);
+extern void mv_common_map_io(void);
+extern void mv_common_timer_init(void);
+extern unsigned long mv_gettimeoffset(void);
+extern void mv_common_dma_init(void);
+extern void mv_common_eth_init(void);
+extern void mv_common_i2c_init(void);
+extern void mv_common_spi_init(void);
+extern void mv_common_wdt_init(void);
+
+extern int mv_pci_local_read_config(unsigned int func, unsigned int reg,
+				    int size, u32 *value);
+
+extern int mv_pci_local_write_config(unsigned int func, unsigned int reg,
+				     int size, u32 value);
+
+#endif /* !__COMMON_H__ */
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./dma.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/dma.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./dma.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/dma.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,46 @@
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/irqs.h>
+#include <asm/arch/devices.h>
+#include <asm/mach-types.h>
+
+#include "common.h"
+
+static struct resource dma_resources[] = {
+	{
+		.start		= MV88FXX81_IDMA_BASE_PA,
+		.end		= MV88FXX81_IDMA_END_PA,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= IRQ_IDMA_0,
+		.flags		= IORESOURCE_IRQ,
+	},
+	{
+		.start		= IRQ_IDMA_1,
+		.flags		= IORESOURCE_IRQ,
+	},
+	{
+		.start		= IRQ_IDMA_2,
+		.flags		= IORESOURCE_IRQ,
+	},
+	{
+		.start		= IRQ_IDMA_3,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device mv88fxx81_dma_device = {
+	.name           = "mv88fxx81_dma",
+	.id             = -1,
+	.num_resources	= ARRAY_SIZE(dma_resources),
+	.resource	= dma_resources,
+};
+
+void mv_common_dma_init(void)
+{
+	(void)platform_device_register(&mv88fxx81_dma_device);
+}
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./eth.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/eth.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./eth.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/eth.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,91 @@
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/irqs.h>
+#include <asm/arch/devices.h>
+#include <asm/mach-types.h>
+
+#include <linux/fbxserialinfo.h>
+
+#include "common.h"
+
+#include "mv6131-switch.h"
+
+static struct mv88fxx81_eth_platform_data pd = {
+
+#ifdef CONFIG_BOARD_MV_DP_5181L_BP_DDR2
+	.default_param = {
+		.port = PORT_MII,
+		.autoneg = AUTONEG_ENABLE,
+		.speed = SPEED_100,
+		.phy_address = 8,
+		.advertising = ADVERTISED_10baseT_Half |
+		ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
+		ADVERTISED_100baseT_Full
+	},
+	.ignore_phy = 0,
+	.disable_gmii = 1,
+#endif
+
+#if defined(CONFIG_BOARD_FBXO1_A) || defined(CONFIG_BOARD_MV_RD_5181L_VOIP_RD2)
+	.default_param = {
+		.port = PORT_MII,
+		.autoneg = AUTONEG_DISABLE,
+		.speed = SPEED_1000,
+		.duplex = DUPLEX_FULL,
+	},
+	.ignore_phy = 1,
+	.disable_gmii = 0,
+#endif
+#ifdef CONFIG_MV6131_SWITCH
+	.mii_init = mv6131_switch_init,
+#endif
+
+#ifdef CONFIG_BOARD_FBX_NODE
+	.default_param = {
+		.port		= PORT_MII,
+		.autoneg	= AUTONEG_DISABLE,
+		.speed		= SPEED_100,
+		.duplex		= DUPLEX_FULL,
+	},
+	.ignore_phy	= 1,
+	.disable_gmii	= 0,
+#endif
+};
+
+static struct resource eth_resources[] = {
+	{
+		.start		= MV88FXX81_ETH_BASE_PA,
+		.end		= MV88FXX81_ETH_END_PA,
+		.flags		= IORESOURCE_MEM,
+	},
+	{
+		.start		= IRQ_GB_SUM,
+		.flags		= IORESOURCE_IRQ,
+	},
+	{
+		.start		= IRQ_GB_ERR,
+		.flags		= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device mv88fxx81_eth_device = {
+	.name           = "mv88fxx81_eth",
+	.id             = -1,
+	.num_resources	= ARRAY_SIZE(eth_resources),
+	.resource	= eth_resources,
+	.dev		= {
+		.platform_data = &pd,
+	}
+};
+
+void mv_common_eth_init(void)
+{
+	fbxserialinfo_get_mac_addr(pd.mac_addr);
+	printk(KERN_INFO "using mac %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       pd.mac_addr[0], pd.mac_addr[1], pd.mac_addr[2],
+	       pd.mac_addr[3], pd.mac_addr[4], pd.mac_addr[5]);
+	(void)platform_device_register(&mv88fxx81_eth_device);
+}
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./gpio.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/gpio.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./gpio.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/gpio.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,121 @@
+/*
+ * gpio.c for linux-mv
+ * Created by <nschichan@corp.free.fr> on Fri Sep 22 12:51:21 2006
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/regs.h>
+#include <asm/arch/io.h>
+#include <asm/arch/gpio.h>
+
+#include <fbxgpio.h>
+
+static spinlock_t gpio_lock;
+
+static int check_gpio(int gpio)
+{
+	if (gpio >= 32 || gpio < 0) {
+		printk("gpio: invalid gpio %i\n", gpio);
+		dump_stack();
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * NOTE: GPIO_INPUT_EN is described in the manual as GPIO_OUTPUT_EN
+ * but active low.
+ */
+void
+mv_set_gpio_direction(int gpio, int is_input)
+{
+	unsigned int reg;
+	unsigned long flags;
+
+	if (check_gpio(gpio) < 0)
+		return ;
+
+	spin_lock_irqsave(&gpio_lock, flags);
+
+	reg = mv_readl(MV_GPIO_INPUT_EN_REG);
+
+	if (is_input)
+		reg |= (1 << gpio);
+	else
+		reg &= ~(1 << gpio);
+
+	mv_writel(reg, MV_GPIO_INPUT_EN_REG);
+
+	spin_unlock_irqrestore(&gpio_lock, flags);
+}
+
+int
+mv_get_gpio_direction(int gpio)
+{
+	if (check_gpio(gpio) < 0)
+		return -1;
+	return !!(mv_readl(MV_GPIO_INPUT_EN_REG) & (1 << gpio));
+}
+
+void
+mv_set_gpio_dataout(int gpio, int val)
+{
+	unsigned int reg;
+	unsigned long flags;
+
+	if (check_gpio(gpio) < 0)
+		return ;
+
+	spin_lock_irqsave(&gpio_lock, flags);
+
+	reg = mv_readl(MV_GPIO_DATAOUT_REG);
+	if (val)
+		reg |= (1 << gpio);
+	else
+		reg &= ~(1 << gpio);
+	mv_writel(reg, MV_GPIO_DATAOUT_REG);
+
+	spin_unlock_irqrestore(&gpio_lock, flags);
+}
+
+int
+mv_get_gpio_dataout(int gpio)
+{
+	if (check_gpio(gpio) < 0)
+		return -1;
+	return (mv_readl(MV_GPIO_DATAOUT_REG) >> gpio) & 0x1;
+}
+
+int
+mv_get_gpio_datain(int gpio)
+{
+	unsigned int reg;
+
+	if (check_gpio(gpio) < 0)
+		return -1;
+
+	reg = mv_readl(MV_GPIO_DATAIN_REG);
+
+	return (reg & (1 << gpio)) != 0;
+}
+
+void __init
+mv_gpio_init(const struct mv_gpio_data *data)
+{
+	spin_lock_init(&gpio_lock);
+	mv_writel(data->dataout, MV_GPIO_DATAOUT_REG);
+	mv_writel(data->direction, MV_GPIO_INPUT_EN_REG);
+	mv_writel(data->polarity, MV_GPIO_DATAIN_POLARITY_REG);
+}
+
+EXPORT_SYMBOL(mv_set_gpio_dataout);
+EXPORT_SYMBOL(mv_get_gpio_datain);
+EXPORT_SYMBOL(mv_set_gpio_direction);
+EXPORT_SYMBOL(mv_get_gpio_direction);
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./i2c.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/i2c.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./i2c.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/i2c.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,69 @@
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mv643xx.h>
+
+#include <asm/arch/irqs.h>
+#include <asm/arch/devices.h>
+#include <asm/arch/timer.h>
+#include "common.h"
+
+static struct mv64xxx_i2c_pdata mv88fxx81_i2c_pdata = {
+	/*
+	 * f = tclk / (10 * (freq_m + 1) * 2 ^ (freq_n + 1))
+	 *
+	 * Set n to 3, and play with m to stay below 100 kHz WRT tclk
+	 */
+	.freq_m			= 15, /* changed by code below */
+	.freq_n			= 3,
+	.timeout		= 1000,
+	.retries		= 1,
+};
+
+static struct resource mv88fxx81_i2c_resources[] = {
+	[0] = {
+		.start	= MV88FXX81_TWSI_BASE_PA,
+		.end	= MV88FXX81_TWSI_END_PA,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_TWSI,
+		.end	= IRQ_TWSI,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device mv88fxx81_i2c_device = {
+	.name		= "mv64xxx_i2c",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(mv88fxx81_i2c_resources),
+	.resource	= mv88fxx81_i2c_resources,
+	.dev = {
+		.platform_data = &mv88fxx81_i2c_pdata,
+	},
+};
+
+void __init mv_common_i2c_init(void)
+{
+	unsigned int m;
+
+	/*
+	 * m = (tclk / (10 * f * 2 ^ (freq_n + 1))) - 1
+	 *
+	 * we want f <= 100 kHz, and freq_n = 3
+	 *
+	 * m = (tclk / (10 * 100 * 1000 * 16)) - 1
+	 *
+	 * the truncation gives a higher frequency than expected, so
+	 * add one more to make sure we stay below 100 kHz.
+	 */
+	m = ((tclk_get_rate()) / 16000000) - 1 + 1;
+	if (m > 15) {
+		printk(KERN_ERR "i2c: unable to compute 'm' for current "
+		       "tclk rate\n");
+		return;
+	}
+	mv88fxx81_i2c_pdata.freq_m = m;
+	(void)platform_device_register(&mv88fxx81_i2c_device);
+}
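
As a quick sanity check of the divider maths in the two comments above, here is a small standalone userspace sketch (not part of the patch; the 166 MHz tclk is an assumed value, matching the rate timer.c falls back to):

/* twsi-divider-sketch.c: verify the TWSI freq_m computation used by
 * mv_common_i2c_init() above.  Build with:
 *   gcc -Wall -o twsi-divider-sketch twsi-divider-sketch.c
 */
#include <stdio.h>

int main(void)
{
	unsigned int tclk = 166666666;	/* assumed TCLK rate */
	unsigned int freq_n = 3;
	/* same computation as above: truncate, then add 1 as safety margin */
	unsigned int freq_m = (tclk / 16000000) - 1 + 1;
	/* resulting bus clock: f = tclk / (10 * (m + 1) * 2^(n + 1)) */
	unsigned int f = tclk / (10 * (freq_m + 1) * (1u << (freq_n + 1)));

	printf("freq_m = %u -> SCL around %u Hz\n", freq_m, f);
	return 0;
}

With these numbers freq_m comes out as 10 and the bus clock lands near 94.7 kHz; without the extra +1 the truncation would pick freq_m = 9 and roughly 104 kHz, which is exactly the overshoot the comment warns about.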
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./io.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/io.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./io.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/io.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,43 @@
+/*
+ * io.c for linux
+ * Created by <nschichan@corp.free.fr> on Thu Sep 21 14:45:22 2006
+ * Freebox SA
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <asm/arch/io.h>
+#include <asm/arch/devices.h>
+#include <asm/mach/map.h>
+#include <asm/page.h>
+#include "common.h"
+
+struct map_desc mv_common_io_desc[] __initdata =
+{
+	{
+		.virtual	= (unsigned long)MV_REGS_VA(MV_INTER_REGS_BASE_PA),
+		.pfn		= __phys_to_pfn(MV_INTER_REGS_BASE_PA),
+		.length		= SZ_1M,
+		.type		= MT_DEVICE
+	},
+
+#ifdef CONFIG_PCI
+	{
+		/* install 1:1 mapping for pci io, since drivers won't
+		 * necessarily ioremap the part they need */
+		.virtual	= (unsigned long)PCI1_IO_BASE,
+		.pfn		= __phys_to_pfn(PCI1_IO_BASE),
+		.length		= PCI1_IO_SIZE,
+		.type		= MT_DEVICE
+	},
+#endif
+
+};
+
+
+void __init
+mv_common_map_io(void)
+{
+	iotable_init(mv_common_io_desc, ARRAY_SIZE(mv_common_io_desc));
+}
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./irq.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/irq.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./irq.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/irq.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,231 @@
+/*
+ * irq.c for linux
+ * Created by <nschichan@corp.free.fr> on Tue Sep 19 18:26:33 2006
+ * Freebox SA
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+
+#include <asm/irq.h>
+#include <asm/arch/io.h>
+#include <asm/arch/regs.h>
+#include <asm/arch/irqs.h>
+#include <asm/mach/irq.h>
+#include "common.h"
+
+#define irq_to_gpio(Irq)	((Irq) - IRQ_GPIO_START)
+#define gpio_to_irq(Gpio)	((Gpio) + IRQ_GPIO_START)
+
+/*
+ * internal interrupt controller operations.
+ */
+static void mv_ipic_mask(unsigned int irq)
+{
+	u32 mask;
+
+	mask = mv_readl(MV_IRQ_MASK_REG);
+	mask &= ~(1 << irq);
+	mv_writel(mask, MV_IRQ_MASK_REG);
+}
+
+static void mv_ipic_unmask(unsigned int irq)
+{
+	u32 mask;
+
+	mask = mv_readl(MV_IRQ_MASK_REG);
+	mask |= (1 << irq);
+	mv_writel(mask, MV_IRQ_MASK_REG);
+}
+
+struct irq_chip mv88f5181_ipic = {
+	.name	= "mv88f5181_ipic",
+	.ack	= mv_ipic_mask,
+	.mask	= mv_ipic_mask,
+	.unmask	= mv_ipic_unmask,
+};
+
+/*
+ * external interrupt controller operations.
+ */
+static void mv_epic_mask(unsigned int irq)
+{
+	u32 mask;
+	unsigned int gpio;
+	u32 reg;
+
+	gpio = irq_to_gpio(irq);
+
+	if (irq_desc[irq].status & IRQ_LEVEL)
+		reg = MV_GPIO_IRQ_LEVEL_MASK_REG;
+	else
+		reg = MV_GPIO_IRQ_EDGE_MASK_REG;
+
+	mask = mv_readl(reg);
+	mask &= ~(1 << gpio);
+	mv_writel(mask, reg);
+}
+
+static void mv_epic_unmask(unsigned int irq)
+{
+	u32 mask;
+	u32 reg;
+	unsigned int gpio;
+
+	gpio = irq_to_gpio(irq);
+
+	if (irq_desc[irq].status & IRQ_LEVEL)
+		reg = MV_GPIO_IRQ_LEVEL_MASK_REG;
+	else
+		reg = MV_GPIO_IRQ_EDGE_MASK_REG;
+
+	mask = mv_readl(reg);
+	mask |= (1 << gpio);
+	mv_writel(mask, reg);
+}
+
+static void mv_epic_ack(unsigned int irq)
+{
+	u32 val;
+	u32 reg;
+	unsigned int gpio;
+
+	gpio = irq_to_gpio(irq);
+
+	if (irq_desc[irq].status & IRQ_LEVEL)
+		reg = MV_GPIO_IRQ_LEVEL_MASK_REG;
+	else
+		reg = MV_GPIO_IRQ_CAUSE_REG;
+
+	val = mv_readl(reg);
+	val &= ~(1 << gpio);
+	mv_writel(val, reg);
+}
+
+/*
+ *	please note that IRQF_TRIGGER_BOTH is NOT supported right now
+ *	and cannot be implemented on this hardware without being racy.
+ */
+static int mv_epic_set_type(unsigned int irq, unsigned int flow_type)
+{
+	u32 polarity;
+	u32 gpio;
+	struct irq_desc *desc;
+
+	BUG_ON(irq < IRQ_GPIO_START || irq >= NR_IRQS);
+
+	flow_type &= IRQF_TRIGGER_MASK;
+	gpio = irq_to_gpio(irq);
+	polarity = mv_readl(MV_GPIO_DATAIN_POLARITY_REG);
+	desc = irq_desc + irq;
+
+	switch (flow_type) {
+
+	case IRQF_TRIGGER_RISING:
+		polarity &= ~(1 << gpio);
+		desc->status &= ~IRQ_LEVEL;
+		desc->handle_irq = handle_edge_irq;
+		break;
+
+	case IRQF_TRIGGER_FALLING:
+		polarity |= (1 << gpio);
+		desc->status &= ~IRQ_LEVEL;
+		desc->handle_irq = handle_edge_irq;
+		break;
+
+	case IRQF_TRIGGER_HIGH:
+		polarity &= ~(1 << gpio);
+		desc->status |= IRQ_LEVEL;
+		desc->handle_irq = handle_level_irq;
+		break;
+
+	case IRQF_TRIGGER_LOW:
+		polarity |= (1 << gpio);
+		desc->status |= IRQ_LEVEL;
+		desc->handle_irq = handle_level_irq;
+		break;
+
+	default:
+		printk("%x: bad flow_type for irq %i\n", flow_type, irq);
+		return -EINVAL;
+
+	}
+	mv_writel(polarity, MV_GPIO_DATAIN_POLARITY_REG);
+
+	desc->status &= ~IRQF_TRIGGER_MASK;
+	desc->status |= flow_type;
+
+	return 0;
+}
+
+struct irq_chip mv88f5181_epic = {
+	.name		= "mv88f5181_epic",
+	.ack		= mv_epic_ack,
+	.mask		= mv_epic_mask,
+	.unmask		= mv_epic_unmask,
+	.set_type	= mv_epic_set_type,
+};
+
+/*
+ *	chained handler for gpio irqs
+ */
+static void mv_handle_gpio_irq(unsigned int irq, struct irq_desc *dev)
+{
+	unsigned int gpio_start;
+	u32 cause;
+	int i;
+
+	BUG_ON(irq < IRQ_GPIO_0_7 || irq > IRQ_GPIO_24_31);
+	gpio_start = (irq - IRQ_GPIO_0_7) * 8;
+
+	cause = (mv_readl(MV_GPIO_DATAIN_REG) &
+		 mv_readl(MV_GPIO_IRQ_LEVEL_MASK_REG)) |
+		(mv_readl(MV_GPIO_IRQ_CAUSE_REG) &
+		 mv_readl(MV_GPIO_IRQ_EDGE_MASK_REG));
+
+	for (i = gpio_start; i < gpio_start + 8; ++i) {
+		if (cause & (1 << i)) {
+			int irqnr;
+			struct irq_desc *desc;
+
+			irqnr = gpio_to_irq(i);
+			desc = irq_desc + irqnr;
+
+			desc_handle_irq(irqnr, desc);
+		}
+	}
+}
+
+void __init
+mv_common_init_irq(void)
+{
+	unsigned int i;
+
+	/*
+	 * internal interrupts.
+	 */
+	mv_writel(0x0, MV_IRQ_MASK_REG);
+	for (i = 0; i < IRQ_GPIO_START; ++i) {
+		set_irq_chip(i, &mv88f5181_ipic);
+		set_irq_handler(i, handle_level_irq);
+		set_irq_flags(i, IRQF_VALID);
+	}
+
+	/*
+	 * external interrupts
+	 */
+	mv_writel(0x0, MV_GPIO_IRQ_EDGE_MASK_REG);
+	mv_writel(0x0, MV_GPIO_IRQ_LEVEL_MASK_REG);
+	for (i = IRQ_GPIO_START; i < NR_IRQS; ++i) {
+		set_irq_chip(i, &mv88f5181_epic);
+		set_irq_handler(i, handle_level_irq);
+		set_irq_flags(i, IRQF_VALID);
+		irq_desc[i].status |= IRQ_LEVEL;
+	}
+	set_irq_chained_handler(IRQ_GPIO_0_7, mv_handle_gpio_irq);
+	set_irq_chained_handler(IRQ_GPIO_8_15, mv_handle_gpio_irq);
+	set_irq_chained_handler(IRQ_GPIO_16_23, mv_handle_gpio_irq);
+	set_irq_chained_handler(IRQ_GPIO_24_31, mv_handle_gpio_irq);
+}
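
The chained handler above builds its pending word by combining level-type lines (current DATAIN state gated by the level mask register) with edge-type lines (latched CAUSE bits gated by the edge mask register), then walks its 8-GPIO slice. A standalone sketch of that combination follows (not part of the patch; the register snapshots are made up):

/* gpio-demux-sketch.c: illustrate the pending-word computation done by
 * mv_handle_gpio_irq().  All register values below are made up.
 * Build with: gcc -Wall -o gpio-demux-sketch gpio-demux-sketch.c
 */
#include <stdio.h>

int main(void)
{
	unsigned int datain     = 0x00000810;	/* current pin levels */
	unsigned int level_mask = 0x00000800;	/* GPIO 11 unmasked, level type */
	unsigned int cause      = 0x00200000;	/* latched edge events */
	unsigned int edge_mask  = 0x00200000;	/* GPIO 21 unmasked, edge type */
	unsigned int pending, i;

	/* active level lines plus latched edge lines, as in the handler */
	pending = (datain & level_mask) | (cause & edge_mask);

	for (i = 0; i < 32; ++i)
		if (pending & (1u << i))
			printf("GPIO %u pending, dispatched from bank %u summary irq\n",
			       i, i / 8);
	return 0;
}

Here GPIO 11 and GPIO 21 come out pending; the real handler would hand them to desc_handle_irq() from the IRQ_GPIO_8_15 and IRQ_GPIO_16_23 chained entries respectively.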
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./Kconfig linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/Kconfig
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/Kconfig	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,89 @@
+# Boards based on marvell 88fxx81
+
+comment "Marvell Board Types"
+
+config BOARD_ORION
+	bool "Marvell 88fxx81 target Board (VOIP GE or Backpane)"
+	depends on ARCH_MV88FXX81
+
+choice
+	prompt "Marvell 88fxx81 target Board flavor"
+	depends on BOARD_ORION
+
+config BOARD_MV_DP_5181L_BP_DDR2
+	bool "Marvell dev board backplane DDR2"
+	depends on BOARD_ORION
+
+config BOARD_MV_RD_5181L_VOIP_RD2
+	bool "Marvell voip ref design"
+	depends on BOARD_ORION
+
+endchoice
+
+
+config BOARD_FBXO1_A
+	bool "Optical Freebox version 1, part A"
+	depends on ARCH_MV88FXX81
+	select BUILTIN_FBXSERIAL
+
+config BOARD_FBX_NODE
+	bool "Freebox Node card."
+	depends on ARCH_MV88FXX81
+	select BUILTIN_FBXSERIAL
+	select SWITCHES_DRIVERS
+
+config MV6131_SWITCH
+	bool "Reset and perform basic init of the mv6131 switch."
+	default y
+	depends on BOARD_FBXO1_A
+
+menu "FBXO1 A Flash configuration"
+
+config FBXO1_A_FLASH_BUS_WIDTH
+	int "Flash bus width (2 * 8 bits)"
+	default 2
+	depends on BOARD_FBXO1_A
+
+config FBXO1_A_FBXMTD_ALL_RW
+	bool "Make all FBXMTD partitions R/W"
+	default n
+	depends on BOARD_FBXO1_A
+
+config FBXO1_A_FBXMTD_NO_CRC
+	bool "Do not crc-check image tags"
+	default n
+	depends on BOARD_FBXO1_A
+
+config FBXO1_A_FBXMTD_READ_BANK1_TAG
+	bool "Read BANK1 image tag."
+	default y
+	depends on BOARD_FBXO1_A
+
+choice
+	prompt "Map flavor"
+	depends on BOARD_FBXO1_A
+	default FBXOA_1_FBXMTD_MAP_IPL
+
+config FBXO1_A_FBXMTD_MAP_UBOOT
+	bool "Use U-boot"
+
+config FBXOA_1_FBXMTD_MAP_IPL
+	bool "Use IPL"
+
+endchoice
+
+endmenu
+
+menu "FBX Node Flash configuration"
+
+config FBX_NODE_FLASH_BUS_WIDTH
+	int "Flash bus width (2 * 8 bits)"
+	default 2
+	depends on BOARD_FBX_NODE
+
+config FBX_NODE_FBXMTD_ALL_RW
+	bool "Make all FBXMTD partitions R/W"
+	default n
+	depends on BOARD_FBX_NODE
+
+endmenu
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./Makefile linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/Makefile
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/Makefile	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,13 @@
+
+
+obj-y += dma.o eth.o irq.o timer.o serial.o io.o gpio.o	\
+	addrmap.o i2c.o tdm.o mux.o spi.o watchdog.o phone.o
+
+obj-$(CONFIG_BOARD_ORION) += board-mv-orion.o
+obj-$(CONFIG_BOARD_FBXO1_A) += board-fbxo1_a.o
+obj-$(CONFIG_MV6131_SWITCH) += mv6131-switch.o
+obj-$(CONFIG_BOARD_FBX_NODE) += board-fbx_node.o sfpga.o
+
+obj-$(CONFIG_PCI) += pci.o
+
+EXTRA_CFLAGS += -Werror
\ No newline at end of file
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./Makefile.boot linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/Makefile.boot
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./Makefile.boot	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/Makefile.boot	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,3 @@
+   zreladdr-y	:= 0x00008000
+params_phys-y	:= 0x00000100
+initrd_phys-y	:= 0x00800000
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./mux.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/mux.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./mux.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/mux.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,21 @@
+/*
+ * mux.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Fri Dec 29 22:01:31 2006
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/arch/io.h>
+#include <asm/arch/regs.h>
+#include <asm/arch/mux.h>
+
+void __init
+mv_mux_init(const struct mv_mux_data *data)
+{
+	mv_writel(data->mpp_0_7, MV_MPP_CTL_0_REG);
+	mv_writel(data->mpp_8_15, MV_MPP_CTL_1_REG);
+	mv_writel(data->mpp_16_19, MV_MPP_CTL_2_REG);
+	mv_writel(data->dev_mux, MV_DEV_MUX_REG);
+}
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./mv6131-switch.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/mv6131-switch.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./mv6131-switch.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/mv6131-switch.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,128 @@
+/*
+ * mv6131-switch.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Jan 17 17:15:57 2007
+ * Freebox SA
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/devices.h>
+#include "mv6131-switch.h"
+
+#define PFX	"mv88e6131: "
+
+int __init
+mv6131_switch_reset(void)
+{
+	printk(PFX "holding switch reset.\n");
+
+	mv_set_gpio_direction(SWITCH_RESET_GPIO, 0);
+	mv_set_gpio_dataout(SWITCH_RESET_GPIO, 0);
+	mdelay(10);
+	mv_set_gpio_dataout(SWITCH_RESET_GPIO, 1);
+	mdelay(10);
+
+	return 0;
+}
+
+/*
+ * return 0 if we are able to read the expected switch ID.
+ */
+#define PROBE_ADDR	0x10
+int __init
+mv6131_switch_probe(struct switch_ops *ops)
+{
+	int val;
+
+	val = ops->mdio_read(ops->dev, PROBE_ADDR, SWITCH_ID_REG);
+	if ((val & 0xfff0) != 0x1060) {
+		printk(PFX "Unable to probe switch at address %x.\n",
+		       PROBE_ADDR);
+		return -ENODEV;
+	}
+	printk(PFX "Found Marvell 88E6131 rev 0x%x at address %02x\n",
+	       val & 0xf, PROBE_ADDR);
+	return 0;
+}
+
+/*
+ * PPU operations.
+ */
+static void __init
+mv6131_ppu_enable(struct switch_ops *ops)
+{
+	int val;
+
+	val = ops->mdio_read(ops->dev, GLOBAL_REGS_ADDR, SWITCH_CTL_REG);
+	val |= 0x4000;
+	ops->mdio_write(ops->dev, GLOBAL_REGS_ADDR, SWITCH_CTL_REG, val);
+}
+
+static void __init
+mv6131_ppu_disable(struct switch_ops *ops)
+{
+	int val;
+
+	val = ops->mdio_read(ops->dev, GLOBAL_REGS_ADDR, SWITCH_CTL_REG);
+	val &= ~0x4000;
+	ops->mdio_write(ops->dev, GLOBAL_REGS_ADDR, SWITCH_CTL_REG, val);
+}
+
+/*
+ * enable forwarding + learning on the requested port.
+ */
+static void __init
+mv6131_port_enable(struct switch_ops *ops, int port)
+{
+	int val;
+
+	val = ops->mdio_read(ops->dev, 0x10 + port, PORT_CTL_REG);
+	val |= 0x3;
+	ops->mdio_write(ops->dev, 0x10 + port, PORT_CTL_REG, val);
+}
+
+static void __init
+mv6131_port_setup_leds(struct switch_ops *ops, int port)
+{
+	ops->mdio_write(ops->dev, port, COPPER_PAGE_SELECT_REG, 3);
+	ops->mdio_write(ops->dev, port, COPPER_LED_CTL_REG, COPPER_LED_CTL_VAL);
+}
+
+/*
+ * this should really be a separate module inside drivers/, but since
+ * we need a working ethernet driver before getting here, and we
+ * can't guarantee that we would run after the ethernet driver
+ * initialization, we call this from the ethernet driver
+ * initialization itself, through the mii_init field of the ethernet
+ * platform data.
+ */
+int __init
+mv6131_switch_init(struct switch_ops *ops)
+{
+	int error;
+	printk(PFX "Marvell 88E6131 switch driver.\n");
+
+	error = mv6131_switch_reset();
+	if (error)
+		return error;
+
+	error = mv6131_switch_probe(ops);
+	if (error)
+		return error;
+
+	mv6131_ppu_disable(ops);
+	ops->mdio_write(ops->dev, 0x2, COPPER_CTL_REG, COPPER_PHY_POWERUP);
+
+	mv6131_port_setup_leds(ops, 0x2);
+
+	mv6131_ppu_enable(ops);
+
+	mv6131_port_enable(ops, 2);
+	mv6131_port_enable(ops, 3); /* CPU port */
+
+	return 0;
+}
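
The probe above relies on the product ID sitting in bits [15:4] of SWITCH_ID_REG (0x106x for an 88E6131) with the silicon revision in bits [3:0]. A standalone sketch of that decoding (not part of the patch; the readback value is made up):

/* mv6131-id-sketch.c: decode a SWITCH_ID_REG value the way
 * mv6131_switch_probe() does.  The 0x1063 readback is made up.
 * Build with: gcc -Wall -o mv6131-id-sketch mv6131-id-sketch.c
 */
#include <stdio.h>

int main(void)
{
	int val = 0x1063;	/* assumed SWITCH_ID_REG readback */

	if ((val & 0xfff0) != 0x1060)
		printf("not an 88E6131 (id field 0x%03x)\n", (val >> 4) & 0xfff);
	else
		printf("found an 88E6131, rev 0x%x\n", val & 0xf);
	return 0;
}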
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./mv6131-switch.h linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/mv6131-switch.h
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./mv6131-switch.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/mv6131-switch.h	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,46 @@
+/*
+ * mv6131-switch.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Jan 17 17:16:47 2007
+ * Freebox SA
+ */
+
+#ifndef MV6131_SWITCH_H
+# define MV6131_SWITCH_H
+
+struct switch_ops;
+
+int mv6131_switch_init(struct switch_ops *data);
+
+#define SWITCH_RESET_GPIO	6
+
+
+#define GLOBAL_REGS_ADDR	0x1b
+#define SWITCH_CTL_REG		0x4
+
+/*
+ * Port registers.
+ */
+#define PORT_STATUS_REG		0x0
+#define SWITCH_ID_REG		0x3
+#define PORT_CTL_REG		0x4
+
+/*
+ * Integrated copper PHY registers
+ */
+#define COPPER_CTL_REG		0x0
+#define COPPER_LED_CTL_REG	0x10
+#define COPPER_PAGE_SELECT_REG	0x16
+
+/*
+ * COPPER PHY Control register fields
+ */
+#define COPPER_PHY_POWERUP	0x9140
+
+/*
+ * LED control register init value: first LED on when the port is up,
+ * blinking on port activity (value 1); other LEDs disabled (value 8).
+ */
+#define COPPER_LED_CTL_VAL	0x8881
+
+
+#endif /* !MV6131_SWITCH_H */
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./pci.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/pci.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./pci.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/pci.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,405 @@
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach/pci.h>
+#include <asm/arch/regs.h>
+#include <asm/arch/irqs.h>
+#include <asm/mach-types.h>
+
+#include "common.h"
+
+#define LOCAL_BUS	0
+#define LOCAL_DEV	0
+
+/*
+ * low level func called to make a configuration read access
+ */
+static int mv88fxx81_pci_read_config(unsigned int busn, unsigned int slot,
+				     unsigned int func, unsigned int reg,
+				     int size, u32 *value)
+{
+	u32 addr;
+	unsigned long flags;
+
+	/* sanity check */
+	if (busn > (MV_PCI_CFG_ADDR_BUS_MASK >> MV_PCI_CFG_ADDR_BUS_SHIFT))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (slot > (MV_PCI_CFG_ADDR_DEV_MASK >> MV_PCI_CFG_ADDR_DEV_SHIFT))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (func > (MV_PCI_CFG_ADDR_FUNC_MASK >> MV_PCI_CFG_ADDR_FUNC_SHIFT))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	addr = (busn << MV_PCI_CFG_ADDR_BUS_SHIFT);
+	addr |= (slot << MV_PCI_CFG_ADDR_DEV_SHIFT);
+	addr |= (func << MV_PCI_CFG_ADDR_FUNC_SHIFT);
+	addr |= ((reg >> 2) << MV_PCI_CFG_ADDR_REG_SHIFT);
+	addr |= MV_PCI_CFG_ADDR_CFG_ENABLED;
+
+	mv_writel(addr, MV_PCI_CFG_ADDR_REG);
+	if (mv_readl(MV_PCI_CFG_ADDR_REG) != addr)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/* clear pci MMabort that may occur during config access */
+	local_irq_save(flags);
+	*value = mv_readl(MV_PCI_CFG_DATA_REG);
+	mv_writel(~MV_PCI_SERR_MMABORT_ENABLED, MV_PCI_INTR_CAUSE_REG);
+	local_irq_restore(flags);
+
+	switch (size) {
+	case 1:
+		*value = (*value >> (8 * (reg & 0x3))) & 0xff;
+		break;
+
+	case 2:
+		*value = (*value >> (8 * (reg & 0x2))) & 0xffff;
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * low level func called to make a configuration write access
+ */
+static int mv88fxx81_pci_write_config(unsigned int busn, unsigned int slot,
+				      unsigned int func, unsigned int reg,
+				      int size, u32 value)
+{
+	u32 addr;
+	unsigned long flags;
+
+	/* sanity check */
+	if (busn > (MV_PCI_CFG_ADDR_BUS_MASK >> MV_PCI_CFG_ADDR_BUS_SHIFT))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (slot > (MV_PCI_CFG_ADDR_DEV_MASK >> MV_PCI_CFG_ADDR_DEV_SHIFT))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (func > (MV_PCI_CFG_ADDR_FUNC_MASK >> MV_PCI_CFG_ADDR_FUNC_SHIFT))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	if (size != 4) {
+		u32 tmp;
+		int ret;
+
+		/* need to read first */
+		ret = mv88fxx81_pci_read_config(busn, slot, func,
+						reg, 4, &tmp);
+		if (ret != PCIBIOS_SUCCESSFUL)
+			return ret;
+
+		switch (size) {
+		case 1:
+			value = (tmp & ~(0xff << ((reg & 3) << 3))) |
+				(value << ((reg & 3) << 3));
+			break;
+
+		case 2:
+			value = (tmp & ~(0xffff << ((reg & 3) << 3))) |
+				(value << ((reg & 3) << 3));
+			break;
+
+		}
+	}
+
+	addr = (busn << MV_PCI_CFG_ADDR_BUS_SHIFT);
+	addr |= (slot << MV_PCI_CFG_ADDR_DEV_SHIFT);
+	addr |= (func << MV_PCI_CFG_ADDR_FUNC_SHIFT);
+	addr |= ((reg >> 2) << MV_PCI_CFG_ADDR_REG_SHIFT);
+	addr |= MV_PCI_CFG_ADDR_CFG_ENABLED;
+
+	mv_writel(addr, MV_PCI_CFG_ADDR_REG);
+	if (mv_readl(MV_PCI_CFG_ADDR_REG) != addr)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	local_irq_save(flags);
+	mv_writel(value, MV_PCI_CFG_DATA_REG);
+	mv_writel(~MV_PCI_SERR_MMABORT_ENABLED, MV_PCI_INTR_CAUSE_REG);
+	local_irq_restore(flags);
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * linux callbacks to make configuration access
+ */
+static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
+			   int where, int size, u32 *value)
+{
+	/* skip configuration access to ourself */
+	if (bus->number == LOCAL_BUS && PCI_SLOT(devfn) == LOCAL_DEV)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	return mv88fxx81_pci_read_config(bus->number,
+					 PCI_SLOT(devfn),
+					 PCI_FUNC(devfn),
+					 (where & 0x3f),
+					 size, value);
+}
+
+static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
+			    int where, int size, u32 value)
+{
+	/* skip configuration access to ourself */
+	if (bus->number == LOCAL_BUS && PCI_SLOT(devfn) == LOCAL_DEV)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	return mv88fxx81_pci_write_config(bus->number,
+					  PCI_SLOT(devfn),
+					  PCI_FUNC(devfn),
+					  (where & 0x3f),
+					  size, value);
+}
+
+static struct pci_ops mv88fxx81_pci_ops = {
+	.read	= pci_read_config,
+	.write	= pci_write_config
+};
+
+/*
+ * helper used to make local configuration read/write access
+ */
+int mv_pci_local_read_config(unsigned int func, unsigned int reg,
+			     int size, u32 *value)
+{
+	return mv88fxx81_pci_read_config(LOCAL_BUS, PCI_SLOT(LOCAL_DEV),
+					 PCI_FUNC(func), (reg & 0x3f),
+					 size, value);
+}
+
+int mv_pci_local_write_config(unsigned int func, unsigned int reg,
+			     int size, u32 value)
+{
+	return mv88fxx81_pci_write_config(LOCAL_BUS, PCI_SLOT(LOCAL_DEV),
+					  PCI_FUNC(func), (reg & 0x3f),
+					  size, value);
+}
+
+/*
+ * linux callback to begin bus scan
+ */
+static __init struct pci_bus *
+mv88fxx81_pci_scan_bus(int nr, struct pci_sys_data *sysdata)
+{
+	return pci_scan_bus(sysdata->busnr, &mv88fxx81_pci_ops, sysdata);
+}
+
+static struct resource mv88fxx81_pci_mem_space = {
+	.start	= PCI1_MEM_BASE,
+	.end	= PCI1_MEM_BASE + PCI1_MEM_SIZE,
+	.flags	= IORESOURCE_MEM,
+	.name	= "PCI Mem Space"
+};
+
+static struct resource mv88fxx81_pci_io_space = {
+	.start	= PCI1_IO_BASE,
+	.end	= PCI1_IO_BASE + PCI1_IO_SIZE,
+	.flags	= IORESOURCE_IO,
+	.name	= "PCI I/O Space"
+};
+
+static __init int mv88fxx81_pci_setup(int nr, struct pci_sys_data *sys)
+{
+	if (request_resource(&ioport_resource,
+			     &mv88fxx81_pci_io_space)) {
+		printk(KERN_ERR "Request io resource failed for PCI\n");
+		return 0;
+	}
+
+	if (request_resource(&iomem_resource,
+			     &mv88fxx81_pci_mem_space)) {
+		printk(KERN_ERR "Request mem resource failed for PCI\n");
+		return 0;
+	}
+
+	sys->resource[0] = &mv88fxx81_pci_io_space;
+	sys->resource[1] = &mv88fxx81_pci_mem_space;
+	sys->resource[2] = NULL;
+
+	return 1;
+}
+
+/*
+ * no pin swizzling needed: simply report the device slot number
+ */
+static u8 __init mv88fxx81_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+	return PCI_SLOT(dev->devfn);
+}
+
+/*
+ * map the correct irq number for the device at slot "slot" and
+ * interrupt pin "pin"
+ */
+static int mv88fxx81_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+
+	if (machine_is_orion()) {
+#ifdef CONFIG_BOARD_MV_DP_5181L_BP_DDR2
+		if (slot == 7)
+			return IRQ_GPIO_12;
+		if (slot == 8 || slot == 9)
+			return IRQ_GPIO_13;
+#endif
+
+#ifdef CONFIG_BOARD_MV_RD_5181L_VOIP_RD2
+		if (slot == 7) {
+			if (pin == 1)
+				return IRQ_GPIO_4;
+			if (pin == 2)
+				return IRQ_GPIO_10;
+		}
+#endif
+	}
+
+	if (machine_is_fbxo1_a() || machine_is_fbxo1_a_legacy()) {
+		if (slot == 7) {
+			if (pin == 1)
+				return IRQ_GPIO_27;
+		}
+		if (slot == 8) {
+			/* SMP863x interrupt */
+			if (pin == 1)
+				return IRQ_GPIO_2;
+		}
+	}
+
+	if (machine_is_fbx_node()) {
+		if (slot == 7)
+			return IRQ_GPIO_4;
+		if (slot == 8)
+			return IRQ_GPIO_3;
+	}
+
+	return -1;
+}
+
+/*
+ * Interrupt handler for pci error. Clear it and warn.
+ */
+#define PCI_INTR_MASK	\
+	MV_PCI_SERR_DPERR_ENABLED | MV_PCI_SERR_SWRPERR_ENABLED | \
+	MV_PCI_SERR_SRDPERR_ENABLED | MV_PCI_SERR_MIOERR_ENABLED | \
+	MV_PCI_SERR_MWRPERR_ENABLED | MV_PCI_SERR_MRDPERR_ENABLED | \
+	MV_PCI_SERR_MTABORT_ENABLED | MV_PCI_SERR_MDIS_ENABLED | \
+	MV_PCI_SERR_MCTABORT_ENABLED | MV_PCI_SERR_MMABORT_ENABLED | \
+	MV_PCI_SERR_MRETRY_ENABLED | MV_PCI_SERR_MDISCARD_ENABLED | \
+	MV_PCI_SERR_MUNEXP_ENABLED | MV_PCI_SERR_MERRMSG_ENABLED | \
+	MV_PCI_SERR_SCMABORT_ENABLED | MV_PCI_SERR_STBOART_ENABLED | \
+	MV_PCI_SERR_SCTABORT_ENABLED | MV_PCI_SERR_SRDBUF_ENABLED | \
+	MV_PCI_SERR_ARB_ENABLED	 | MV_PCI_SERR_SRETRY_ENABLED | \
+	MV_PCI_SERR_SCDESTRD_ENABLED
+
+static irqreturn_t pci_error_intr(int irq, void *devinfo)
+{
+	static const char *reasons[] = {
+		NULL, "SWrPerr", "SRdPerr", NULL,
+		NULL, "MWrPerr", "MRdPerr", "MCTabort",
+		"MMabort", "MTabort", NULL, "MRetry",
+		NULL, "MUnExp", "MErrMsg", NULL,
+		"SCMabort", "STabort"
+	};
+	const char *err;
+	static int faultcount = 0;
+	u32 address, orig_cause, cause;
+	unsigned int sel;
+
+	/* read cause register */
+	orig_cause = cause = mv_readl(MV_PCI_INTR_CAUSE_REG);
+	cause &= PCI_INTR_MASK;
+	if (!cause)
+		return IRQ_NONE;
+
+	/* clear cause */
+	mv_writel(~cause, MV_PCI_INTR_CAUSE_REG);
+
+	/* set cause string */
+	sel = (orig_cause & MV_PCI_CAUSE_SEL_MASK) >> MV_PCI_CAUSE_SEL_SHIFT;
+	if (sel >= ARRAY_SIZE(reasons) || !reasons[sel])
+		err = "Unknown";
+	else
+		err = reasons[sel];
+
+	/* read offending address */
+	address = mv_readl(MV_PCI_ERROR_ADDRL_REG);
+
+	/* don't flood */
+	if (printk_ratelimit())
+		printk(KERN_ERR "PCI: PCI fault (count %d): address 0x%08x: "
+		       "reason %s\n", ++faultcount, address, err);
+	return IRQ_HANDLED;
+}
+
+struct hw_pci mv88fxx81_pci __initdata = {
+	/* two buses on the 5181, but we ignore the pex (PCI Express) one */
+	.nr_controllers	= 1,
+	.setup		= mv88fxx81_pci_setup,
+	.scan		= mv88fxx81_pci_scan_bus,
+	.map_irq	= mv88fxx81_pci_map_irq,
+	.swizzle	= mv88fxx81_swizzle,
+};
+
+/*
+ * setup slave interface (address decoding)
+ */
+#define TO_HW_SIZE(x, shift)	((((x - 1) >> (shift))) << (shift))
+
+/*
+ * initialize host PCI
+ */
+int __init mv88fxx81_pci_init(void)
+{
+	u32 val;
+	int ret;
+
+	val = mv_readl(MV_CPU_CTL_AND_STATUS_REG);
+	val &= ~(MV_CPU_PCI_DISABLE);
+	mv_writel(val, MV_CPU_CTL_AND_STATUS_REG);
+
+	/* enable internal arbiter */
+	mv_writel(MV_PCI_ARBITER_ENABLE, MV_PCI_ARBITER_CTL_REG);
+
+	/* set our bus/device number */
+	val = LOCAL_BUS << MV_PCI_P2P_BUS_NUMBER_SHIFT;
+	val |= LOCAL_DEV << MV_PCI_P2P_DEV_NUMBER_SHIFT;
+	mv_writel(val, MV_PCI_P2P_CONFIG_REG);
+
+	/* enable all possible pci error reporting */
+	mv_writel(PCI_INTR_MASK, MV_PCI_SERR_MASK_REG);
+
+	/* setup the slave interface decoding regs */
+	mv_common_addrmap_pci_init();
+
+	/* enable pci master & pci slave & error reporting */
+	ret = mv88fxx81_pci_read_config(LOCAL_BUS, LOCAL_DEV, 0,
+					PCI_COMMAND, 4, &val);
+	if (ret != PCIBIOS_SUCCESSFUL) {
+		printk(KERN_ERR "PCI: unable to read command register\n");
+		return 1;
+	}
+	val |= PCI_COMMAND_MASTER | PCI_COMMAND_SERR |
+		PCI_COMMAND_MEMORY | PCI_COMMAND_IO;
+
+	ret = mv88fxx81_pci_write_config(LOCAL_BUS, LOCAL_DEV, 0,
+					 PCI_COMMAND, 4, val);
+	if (ret != PCIBIOS_SUCCESSFUL) {
+		printk(KERN_ERR "PCI: unable to write to command register\n");
+		return 1;
+	}
+
+	/* register the pci error interrupt */
+	mv_writel(PCI_INTR_MASK, MV_PCI_INTR_MASK_REG);
+	mv_writel(0, MV_PCI_INTR_CAUSE_REG);
+	request_irq(IRQ_PCI_ERR, pci_error_intr, 0, "pci_fault", NULL);
+
+	/* let linux scan & assign resources */
+	pci_common_init(&mv88fxx81_pci);
+
+	return 0;
+}
+
+subsys_initcall(mv88fxx81_pci_init);
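
Configuration accesses above always go through a full dword at the dword-aligned offset: smaller reads shift the wanted byte or halfword out of it, and smaller writes are read-modify-write merges. A standalone sketch of that bit juggling (not part of the patch; the 0x12345678 pattern is arbitrary):

/* pci-cfg-sketch.c: the sub-dword extract/merge logic used by
 * mv88fxx81_pci_read_config() and mv88fxx81_pci_write_config().
 * Build with: gcc -Wall -o pci-cfg-sketch pci-cfg-sketch.c
 */
#include <stdio.h>

int main(void)
{
	unsigned int dword = 0x12345678;	/* dword read at (reg & ~3) */
	unsigned int reg, value;

	/* 8-bit read at offset 1 picks byte 0x56 */
	reg = 1;
	printf("byte  @%u = 0x%02x\n", reg, (dword >> (8 * (reg & 0x3))) & 0xff);

	/* 16-bit read at offset 2 picks halfword 0x1234 */
	reg = 2;
	printf("word  @%u = 0x%04x\n", reg, (dword >> (8 * (reg & 0x2))) & 0xffff);

	/* 8-bit write of 0xaa at offset 1 is a read-modify-write of the dword */
	reg = 1;
	value = (dword & ~(0xffu << ((reg & 3) << 3))) | (0xaau << ((reg & 3) << 3));
	printf("merge @%u = 0x%08x\n", reg, value);	/* prints 0x1234aa78 */

	return 0;
}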
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./phone.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/phone.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./phone.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/phone.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,17 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static struct platform_device mv5181_phone_device =
+{
+	.name	= "mv5181_fbxphone",
+	.id	= -1,
+};
+
+int
+mv5181_phone_init(void)
+{
+	return platform_device_register(&mv5181_phone_device);
+}
+
+device_initcall(mv5181_phone_init);
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./serial.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/serial.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./serial.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/serial.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,51 @@
+/*
+ * serial.c for linux
+ * Created by <nschichan@corp.free.fr> on Thu Sep 21 14:32:49 2006
+ * Freebox SA
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <linux/tty.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/serialP.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+#include <linux/console.h>
+
+#include <asm/arch/timer.h>
+#include <asm/arch/devices.h>
+#include "common.h"
+
+
+static struct plat_serial8250_port serial_data[] = {
+	{
+		.membase	= MV_REGS_VA(MV88FXX81_UART0_BASE_PA),
+		.mapbase	= MV88FXX81_UART0_BASE_PA,
+		.irq		= IRQ_UART0,
+		.flags		= ASYNC_BOOT_AUTOCONF,
+		.iotype		= SERIAL_IO_MEM,
+		.regshift	= 2,
+		.uartclk	= 0, /* autodetected */
+	},
+	{ },
+};
+
+static struct platform_device serial_device = {
+	.name		= "serial8250",
+	.id		= PLAT8250_DEV_PLATFORM,
+	.dev		= {
+		.platform_data = serial_data,
+	}
+};
+
+void __init
+mv_common_serial_init(void)
+{
+	serial_data[0].uartclk = tclk_get_rate();
+
+	if (platform_device_register(&serial_device) < 0)
+		printk("unable to register platform serial device.\n");
+}
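
Leaving uartclk at the autodetected tclk matters because the 8250 core derives its baud divisor from it, roughly divisor = uartclk / (16 * baud). A standalone sketch with the assumed 166 MHz tclk (not part of the patch):

/* uart-divisor-sketch.c: why uartclk must be the real TCLK rate.
 * Build with: gcc -Wall -o uart-divisor-sketch uart-divisor-sketch.c
 */
#include <stdio.h>

int main(void)
{
	unsigned int uartclk = 166666666;	/* assumed TCLK rate */
	unsigned int bauds[] = { 9600, 115200 };
	unsigned int i;

	for (i = 0; i < sizeof(bauds) / sizeof(bauds[0]); ++i) {
		unsigned int div = uartclk / (16 * bauds[i]);
		printf("baud %6u -> divisor %4u (actual rate about %u baud)\n",
		       bauds[i], div, uartclk / (16 * div));
	}
	return 0;
}

At 115200 baud this gives a divisor of 90 and an actual rate of about 115740 baud, an error well under one percent.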
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./spi.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/spi.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./spi.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/spi.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,45 @@
+/*
+ * spi.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Fri Mar  9 23:31:54 2007
+ * Freebox SA
+ */
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/fbxspi.h>
+
+#include <asm/arch/regs.h>
+#include <asm/arch/devices.h>
+#include <asm/arch/gpio.h>
+
+static struct resource spi_resources[] =
+{
+	[0] = {
+		.start	= MV88F5181_SPI_BASE_PA,
+		.end	= MV88F5181_SPI_END_PA,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct mv88f5181_spi_platform_data spi_platform_data =
+{
+	.num_cs	= 2,
+	.gpio_cs = 29,
+};
+
+static struct platform_device spi_mv88f5181_device =
+{
+	.name		= "spi_mv88f5181",
+	.resource	= spi_resources,
+	.num_resources	= ARRAY_SIZE(spi_resources),
+	.dev		= {
+		.platform_data	= &spi_platform_data,
+	},
+};
+
+void
+mv_common_spi_init(void)
+{
+	platform_device_register(&spi_mv88f5181_device);
+}
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./tdm.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/tdm.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./tdm.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/tdm.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,82 @@
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fbxspi.h>
+
+#include <asm/arch/regs.h>
+#include <asm/arch/io.h>
+#include <asm/arch/tdm.h>
+#include <asm/delay.h>
+
+#define CONFIG_PCM_CRTL							\
+		MV_TDM_MASTER_PCLK_EXTERNAL | MV_TDM_MASTER_FS_EXTERNAL | \
+		MV_TDM_DATA_POLAR_NEG | MV_TDM_FS_POLAR_NEG |		\
+		MV_TDM_INVERT_FS_HI | MV_TDM_FS_TYPE_SHORT |		\
+		MV_TDM_PCM_SAMPLE_SIZE_1 | MV_TDM_CH_DELAY_DISABLE |	\
+		MV_TDM_CH_QUALITY_DISABLE |				\
+		MV_TDM_QUALITY_POLARITY_NEG |				\
+		MV_TDM_QUALITY_TYPE_TIME_SLOT |	MV_TDM_CS_CTRL_0 |	\
+		MV_TDM_WIDEBAND_OFF | MV_TDM_PERF_GBUS_TWO_ACCESS
+
+#define CONFIG_TIMESLOT_CTRL				\
+		(0 << MV_TDM_CH0_RX_SLOT_OFFS) |	\
+		(0 << MV_TDM_CH0_TX_SLOT_OFFS) |	\
+		(10 << MV_TDM_CH1_RX_SLOT_OFFS) |	\
+		(10 << MV_TDM_CH1_TX_SLOT_OFFS)
+
+static struct fbxspi_device slac_legerity_device =
+{
+	.name		= "slac_legerity",
+	.cs		= 0,
+	.max_speed_hz	= 8192000,
+};
+
+void __init
+mv_tdm_init(struct mv_tdm_data *data)
+{
+	mv_writel(data->tdm_dev_mux, MV_TDM_SPI_MUX_REG);
+
+	/* remove soft reset bit */
+	mv_writel(0, MV_TDM_INT_RESET_SELECT_REG);
+	mv_writel(MV_TDM_MISC_RESET_BIT, MV_TDM_MISC_CTRL_REG);
+
+	/* int cause will not be cleared on read */
+	mv_writel(MV_TDM_CLEAR_INT_ON_ZERO, MV_TDM_INT_RESET_SELECT_REG);
+	/* all interrupt bits latched in status */
+	mv_writel(0x3FFFF, MV_TDM_INT_EVENT_MASK_REG);
+	/* disable interrupts / clear int status register */
+	mv_writel(0, MV_TDM_INT_STATUS_MASK_REG);
+	mv_writel(0, MV_TDM_INT_STATUS_REG);
+
+	/* PCM configuration */
+	mv_writel(CONFIG_PCM_CRTL, MV_TDM_PCM_CTRL_REG);
+	/* channels rx/tx timeslots */
+	mv_writel(CONFIG_TIMESLOT_CTRL, MV_TDM_TIMESLOT_CTRL_REG);
+	/* PCM PCLK freq / number of timeslots */
+	mv_writel(MV_TDM_PCM_2048KHZ, MV_TDM_PCM_CLK_RATE_DIV_REG);
+	mv_writel(MV_TDM_TIMESLOTS_32, MV_TDM_FRAME_TIMESLOT_REG);
+	/* Padding on Rx completion */
+	mv_writel(0, MV_TDM_DUMMY_RX_WRITE_DATA_REG);
+
+	/* undocumented register, to enable tdm */
+/* 	mv_writel(1, MV_TDM_MISC_REG); */
+
+	/* enable tdm/spi */
+
+	/* software reset */
+	mv_writeb(0, MV_TDM_MISC_CTRL_REG);
+	msleep(25);
+	mv_writeb(MV_TDM_MISC_RESET_BIT, MV_TDM_MISC_CTRL_REG);
+	msleep(5);
+
+	/* disable channels */
+	mv_writel(0, MV_TDM_CHANNEL1_ENABLE);
+	mv_writel(0, MV_TDM_CHANNEL2_ENABLE);
+
+	/* register slac spi device */
+	if (fbxspi_register_device(&slac_legerity_device) < 0)
+		printk(KERN_INFO "unable to register slac_legerity device.\n");
+}
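
The PCLK and timeslot values programmed above give the usual 8 kHz PCM framing: a 2048 kHz bit clock carrying 32 timeslots of 8 bits each, with channel 0 on timeslot 0 and channel 1 on timeslot 10 (see CONFIG_TIMESLOT_CTRL). A standalone check of those numbers (not part of the patch):

/* tdm-clock-sketch.c: sanity check of the PCM highway numbers above.
 * Build with: gcc -Wall -o tdm-clock-sketch tdm-clock-sketch.c
 */
#include <stdio.h>

int main(void)
{
	unsigned int pclk_hz = 2048000;		/* MV_TDM_PCM_2048KHZ */
	unsigned int timeslots = 32;		/* MV_TDM_TIMESLOTS_32 */
	unsigned int bits_per_slot = 8;

	printf("frame rate = %u Hz\n", pclk_hz / (timeslots * bits_per_slot));
	printf("ch0 rx/tx timeslot = 0, ch1 rx/tx timeslot = 10 (of %u)\n",
	       timeslots);
	return 0;
}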
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./timer.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/timer.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./timer.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/timer.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,438 @@
+/*
+ * timer.c for freebox
+ * Created by <nschichan@corp.free.fr> on Tue Sep 19 21:15:41 2006
+ * Freebox SA
+ */
+
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/arch/timer.h>
+#include <asm/arch/io.h>
+#include <asm/arch/regs.h>
+#include <asm/arch/irqs.h>
+#include <asm/mach/time.h>
+
+#include "common.h"
+
+struct clk_rate_entry
+{
+	unsigned int cpu_rate;
+	unsigned int ddr_rate;
+};
+
+static struct clk_rate_entry clk_rate_table[0x10] = {
+	[0] =	{ 333333333, 166666666 },
+	[1] =	{ 400000000, 200000000 },
+	[2] =	{ 400000000, 133333333 },
+	[3] =	{ 500000000, 200000000 },
+	[4] =	{ 533333333, 200000000 },
+	[5] =	{ 600000000, 200000000 },
+	[6] =	{ 667000000, 166666666 },
+	[7] =	{ 800000000, 200000000 },
+	[0xc] = { 480000000, 160000000 },
+	[0xd] = { 550000000, 183333333 },
+	[0xe] = { 525000000, 175000000 },
+};
+
+static unsigned long timer0_cycles = 0;
+
+int
+timer_autoreload(int timer, int reload)
+{
+	unsigned int val;
+
+	if (timer > NUM_TIMER)
+		return -EINVAL;
+
+	val = mv_readl(MV_TIMER_CTL_REG);
+	if (reload)
+		val |= 2 << (2 * timer);
+	else
+		val &= ~(2 << (2 * timer));
+
+	mv_writel(val, MV_TIMER_CTL_REG);
+
+	return 0;
+}
+
+int
+timer_enable(int timer, int enable)
+{
+	unsigned int val;
+
+	if (timer > NUM_TIMER)
+		return -EINVAL;
+
+	val = mv_readl(MV_TIMER_CTL_REG);
+	if (enable)
+		val |= 1 << (2 * timer);
+	else
+		val &= ~(1 << (2 * timer));
+
+	mv_writel(val, MV_TIMER_CTL_REG);
+
+	return 0;
+}
+
+int
+timer_set_reload_value(int timer, unsigned int reload_value)
+{
+	if (timer > NUM_TIMER)
+		return -EINVAL;
+
+	mv_writel(reload_value, MV_TIMER_RELOAD_BASE + 8 * timer);
+	mv_writel(reload_value, MV_TIMER_COUNTER_BASE + 8 * timer);
+
+	return 0;
+}
+
+unsigned int
+timer_get_reload_value(int timer)
+{
+	if (timer > NUM_TIMER)
+		return -EINVAL;
+
+	return mv_readl(MV_TIMER_RELOAD_BASE + 8 * timer);
+}
+
+int
+timer_load_counter(int timer, unsigned int reload_value)
+{
+	if (timer > NUM_TIMER)
+		return -EINVAL;
+
+	mv_writel(reload_value, MV_TIMER_COUNTER_BASE + 8 * timer);
+	return 0;
+}
+
+unsigned int
+timer_get_counter(int timer)
+{
+	if (timer > NUM_TIMER)
+		return -EINVAL;
+
+	return mv_readl(MV_TIMER_COUNTER_BASE + 8 * timer);
+}
+
+/*
+ * safe-to-use timer interrupt ack helper.
+ */
+static spinlock_t timer_cause_lock = SPIN_LOCK_UNLOCKED;
+void
+timer_ack_interrupt(int timer)
+{
+	u32 cause;
+	unsigned long flags;
+
+	spin_lock_irqsave(&timer_cause_lock, flags);
+
+	cause = mv_readl(MV_CPU_BRIDGE_INT_CAUSE_REG);
+	cause &= ~TIMER_IRQ_BIT_MASK(timer);
+	mv_writel(cause, MV_CPU_BRIDGE_INT_CAUSE_REG);
+
+	spin_unlock_irqrestore(&timer_cause_lock, flags);
+}
+
+/*
+ * safe-to-use timer interrupt mask/unmask helpers, since they may be
+ * called from almost any context.
+ */
+static spinlock_t timer_mask_lock = SPIN_LOCK_UNLOCKED;
+static inline void
+__timer_do_mask(int timer, int do_mask)
+{
+	u32 mask;
+	unsigned long flags;
+
+	spin_lock_irqsave(&timer_mask_lock, flags);
+
+	mask = mv_readl(MV_CPU_BRIDGE_INT_MASK_REG);
+	if (do_mask)
+		mask &= ~TIMER_IRQ_BIT_MASK(timer);
+	else
+		mask |= TIMER_IRQ_BIT_MASK(timer);
+	mv_writel(mask, MV_CPU_BRIDGE_INT_MASK_REG);
+
+	spin_unlock_irqrestore(&timer_mask_lock, flags);
+}
+
+void
+timer_mask_interrupt(int timer)
+{
+	__timer_do_mask(timer, 1);
+}
+
+void
+timer_unmask_interrupt(int timer)
+{
+	__timer_do_mask(timer, 0);
+}
+
+static irqreturn_t
+mv_timer_interrupt(int irq, void *dev_id)
+{
+	unsigned int cause;
+	unsigned int mask;
+
+	cause = mv_readl(MV_CPU_BRIDGE_INT_CAUSE_REG);
+	mask = mv_readl(MV_CPU_BRIDGE_INT_MASK_REG);
+	if ((cause & mask & TIMER_IRQ_BIT_MASK(TIMER0)) == 0)
+		return IRQ_NONE;
+
+	write_seqlock(&xtime_lock);
+	timer_ack_interrupt(TIMER0);
+	mv_writel(cause, MV_CPU_BRIDGE_INT_CAUSE_REG);
+	timer_tick();
+	write_sequnlock(&xtime_lock);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction timer_irq =
+{
+	.name		= "timer0",
+	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_SHARED,
+	.handler	= mv_timer_interrupt,
+};
+
+unsigned int
+tclk_get_rate(void)
+{
+	unsigned int val;
+	unsigned int tclk_rate;
+
+	/* first autodetect tclk_rate */
+	val = mv_readl(MV_MPP_SAMPLE_AT_RESET_REG);
+	val >>= TCLK_SHIFT;
+	val &= TCLK_MASK;
+
+	switch (val) {
+	case TCLK_133:
+		tclk_rate = 133333333;
+		break;
+	case TCLK_150:
+		tclk_rate = 150000000;
+		break;
+	case TCLK_166:
+		tclk_rate = 166666666;
+		break;
+	default:
+		printk(KERN_ERR "unknown TCLK rate: defaulting to 166mhz.\n");
+		tclk_rate = 166666666;
+		break;
+	}
+
+	return tclk_rate;
+}
+
+unsigned int
+armclk_get_rate(void)
+{
+	unsigned int val;
+	unsigned int armclk_rate;
+	struct clk_rate_entry *cre;
+
+	val = mv_readl(MV_MPP_SAMPLE_AT_RESET_REG);
+	val >>= ARMDDRCLK_SHIFT;
+	val &= ARMDDRCLK_MASK;
+
+	cre = &clk_rate_table[val];
+	if (cre->cpu_rate == 0 || cre->ddr_rate == 0) {
+		printk(KERN_ERR "unable to autodetect ARM rate. "
+		       "using default value 400mhz.\n");
+		armclk_rate = 400;
+	} else
+		armclk_rate = cre->cpu_rate;
+
+	return armclk_rate;
+}
+
+unsigned int
+ddrclk_get_rate(void)
+{
+	unsigned int val;
+	unsigned int ddrclk_rate;
+	struct clk_rate_entry *cre;
+
+	val = mv_readl(MV_MPP_SAMPLE_AT_RESET_REG);
+	val >>= ARMDDRCLK_SHIFT;
+	val &= ARMDDRCLK_MASK;
+
+	cre = &clk_rate_table[val];
+	if (cre->cpu_rate == 0 || cre->ddr_rate == 0) {
+		printk(KERN_ERR "unable to autodetect DDR rate. "
+		       "using default value 133mhz.\n");
+		ddrclk_rate = 133;
+	} else {
+		ddrclk_rate = cre->ddr_rate;
+	}
+
+	return ddrclk_rate;
+}
+
+/* borrowed from the omap tree */
+/* cycles to nsec conversions taken from arch/i386/kernel/timers/timer_tsc.c,
+ * converted to use kHz by Kevin Hilman */
+/* convert from cycles(64bits) => nanoseconds (64bits)
+ *  basic equation:
+ *		ns = cycles / (freq / ns_per_sec)
+ *		ns = cycles * (ns_per_sec / freq)
+ *		ns = cycles * (10^9 / (cpu_khz * 10^3))
+ *		ns = cycles * (10^6 / cpu_khz)
+ *
+ *	Then we use scaling math (suggested by george at mvista.com) to get:
+ *		ns = cycles * (10^6 * SC / cpu_khz / SC
+ *		ns = cycles * cyc2ns_scale / SC
+ *
+ *	And since SC is a constant power of two, we can convert the div
+ *  into a shift.
+ *			-johnstul at us.ibm.com "math is hard, lets go shopping!"
+ */
+static unsigned long cyc2ns_scale;
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline void set_cyc2ns_scale(unsigned long cpu_khz)
+{
+	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
+}
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+
+/*
+ * return the elapsed microseconds since the last timer interrupt.
+ */
+unsigned long
+mv_gettimeoffset(void)
+{
+	uint32_t ticks1;
+	uint32_t ticks2;
+	uint32_t status;
+
+	/*
+	 * we need a coherent tick value and interrupt status. however
+	 * we might read the interrupt status, have the timer
+	 * underflow, and then read the timer. to detect the timer
+	 * underflow condition, we guard the status read between two
+	 * counter reads. if the counter goes down across the status
+	 * read, the status read is valid. if the counter goes up
+	 * across the status read, we need to retry.
+	 */
+	do {
+		ticks1 = timer_get_counter(TIMER0);
+		status = mv_readl(MV_CPU_BRIDGE_INT_CAUSE_REG);
+		ticks2 = timer_get_counter(TIMER0);
+	} while (ticks1 < ticks2);
+
+	ticks2 = timer0_cycles - ticks2;
+
+	if (status & TIMER_IRQ_BIT_MASK(TIMER0))
+		ticks2 += timer0_cycles;
+	return (long)cycles_2_ns(ticks2) / 1000;
+}
+
+/*
+ * return the current time in nanoseconds.
+ */
+unsigned long long
+sched_clock(void)
+{
+	uint64_t ticks64;
+	uint32_t ticks1;
+	uint32_t ticks2;
+
+	/*
+	 * similar hackery as with mv_gettimeoffset.
+	 */
+	do {
+		ticks1 = timer_get_counter(TIMER0);
+		ticks64 = jiffies;
+		ticks2 = timer_get_counter(TIMER0);
+	} while (ticks1 < ticks2);
+
+	ticks64 *= timer0_cycles;
+	ticks64 += timer0_cycles - ticks2;
+
+	return cycles_2_ns(ticks64);
+}
+
+/*
+ * setup TIMER0 and TIMER0 IRQ handler.
+ */
+void __init
+mv_common_timer_init(void)
+{
+	uint32_t val;
+	int err;
+
+	/*
+	 * check watchdog reset state as read in the sample at reset
+	 * register (DEV_HOST_D[1] on the schematics).
+	 */
+	val = mv_readl(MV_MPP_SAMPLE_AT_RESET_REG);
+	if (val & (1 << 1)) {
+		printk(KERN_WARNING "watchdog has been enabled by reset boot "
+		       "configuration.\n");
+	}
+
+	/*
+	 * check watchdog state.
+	 */
+	val = mv_readl(MV_TIMER_CTL_REG);
+	if (val & (1 << (2 * TIMERWDT))) {
+		printk(KERN_WARNING "watchdog has been enabled early "
+		       "(%i sec countdown remain)!\n",
+		       timer_get_counter(TIMERWDT));
+		val &= ~(1 << (2 * TIMERWDT));
+		mv_writel(val, MV_TIMER_CTL_REG);
+	}
+
+	set_cyc2ns_scale(tclk_get_rate() / 1000);
+
+	printk("TCLK clock rate: %u.\n", tclk_get_rate());
+	printk("ARM  clock rate: %u.\n", armclk_get_rate());
+	printk("DDR  clock rate: %u.\n", ddrclk_get_rate());
+
+	mv_writel(0, MV_TIMER_CTL_REG);
+
+	err = setup_irq(TIMER_IRQ, &timer_irq);
+	if (err) {
+		printk("unable to setup timer irq: %i.\n", err);
+		/*
+		 * almost equivalent to committing suicide: with no
+		 * timer interrupt there is nothing more we can do to
+		 * help.
+		 */
+		BUG();
+	}
+
+	/* for sched_clock */
+	timer0_cycles = tclk_get_rate() / HZ;
+
+	/* set up timer0 to tick HZ times per second */
+	timer_autoreload(TIMER0, 1);
+	timer_set_reload_value(TIMER0, tclk_get_rate() / HZ);
+	timer_enable(TIMER0, 1);
+
+	/* unmask the timer interrupt */
+	timer_unmask_interrupt(TIMER0);
+}
+
+EXPORT_SYMBOL(timer_autoreload);
+EXPORT_SYMBOL(timer_enable);
+EXPORT_SYMBOL(timer_set_reload_value);
+EXPORT_SYMBOL(timer_load_counter);
+EXPORT_SYMBOL(timer_get_counter);
+EXPORT_SYMBOL(timer_ack_interrupt);
+EXPORT_SYMBOL(timer_mask_interrupt);
+EXPORT_SYMBOL(timer_unmask_interrupt);
+
+EXPORT_SYMBOL(tclk_get_rate);
+EXPORT_SYMBOL(ddrclk_get_rate);
+EXPORT_SYMBOL(armclk_get_rate);
diff -Nruw linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./watchdog.c linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/watchdog.c
--- linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81./watchdog.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/arch/arm/mach-mv88fxx81/watchdog.c	2010-12-29 19:30:05.221437474 +0100
@@ -0,0 +1,31 @@
+/*
+ * watchdog.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Feb  7 23:43:54 2007
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <asm/arch/regs.h>
+
+static struct resource wdt_resources[] = {
+	{
+		.start		= MV_TIMER_REGS_BASE,
+		.end		= MV_TIMER_REGS_BASE + MV_TIMER_REGS_SIZE,
+		.flags		= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device wdt_device = {
+	.name		= "mv88f5181_wdt",
+	.id		= -1,
+	.resource	= wdt_resources,
+	.num_resources	= ARRAY_SIZE(wdt_resources),
+};
+
+int
+mv_common_wdt_init(void)
+{
+	return platform_device_register(&wdt_device);
+}
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/config	2011-09-09 16:15:35.570305242 +0200
@@ -0,0 +1,1128 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.20.14
+# Fri Sep  9 16:15:06 2011
+#
+CONFIG_ARM=y
+# CONFIG_GENERIC_TIME is not set
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_IGNORE_COMPILE_INFO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_IPC_NS is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_SYSFS_DEPRECATED=y
+# CONFIG_RELAY is not set
+# CONFIG_INITRAMFS_USE_GZIP is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+# CONFIG_ELF_CORE is not set
+CONFIG_BASE_FULL=y
+# CONFIG_FUTEX is not set
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_SLAB=y
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_KMOD is not set
+
+#
+# Block layer
+#
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+CONFIG_ARCH_MV88FXX81=y
+
+#
+# Marvell Board Types
+#
+# CONFIG_BOARD_ORION is not set
+CONFIG_BOARD_FBXO1_A=y
+# CONFIG_BOARD_FBX_NODE is not set
+CONFIG_MV6131_SWITCH=y
+
+#
+# FBXO1 A Flash configuration
+#
+CONFIG_FBXO1_A_FLASH_BUS_WIDTH=2
+CONFIG_FBXO1_A_FBXMTD_ALL_RW=y
+# CONFIG_FBXO1_A_FBXMTD_NO_CRC is not set
+CONFIG_FBXO1_A_FBXMTD_READ_BANK1_TAG=y
+# CONFIG_FBXO1_A_FBXMTD_MAP_UBOOT is not set
+CONFIG_FBXOA_1_FBXMTD_MAP_IPL=y
+
+#
+# FBX Node Flash configuration
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+
+#
+# Bus support
+#
+CONFIG_PCI=y
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_FAKE=y
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+# CONFIG_HOTPLUG_PCI_SHPC is not set
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=:::::eth0:dhcp"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+# CONFIG_APM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_NETSKBPAD=64
+# CONFIG_NETDEBUG is not set
+CONFIG_NETRXTHREAD=y
+CONFIG_NETRXTHREAD_RX_QUEUE=1
+CONFIG_NETRXTHREAD_MAX_PROCESS=16
+CONFIG_SKB_RECYCLE=y
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_FFN=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_DHCP_IDENTIFIER="linux-fbxo1_a"
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+CONFIG_INET_XFRM_GC_THRESH=1024
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+
+#
+# IP: Virtual Server Configuration
+#
+# CONFIG_IP_VS is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_SIT_FBX6TO4=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+# CONFIG_BRIDGE_NETFILTER is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK is not set
+CONFIG_NF_CONNTRACK_ENABLED=y
+# CONFIG_NF_CONNTRACK_SUPPORT is not set
+CONFIG_IP_NF_CONNTRACK_SUPPORT=y
+CONFIG_IP_NF_CONNTRACK=y
+CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNTRACK is not set
+# CONFIG_NETFILTER_XT_MATCH_DCCP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+# CONFIG_NETFILTER_XT_MATCH_MAC is not set
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_IP_NF_CT_ACCT is not set
+# CONFIG_IP_NF_CONNTRACK_MARK is not set
+# CONFIG_IP_NF_CONNTRACK_EVENTS is not set
+# CONFIG_IP_NF_CT_PROTO_SCTP is not set
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+# CONFIG_IP_NF_NETBIOS_NS is not set
+CONFIG_IP_NF_TFTP=m
+# CONFIG_IP_NF_AMANDA is not set
+CONFIG_IP_NF_PPTP=m
+# CONFIG_IP_NF_H323 is not set
+# CONFIG_IP_NF_SIP is not set
+CONFIG_IP_NF_TPROXY=y
+# CONFIG_IP_NF_MATCH_TPROXY is not set
+CONFIG_IP_NF_TARGET_TPROXY=y
+# CONFIG_IP_NF_QUEUE is not set
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_IPRANGE is not set
+# CONFIG_IP_NF_MATCH_TOS is not set
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+# CONFIG_IP_NF_MATCH_OWNER is not set
+# CONFIG_IP_NF_MATCH_ADDRTYPE is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_LOG is not set
+# CONFIG_IP_NF_TARGET_ULOG is not set
+# CONFIG_IP_NF_TARGET_TCPMSS is not set
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+# CONFIG_IP_NF_TARGET_REDIRECT is not set
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_SAME is not set
+# CONFIG_IP_NF_NAT_NRES is not set
+# CONFIG_IP_NF_NAT_SNMP_BASIC is not set
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_TFTP=m
+CONFIG_IP_NF_NAT_PPTP=m
+CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_TOS is not set
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+
+#
+# IPv6: Netfilter Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP6_NF_QUEUE is not set
+CONFIG_IP6_NF_IPTABLES=y
+# CONFIG_IP6_NF_MATCH_RT is not set
+# CONFIG_IP6_NF_MATCH_OPTS is not set
+# CONFIG_IP6_NF_MATCH_FRAG is not set
+# CONFIG_IP6_NF_MATCH_HL is not set
+# CONFIG_IP6_NF_MATCH_OWNER is not set
+# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
+# CONFIG_IP6_NF_MATCH_AH is not set
+# CONFIG_IP6_NF_MATCH_EUI64 is not set
+CONFIG_IP6_NF_FILTER=y
+# CONFIG_IP6_NF_TARGET_LOG is not set
+# CONFIG_IP6_NF_TARGET_REJECT is not set
+# CONFIG_IP6_NF_MANGLE is not set
+# CONFIG_IP6_NF_RAW is not set
+
+#
+# Bridge: Netfilter Configuration
+#
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+# CONFIG_DECNET is not set
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+# CONFIG_FBXATM is not set
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=y
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+CONFIG_WIRELESS_EXT=y
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Freebox Memory Technology Devices (FBXMTD)
+#
+
+#
+# Core drivers
+#
+CONFIG_FREEBOX_MTD=y
+CONFIG_FREEBOX_MTD_BACKEND_AMD=y
+# CONFIG_FREEBOX_MTD_BACKEND_INTEL is not set
+CONFIG_FREEBOX_MTD_USE_DMAMUX=y
+CONFIG_FREEBOX_MTD_BLK=y
+CONFIG_FREEBOX_MTD_CHAR=y
+
+#
+# Mapping drivers
+#
+CONFIG_FREEBOX_MTD_MAP_DRV_GENERIC=y
+# CONFIG_FREEBOX_MTD_MAP_DRV_BCM963XX is not set
+# CONFIG_FREEBOX_MTD_MAP_IOCTL is not set
+
+#
+# Freebox DMA muxing support
+#
+CONFIG_FREEBOX_DMAMUX=y
+CONFIG_FREEBOX_DMAMUX_MAX_PRIO=1
+
+#
+# DMA devices
+#
+CONFIG_MV88FXX81_DMAMUX=y
+
+#
+# Freebox GPIO
+#
+CONFIG_FREEBOX_GPIO=y
+
+#
+# Freebox Panel Support
+#
+CONFIG_FREEBOX_PANEL=y
+# CONFIG_FREEBOX_PANEL_HW_PIC_FBX is not set
+CONFIG_FREEBOX_PANEL_HW_PT6959=y
+# CONFIG_FREEBOX_PANEL_HW_PT6311 is not set
+
+#
+# Freebox SPI support
+#
+CONFIG_FREEBOX_SPI=y
+CONFIG_FREEBOX_SPI_HW_MV88F5181=y
+
+#
+# Freebox Watchdog Support
+#
+CONFIG_FREEBOX_WATCHDOG=m
+# CONFIG_FREEBOX_WATCHDOG_CHAR is not set
+CONFIG_FREEBOX_WATCHDOG_MV88F5181=m
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_NETLINK is not set
+
+#
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+#
+# CONFIG_ATA is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_NET_PCI is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+CONFIG_MV88FXX81_ETH=y
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_QLA3XXX is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+# CONFIG_NET_WIRELESS_RTNETLINK is not set
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+# CONFIG_STRIP is not set
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_HERMES is not set
+# CONFIG_ATMEL is not set
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+# CONFIG_PRISM54 is not set
+# CONFIG_HOSTAP is not set
+CONFIG_NET_WIRELESS=y
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+CONFIG_TANGO2_PCINET_D=y
+CONFIG_TANGO2_PCINET_D_DMAMUX=y
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+# CONFIG_I2C_CHARDEV is not set
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+CONFIG_I2C_MV64XXX=y
+# CONFIG_I2C_GPIO is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+CONFIG_SENSORS_EEPROM=m
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
+
+#
+# Misc devices
+#
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+CONFIG_CRASHZONE=y
+
+#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# Real Time Clock
+#
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+
+#
+# Telephony Support
+#
+CONFIG_PHONE=m
+# CONFIG_PHONE_IXJ is not set
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4DEV_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RAMFS_XATTR is not set
+# CONFIG_RAMFS_XATTR_USER is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_CTMP_FS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_SQUASHFS_VMALLOC is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Distributed Lock Manager
+#
+# CONFIG_DLM is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_MUST_CHECK=y
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_DEBUG_USER is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_ECB is not set
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_IOMAP_COPY=y
+CONFIG_SQLZMA_UNCOMP=y
+CONFIG_BUILTIN_FBXSERIAL=y
+CONFIG_CROSS_PATH="/opt/toolchains/armhwfloat-uclibc-std-0.9.30-gcc-4.3.2-binutils-2.19.50.0.1/bin/arm-linux-"
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxdmamux./fbxdmamux.c linux-2.6.20.14-fbx/drivers/fbxdmamux/fbxdmamux.c
--- linux-2.6.20.14-fbx/drivers/fbxdmamux./fbxdmamux.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxdmamux/fbxdmamux.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,529 @@
+/*
+ * simple, hardware-independent DMA request muxer/scheduler
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fbxdmamux.h>
+#include <linux/dma-mapping.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+
+#include "fbxdmamux_priv.h"
+
+#define PFX	"fbxdmamux: "
+
+#define REQPOOL_COUNT	64
+
+/* prevent use before module_init is called */
+static unsigned int init_done = 0;
+
+/* request pool */
+static struct list_head reqpool;
+static spinlock_t reqpool_lock;
+
+/* current hardware device, if any */
+static spinlock_t dev_lock;
+static struct fbxdmamux_device *active_dev = NULL;
+
+/* device under test */
+static struct mutex totest_dev_mutex;
+static struct fbxdmamux_device *totest_dev = NULL;
+static struct work_struct totest_work;
+
+/*
+ * allocate a new request
+ */
+static struct fbxdmamux_req *alloc_req(unsigned int priv_size)
+{
+	unsigned int size;
+	size = sizeof (struct fbxdmamux_req) + priv_size + REQ_PRIV_ALIGN;
+	return kmalloc(size, GFP_KERNEL);
+}
+
+/*
+ * return a new request from pool
+ */
+struct fbxdmamux_req *fbxdmamux_req_from_pool(void)
+{
+	struct fbxdmamux_req *req;
+
+	if (unlikely(!init_done))
+		return NULL;
+
+	spin_lock_bh(&reqpool_lock);
+	if (list_empty(&reqpool)) {
+		if (active_dev && printk_ratelimit())
+			printk(KERN_WARNING PFX "request pool is empty\n");
+		spin_unlock_bh(&reqpool_lock);
+		return NULL;
+	}
+	req = list_entry(reqpool.next, struct fbxdmamux_req, list);
+	list_del(&req->list);
+	spin_unlock_bh(&reqpool_lock);
+	return req;
+}
+
+/*
+ * return request to pool
+ */
+static inline void add_to_pool(struct fbxdmamux_req *req)
+{
+	spin_lock_bh(&reqpool_lock);
+	list_add(&req->list, &reqpool);
+	spin_unlock_bh(&reqpool_lock);
+}
+
+/*
+ * fill the request pool
+ */
+static void fill_req_pool(unsigned int priv_size)
+{
+	int i;
+
+	for (i = 0; i < REQPOOL_COUNT; i++) {
+		struct fbxdmamux_req *req;
+		req = alloc_req(priv_size);
+		if (!req)
+			return;
+		list_add(&req->list, &reqpool);
+	}
+}
+
+/*
+ * free all requests in the pool
+ */
+static void free_req_pool(void)
+{
+	struct fbxdmamux_req *req, *req2;
+
+	list_for_each_entry_safe(req, req2, &reqpool, list) {
+		list_del(&req->list);
+		kfree(req);
+	}
+}
+
+/*
+ * return a cookie that is used to guarantee ordering of requests
+ * (and of their completions)
+ */
+int fbxdmamux_alloc_channel_cookie(void)
+{
+	static unsigned int cookie = 0;
+	cookie++;
+	if (!cookie)
+		cookie++;
+	return cookie;
+}
+
+/*
+ * submit given request to given device
+ */
+static int __fbxdmamux_submit(struct fbxdmamux_device *dev,
+			      struct fbxdmamux_req *req)
+{
+	int ret;
+
+	/* basic sanity check on request */
+	if (unlikely(req->len == 0)) {
+		printk(KERN_INFO PFX "rejected zero size request\n");
+		add_to_pool(req);
+		return 1;
+	}
+
+	if (unlikely(req->priority >= FBXDMAMUX_MAX_PRIO)) {
+		printk(KERN_INFO PFX "invalid priority %d\n", req->priority);
+		add_to_pool(req);
+		return 1;
+	}
+
+	/* get hw address and do cache stuff if needed */
+	if (!(req->flags & FBXDMAMUX_FLAG_SRC_HW)) {
+		req->hw_src = dma_map_single(dev->dev, req->virt_src,
+					     req->len, DMA_TO_DEVICE);
+	}
+
+	if (!(req->flags & FBXDMAMUX_FLAG_DST_HW)) {
+		req->hw_dst = dma_map_single(dev->dev, req->virt_dst,
+					     req->len, DMA_FROM_DEVICE);
+	}
+
+	ret = dev->submit(dev, req);
+	if (ret)
+		add_to_pool(req);
+	return ret;
+}
+
+/*
+ * submit a request to given device, sleep for completion, and return
+ * transfer status
+ */
+struct dma_completion {
+	struct completion cpt;
+	int error;
+};
+
+static void dma_completion_cb(void *cb_data, int error)
+{
+	struct dma_completion *dcpt = (struct dma_completion *)cb_data;
+
+	dcpt->error = error;
+	complete(&dcpt->cpt);
+}
+
+static int __fbxdmamux_submit_and_sleep(struct fbxdmamux_device *dev,
+					struct fbxdmamux_req *req,
+					unsigned int timeout,
+					spinlock_t *lock)
+{
+	struct dma_completion dcpt;
+	int ret;
+
+	init_completion(&dcpt.cpt);
+	req->cb_data = &dcpt;
+	req->callback = dma_completion_cb;
+	ret = __fbxdmamux_submit(dev, req);
+	if (lock)
+		spin_unlock_bh(lock);
+	else
+		local_bh_enable();
+
+	if (ret)
+		return ret;
+
+	if (timeout) {
+		ret = wait_for_completion_timeout(&dcpt.cpt, timeout);
+		if (!ret)
+			printk(KERN_ERR PFX "transfer timeout\n");
+		return (ret != 0) ? dcpt.error : 1;
+	}
+	wait_for_completion(&dcpt.cpt);
+	return dcpt.error;
+}
+
+/*
+ * submit given request to active device if any
+ */
+int fbxdmamux_submit(struct fbxdmamux_req *req)
+{
+	int ret;
+
+	if (unlikely(!init_done)) {
+		add_to_pool(req);
+		return 1;
+	}
+
+	spin_lock_bh(&dev_lock);
+	if (!active_dev) {
+		add_to_pool(req);
+		spin_unlock_bh(&dev_lock);
+		return 1;
+	}
+
+	ret = __fbxdmamux_submit(active_dev, req);
+	spin_unlock_bh(&dev_lock);
+	return ret;
+}
+
+/*
+ * submit given request to active device if any, sleep for completion,
+ * and return transfer status
+ */
+int fbxdmamux_submit_and_sleep(struct fbxdmamux_req *req, unsigned int timeout)
+{
+	int ret;
+
+	if (unlikely(!init_done)) {
+		add_to_pool(req);
+		return 1;
+	}
+
+	spin_lock_bh(&dev_lock);
+	if (!active_dev) {
+		add_to_pool(req);
+		spin_unlock_bh(&dev_lock);
+		return 1;
+	}
+
+	ret = __fbxdmamux_submit_and_sleep(active_dev, req, timeout,
+					   &dev_lock);
+	return ret;
+}
+
+/*
+ * flush channel associated with given cookie
+ */
+void fbxdmamux_flush_channel(unsigned int cookie)
+{
+	if (unlikely(!init_done))
+		return;
+
+	spin_lock_bh(&dev_lock);
+	if (!active_dev) {
+		spin_unlock_bh(&dev_lock);
+		return;
+	}
+
+	active_dev->flush_channel(active_dev, cookie);
+	spin_unlock_bh(&dev_lock);
+}
+
+/*
+ * called by device when request is complete
+ */
+void fbxdmamux_complete(struct fbxdmamux_device *dma_dev,
+			struct fbxdmamux_req *req, int error)
+{
+	void (*callback)(void *cb_data, int error);
+	void *cb_data;
+
+	if (!(req->flags & FBXDMAMUX_FLAG_SRC_HW))
+		dma_unmap_single(dma_dev->dev, req->hw_src,
+				 req->len, DMA_TO_DEVICE);
+
+	if (!(req->flags & FBXDMAMUX_FLAG_DST_HW))
+		dma_unmap_single(dma_dev->dev, req->hw_dst,
+				 req->len, DMA_FROM_DEVICE);
+
+	callback = req->callback;
+	cb_data = req->cb_data;
+	add_to_pool(req);
+	if (callback)
+		callback(cb_data, error);
+}
+
+/*
+ * workqueue callback, used to check that the device is working before
+ * we allow it to become the active device
+ */
+#define TEST_SIZE	PAGE_SIZE
+#define TEST_TIMEOUT	(HZ * 5)
+#define TEST_REQS	4
+
+static void fbxdmamux_test_device(struct work_struct *w)
+{
+	struct fbxdmamux_req *reqs[TEST_REQS];
+	char c, *bufs[TEST_REQS][2];
+	int i, j, ret;
+
+	mutex_lock(&totest_dev_mutex);
+	memset(bufs, 0, sizeof (bufs));
+	memset(reqs, 0, sizeof (reqs));
+
+	/* unregister might have been called before we run */
+	if (!totest_dev || active_dev)
+		goto out;
+
+	/* allocate memory zone for transfer */
+	for (i = 0; i < TEST_REQS; i++) {
+		for (j = 0; j < 2; j++) {
+			bufs[i][j] = kmalloc(TEST_SIZE, GFP_KERNEL);
+			if (!bufs[i][j]) {
+				printk(KERN_ERR PFX "unable to allocate "
+				       "memory for testing\n");
+				goto out_bad;
+			}
+		}
+	}
+
+	fill_req_pool(totest_dev->req_priv_size);
+
+	for (i = 0; i < TEST_REQS; i++) {
+		reqs[i] = fbxdmamux_req_from_pool();
+		if (!reqs[i]) {
+			printk(KERN_ERR PFX "not enough request in pool "
+			       "to test device\n");
+			goto out_bad;
+		}
+
+		reqs[i]->chan_cookie = 1;
+		reqs[i]->priority = 0;
+		reqs[i]->virt_src = bufs[i][0];
+		reqs[i]->virt_dst = bufs[i][1];
+		reqs[i]->len = TEST_SIZE;
+		reqs[i]->flags = 0;
+		reqs[i]->callback = NULL;
+
+		/* put some junk in src */
+		for (c = 0, j = 0; j < TEST_SIZE; j++)
+			bufs[i][0][j] = c++ + i;
+	}
+
+	printk(KERN_INFO PFX "starting test of device %s...\n",
+	       totest_dev->get_name(totest_dev));
+
+	/* burst first requests */
+	for (i = 0; i < TEST_REQS - 1; i++) {
+		local_bh_disable();
+		ret = __fbxdmamux_submit(totest_dev, reqs[i]);
+		local_bh_enable();
+		reqs[i] = NULL;
+		if (ret) {
+			printk(KERN_ERR PFX "test: submit failed\n");
+			goto out_bad;
+		}
+	}
+
+	/* request transfer and wait for its completion for the last
+	 * one */
+	local_bh_disable();
+	ret = __fbxdmamux_submit_and_sleep(totest_dev, reqs[TEST_REQS - 1],
+					   TEST_TIMEOUT, NULL);
+	reqs[TEST_REQS - 1] = NULL;
+	if (ret) {
+		printk(KERN_ERR PFX "test: submit failed\n");
+		goto out_bad;
+	}
+
+	/* compare buffers */
+	for (i = 0; i < TEST_REQS; i++) {
+		if (memcmp(bufs[i][0], bufs[i][1], TEST_SIZE)) {
+			printk(KERN_ERR PFX "test: src and dst buffers %d are "
+			       "not the same\n", i);
+			goto out_bad;
+		}
+	}
+
+	/* set new active device */
+	printk(KERN_INFO PFX "test pass for device %s, set as active\n",
+	       totest_dev->get_name(totest_dev));
+	spin_lock_bh(&dev_lock);
+	active_dev = totest_dev;
+	spin_unlock_bh(&dev_lock);
+
+out:
+	for (i = 0; i < TEST_REQS; i++)
+		for (j = 0; j < 2; j++)
+			kfree(bufs[i][j]);
+	totest_dev = NULL;
+	mutex_unlock(&totest_dev_mutex);
+	return;
+
+out_bad:
+	printk(KERN_ERR PFX "test of device %s FAILED\n",
+	       totest_dev->get_name(totest_dev));
+	for (i = 0; i < TEST_REQS; i++) {
+		if (reqs[i])
+			add_to_pool(reqs[i]);
+		for (j = 0; j < 2; j++)
+			kfree(bufs[i][j]);
+	}
+	free_req_pool();
+
+	totest_dev = NULL;
+	mutex_unlock(&totest_dev_mutex);
+}
+
+
+/*
+ * register given device; only one device at a time is supported. The
+ * device is scheduled for testing.
+ */
+int fbxdmamux_register_device(struct fbxdmamux_device *dev)
+{
+	mutex_lock(&totest_dev_mutex);
+
+	/* only one device under test at a time */
+	if (totest_dev) {
+		mutex_unlock(&totest_dev_mutex);
+		return -EBUSY;
+	}
+
+	/* one active device at a time */
+	spin_lock_bh(&dev_lock);
+	if (active_dev) {
+		spin_unlock_bh(&dev_lock);
+		mutex_unlock(&totest_dev_mutex);
+		return -EBUSY;
+	}
+	spin_unlock_bh(&dev_lock);
+
+	totest_dev = dev;
+	schedule_work(&totest_work);
+	mutex_unlock(&totest_dev_mutex);
+
+	return 0;
+}
+
+/*
+ * return list length
+ */
+static int list_length(struct list_head *head)
+{
+	struct list_head *tmp;
+	int length;
+
+	length = 0;
+	list_for_each(tmp, head)
+		length++;
+	return length;
+}
+
+/*
+ * unregister given device
+ */
+void fbxdmamux_unregister_device(struct fbxdmamux_device *dev)
+{
+again:
+	mutex_lock(&totest_dev_mutex);
+
+	/* if device is scheduled for testing, let the test finish
+	 * first */
+	if (dev == totest_dev) {
+		mutex_unlock(&totest_dev_mutex);
+		flush_scheduled_work();
+		goto again;
+	}
+
+	spin_lock_bh(&dev_lock);
+	if (dev == active_dev)
+		active_dev = NULL;
+	spin_unlock_bh(&dev_lock);
+	/* wait for all requests to go back to the pool */
+	while (list_length(&reqpool) != REQPOOL_COUNT)
+		msleep(1);
+	free_req_pool();
+	mutex_unlock(&totest_dev_mutex);
+}
+
+/*
+ * module init callback
+ */
+static int __init fbxdmamux_init(void)
+{
+	printk(KERN_INFO PFX "Freebox DMA request muxer API\n");
+
+	spin_lock_init(&dev_lock);
+	mutex_init(&totest_dev_mutex);
+	INIT_WORK(&totest_work, fbxdmamux_test_device);
+
+	/* initialize the request pool */
+	spin_lock_init(&reqpool_lock);
+	INIT_LIST_HEAD(&reqpool);
+	wmb();
+	init_done = 1;
+	return 0;
+}
+
+/*
+ * module remove callback
+ */
+static void __exit fbxdmamux_exit(void)
+{
+}
+
+module_init(fbxdmamux_init);
+module_exit(fbxdmamux_exit);
+
+EXPORT_SYMBOL(fbxdmamux_req_from_pool);
+EXPORT_SYMBOL(fbxdmamux_submit);
+EXPORT_SYMBOL(fbxdmamux_submit_and_sleep);
+EXPORT_SYMBOL(fbxdmamux_alloc_channel_cookie);
+EXPORT_SYMBOL(fbxdmamux_flush_channel);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+
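For reference, a minimal client-side sketch of the request API exported by
fbxdmamux.c above (fbxdmamux_alloc_channel_cookie, fbxdmamux_req_from_pool,
fbxdmamux_submit_and_sleep). The caller name, buffers and timeout are
invented for illustration; this block is not part of the patch:

/*
 * usage sketch (illustration only): a hypothetical caller copies one
 * buffer to another through the active DMA backend, keeping ordering
 * on a single channel via a cookie.
 */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/fbxdmamux.h>

static int example_dma_copy(void *dst, void *src, unsigned int len)
{
	struct fbxdmamux_req *req;
	static unsigned int cookie;

	if (!cookie)
		cookie = fbxdmamux_alloc_channel_cookie();

	req = fbxdmamux_req_from_pool();
	if (!req)
		return 1;		/* no active backend or pool exhausted */

	req->chan_cookie = cookie;	/* keep ordering on one channel */
	req->priority = 0;
	req->virt_src = src;		/* virtual addresses: the core does */
	req->virt_dst = dst;		/* the dma_map_single() calls */
	req->len = len;
	req->flags = 0;

	/* submit and sleep until completion, 5 second timeout */
	return fbxdmamux_submit_and_sleep(req, 5 * HZ);
}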
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxdmamux./fbxdmamux_mv88fxx81.c linux-2.6.20.14-fbx/drivers/fbxdmamux/fbxdmamux_mv88fxx81.c
--- linux-2.6.20.14-fbx/drivers/fbxdmamux./fbxdmamux_mv88fxx81.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxdmamux/fbxdmamux_mv88fxx81.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,573 @@
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/fbxdmamux.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/arch/io.h>
+#include <asm/arch/regs.h>
+
+#include "fbxdmamux_priv.h"
+
+#define PFX	"mv88fxx81_dma: "
+
+#define CHANNEL_COUNT	4
+
+struct mvdma_desc
+{
+	uint32_t byte_count;
+	uint32_t src;
+	uint32_t dst;
+	uint32_t next;
+};
+
+#define DESC_COUNT	64
+#define DESC_ALIGN	16
+#define DESC_SIZE	(DESC_COUNT * sizeof (struct mvdma_desc) + DESC_ALIGN)
+
+struct mvdma_chan
+{
+	struct mvdma_dev *dma_dev;
+	unsigned int idx;
+	struct tasklet_struct dmadone_tasklet;
+	struct fbxdmamux_req *active_req;
+	struct list_head activelist;
+	struct list_head donelist;
+
+	struct list_head reqlist;
+	struct mvdma_desc *descs_aligned;
+	dma_addr_t descs_hw;
+	dma_addr_t descs_hw_aligned;
+	struct mvdma_desc *descs;
+};
+
+struct mvdma_dev
+{
+	void __iomem *base;
+	struct fbxdmamux_device common;
+	int last_channel;
+	struct mvdma_chan chandata[CHANNEL_COUNT];
+	char chan_name[CHANNEL_COUNT][16];
+};
+
+/* basic list of known interfaces for dma source/destination */
+enum mvdma_interface {
+	E_TARGET_SDRAM = 0,
+	E_TARGET_PCI,
+	E_TARGET_DEVICE,
+};
+
+#define MVDMA_REQ_FLAGS_ERROR	(1 << 0)
+
+struct mvdma_req
+{
+	unsigned char target_src;
+	unsigned char target_dst;
+	unsigned int flags;
+};
+
+/*
+ * helpers to access dma registers
+ */
+static inline u32 mv_dma_read(struct mvdma_dev *dev, unsigned int offset)
+{
+	return readl(dev->base + offset - MV_IDMA_REGS_BASE);
+}
+
+static inline void mv_dma_write(struct mvdma_dev *dev,
+				unsigned int offset, u32 data)
+{
+	writel(data, dev->base + offset - MV_IDMA_REGS_BASE);
+}
+
+/*
+ * program descriptor for given request
+ */
+static inline void mv_fill_desc(struct mvdma_desc *desc,
+				struct fbxdmamux_req *req)
+{
+	desc->byte_count = req->len;
+	desc->src = req->hw_src;
+	desc->dst = req->hw_dst;
+}
+
+/*
+ * start pending requests on the given channel
+ */
+static int mv_dma_start(struct mvdma_chan *mv_chan)
+{
+	struct fbxdmamux_req *first_req, *req;
+	struct mvdma_desc *mv_desc;
+	struct mvdma_req *mv_req;
+	unsigned char target_src, target_dst, req_flags;
+	unsigned int ctl, cur_desc;
+	unsigned long flags;
+
+	if (list_empty(&mv_chan->reqlist))
+		return 0;
+
+	/* prepare descriptor for first request in list */
+	req = list_entry(mv_chan->reqlist.next, struct fbxdmamux_req, list);
+	first_req = req;
+	INIT_LIST_HEAD(&mv_chan->activelist);
+	list_move(&req->list, &mv_chan->activelist);
+	mv_desc = &mv_chan->descs_aligned[0];
+	mv_fill_desc(mv_desc, req);
+
+	/* group requests if they have the same source and dest target
+	 * ids, and the same transfer flags */
+	mv_req = req_priv(req);
+	target_src = mv_req->target_src;
+	target_dst = mv_req->target_dst;
+	req_flags = req->flags & (FBXDMAMUX_FLAG_DST_NO_INCR |
+				  FBXDMAMUX_FLAG_SRC_NO_INCR);
+
+	for (cur_desc = 1; cur_desc < DESC_COUNT; cur_desc++) {
+
+		if (list_empty(&mv_chan->reqlist))
+			break;
+
+		req = list_entry(mv_chan->reqlist.next,
+				 struct fbxdmamux_req, list);
+		mv_req = req_priv(req);
+		if (mv_req->target_src != target_src ||
+		    mv_req->target_dst != target_dst ||
+		    mv_req->flags != req_flags)
+			break;
+
+		/* ok we can chain last desc with new one */
+		mv_desc->next = (u32)mv_chan->descs_hw_aligned +
+			(cur_desc * sizeof (*mv_desc));
+		mv_desc = &mv_chan->descs_aligned[cur_desc];
+		list_move_tail(&req->list, &mv_chan->activelist);
+		mv_fill_desc(mv_desc, req);
+	}
+
+	mv_desc->next = 0x0;
+
+	ctl = mv_dma_read(mv_chan->dma_dev, MV_IDMA_CTRL_REG(mv_chan->idx));
+	ctl |= MV_IDMA_CTRL_CHANNEL_ENABLED | MV_IDMA_CTRL_FETCH_NEXT;
+
+	ctl &= ~(MV_IDMA_CTRL_DST_HOLD | MV_IDMA_CTRL_SRC_HOLD);
+	if (req_flags & FBXDMAMUX_FLAG_DST_NO_INCR)
+		ctl |= MV_IDMA_CTRL_DST_HOLD;
+	if (req_flags & FBXDMAMUX_FLAG_SRC_NO_INCR)
+		ctl |= MV_IDMA_CTRL_SRC_HOLD;
+
+	ctl &= ~(MV_IDMA_CTRL_DBURST_MAX_MASK | MV_IDMA_CTRL_SBURST_MAX_MASK);
+	switch (target_src) {
+	case E_TARGET_SDRAM:
+	case E_TARGET_PCI:
+		ctl |= MV_IDMA_CTRL_SBURST_MAX_128B;
+		break;
+	case E_TARGET_DEVICE:
+		ctl |= MV_IDMA_CTRL_SBURST_MAX_8B;
+		break;
+	}
+
+	switch (target_dst) {
+	case E_TARGET_SDRAM:
+	case E_TARGET_PCI:
+		ctl |= MV_IDMA_CTRL_DBURST_MAX_128B;
+		break;
+	case E_TARGET_DEVICE:
+		ctl |= MV_IDMA_CTRL_DBURST_MAX_8B;
+		break;
+	}
+
+	local_irq_save(flags);
+	mv_chan->active_req = first_req;
+	wmb();
+	/* kick channel */
+	mv_dma_write(mv_chan->dma_dev, MV_IDMA_NEXT_DESC_REG(mv_chan->idx),
+		     (u32)mv_chan->descs_hw_aligned);
+	mv_dma_write(mv_chan->dma_dev, MV_IDMA_CTRL_REG(mv_chan->idx), ctl);
+	local_irq_restore(flags);
+	return 1;
+}
+
+/*
+ * tasklet run on dma completion
+ */
+static void mv_dma_done(unsigned long data)
+{
+	struct mvdma_chan *mv_chan;
+	struct mvdma_dev *mv_dev;
+	struct mvdma_req *mv_req;
+	struct fbxdmamux_req *req;
+	unsigned long flags;
+
+	mv_chan = (struct mvdma_chan *)data;
+	mv_dev = mv_chan->dma_dev;
+
+	do {
+		int error;
+
+		local_irq_save(flags);
+		if (list_empty(&mv_chan->donelist)) {
+			local_irq_restore(flags);
+			break;
+		}
+		req = list_entry(mv_chan->donelist.next,
+				 struct fbxdmamux_req, list);
+		list_del(&req->list);
+		local_irq_restore(flags);
+
+		mv_req = req_priv(req);
+		error = (mv_req->flags & MVDMA_REQ_FLAGS_ERROR) ? 1 : 0;
+		fbxdmamux_complete(&mv_dev->common, req, error);
+	} while (1);
+
+	if (!mv_chan->active_req)
+		mv_dma_start(mv_chan);
+}
+
+
+/*
+ * dma completion interrupt
+ */
+static const char *mv_dma_error_str[8] = {
+	[1] = "Address miss",
+	[2] = "Access protection",
+	[3] = "Write protection",
+	[4] = "CPU owned descriptor",
+};
+
+static irqreturn_t mv_dma_done_interrupt(int irq, void *dev_id)
+{
+	struct mvdma_chan *mv_chan = dev_id;
+	struct mvdma_dev *mv_dev = mv_chan->dma_dev;
+	struct mvdma_req *mv_req;
+	unsigned int cause, err_mask;
+
+	/* ack interrupt */
+	cause = mv_dma_read(mv_dev, MV_IDMA_INTR_CAUSE_REG);
+	mv_dma_write(mv_dev, MV_IDMA_INTR_CAUSE_REG, ~cause);
+
+	if (unlikely(!mv_chan->active_req)) {
+		printk(KERN_ERR PFX "interrupt while no dma active\n");
+		return IRQ_NONE;
+	}
+	mv_chan->active_req = NULL;
+
+	err_mask = MV_IDMA_INTR_ADDR_MISS(mv_chan->idx) |
+		MV_IDMA_INTR_ACCESS_PROT(mv_chan->idx) |
+		MV_IDMA_INTR_WR_PROT(mv_chan->idx);
+
+	if (unlikely(cause & err_mask)) {
+		u32 err_address, err_select;
+		struct fbxdmamux_req *req;
+		const char *err_str;
+
+		err_address = mv_dma_read(mv_dev, MV_IDMA_ERROR_ADDRESS_REG);
+		err_select = mv_dma_read(mv_dev, MV_IDMA_ERROR_SELECT_REG);
+		err_select -= MV_IDMA_ERROR_CHANNEL_GET(mv_chan->idx);
+		if (err_select > 7)
+			err_select = 0;
+		err_str = mv_dma_error_str[err_select];
+		if (!err_str)
+			err_str = "Reserved";
+
+		printk(KERN_ERR PFX "dma error chan:%d - addr:%08x - "
+		       "[%s]\n", mv_chan->idx, err_address, err_str);
+
+		/* mark the whole chain as failed if an error is reported */
+		list_for_each_entry(req, &mv_chan->activelist, list) {
+			mv_req = req_priv(req);
+			mv_req->flags |= MVDMA_REQ_FLAGS_ERROR;
+		}
+	}
+
+	/* add active list at the end of done queue */
+	list_splice(&mv_chan->activelist, mv_chan->donelist.prev);
+
+	/* process done request and reload */
+	tasklet_schedule(&mv_chan->dmadone_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * identify target of given hw address
+ */
+static inline unsigned char target_by_address(dma_addr_t addr)
+{
+	if (addr >= PCI1_MEM_BASE && addr <= PCI1_MEM_BASE + PCI1_MEM_SIZE - 1)
+		return E_TARGET_PCI;
+	if (addr >= DEVICE_BASE && addr <= DEVICE_BASE + DEVICE_SIZE - 1)
+		return E_TARGET_DEVICE;
+	return E_TARGET_SDRAM;
+}
+
+/*
+ * called with bh disabled
+ */
+static int mv_dma_submit(struct fbxdmamux_device *dev,
+			 struct fbxdmamux_req *req)
+{
+	struct mvdma_req *mv_req;
+	struct mvdma_dev *mv_dev;
+	struct mvdma_chan *mv_chan;
+	unsigned int channel;
+
+	/* max transfer size is 64k - 1 */
+	if (req->len > ((64 << 10) - 1))
+		return 1;
+
+	/* identify source & target interface */
+	mv_req = req_priv(req);
+	mv_req->flags = 0;
+	mv_req->target_src = target_by_address(req->hw_src);
+	mv_req->target_dst = target_by_address(req->hw_dst);
+
+	/* if channel cookie is requested, then force channel, else
+	 * pick up last_channel if it's free */
+	mv_dev = container_of(dev, struct mvdma_dev, common);
+	if (req->chan_cookie) {
+		channel = req->chan_cookie % CHANNEL_COUNT;
+	} else {
+		int i;
+
+		channel = mv_dev->last_channel;
+		for (i = 0; i < CHANNEL_COUNT; i++) {
+			mv_chan = &mv_dev->chandata[channel];
+			if (!mv_chan->active_req &&
+			    list_empty(&mv_chan->reqlist))
+				break;
+			channel++;
+			channel %= CHANNEL_COUNT;
+			mv_dev->last_channel = channel;
+		}
+	}
+
+	/* place request in channel queue and kick channel if idle */
+	mv_chan = &mv_dev->chandata[channel];
+	list_add_tail(&req->list, &mv_chan->reqlist);
+	if (!mv_chan->active_req)
+		tasklet_schedule(&mv_chan->dmadone_tasklet);
+
+	return 0;
+}
+
+/*
+ * called with bh disabled
+ */
+static void mv_dma_flush(struct fbxdmamux_device *dev,
+			 unsigned int cookie)
+{
+	struct mvdma_dev *mv_dev;
+	struct mvdma_chan *mv_chan;
+	unsigned int channel;
+
+	mv_dev = container_of(dev, struct mvdma_dev, common);
+	channel = cookie % CHANNEL_COUNT;
+	mv_chan = &mv_dev->chandata[channel];
+
+	while (mv_dma_start(mv_chan))
+		cpu_relax();
+
+	while (1) {
+		rmb();
+		if (!mv_chan->active_req)
+			break;
+		cpu_relax();
+	}
+	mv_dma_done((unsigned long)mv_chan);
+}
+
+/*
+ * return device name
+ */
+static const char *mv_dma_get_name(struct fbxdmamux_device *dev)
+{
+	return "mv88fxx81_dma";
+}
+
+/*
+ * device probe
+ */
+static int mv_dma_probe(struct platform_device *pdev)
+{
+	struct mvdma_dev *mv_dev;
+	struct resource *res;
+	int ret, i;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -ENODEV;
+
+	mv_dev = kzalloc(sizeof (*mv_dev), GFP_KERNEL);
+	if (!mv_dev)
+		return -ENOMEM;
+
+	mv_dev->base = ioremap_nocache(res->start, res->end - res->start);
+	if (mv_dev->base == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mv_dev->common.dev = &pdev->dev;
+	mv_dev->common.req_priv_size = sizeof (struct mvdma_req);
+	mv_dev->common.submit = mv_dma_submit;
+	mv_dev->common.flush_channel = mv_dma_flush;
+	mv_dev->common.get_name = mv_dma_get_name;
+
+	for (i = 0; i < CHANNEL_COUNT; i++) {
+		struct mvdma_chan *mv_chan;
+
+		mv_chan = &mv_dev->chandata[i];
+		mv_chan->idx = i;
+		mv_chan->dma_dev = mv_dev;
+		mv_chan->active_req = NULL;
+		mv_chan->descs = NULL;
+		INIT_LIST_HEAD(&mv_chan->reqlist);
+		INIT_LIST_HEAD(&mv_chan->donelist);
+		tasklet_init(&mv_chan->dmadone_tasklet, mv_dma_done,
+			     (unsigned long)mv_chan);
+	}
+	mv_dev->last_channel = 0;
+	pdev->dev.coherent_dma_mask = 0xffffffff;
+	platform_set_drvdata(pdev, mv_dev);
+
+	for (i = 0; i < CHANNEL_COUNT; i++) {
+		struct mvdma_chan *mv_chan;
+		unsigned int mask, val;
+		u32 ctl;
+
+		mv_chan = &mv_dev->chandata[i];
+
+		/* allocate desc pool for this channel */
+		mv_chan->descs = dma_alloc_coherent(&pdev->dev, DESC_SIZE,
+						    &mv_chan->descs_hw,
+						    GFP_KERNEL);
+		if (!mv_chan->descs) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+		mv_chan->descs_aligned = (void *)
+			(((u32)mv_chan->descs + DESC_ALIGN) & ~DESC_ALIGN);
+		mv_chan->descs_hw_aligned = (u32)
+			(((u32)mv_chan->descs_hw + DESC_ALIGN) & ~DESC_ALIGN);
+
+		mv_dma_write(mv_chan->dma_dev, MV_IDMA_CTRLH_REG(i),
+			     MV_IDMA_CTRLH_RESERVED);
+
+		ctl = MV_IDMA_CTRL_INTERRUPT_LAST | MV_IDMA_CTRL_RESERVED;
+		mv_dma_write(mv_dev, MV_IDMA_CTRL_REG(i), ctl);
+
+		/* set irq mask & clear cause */
+		mask = MV_IDMA_INTR_COMPLETE(i) |
+			MV_IDMA_INTR_ADDR_MISS(i) |
+			MV_IDMA_INTR_ACCESS_PROT(i) |
+			MV_IDMA_INTR_WR_PROT(i);
+
+		mv_dma_write(mv_dev, MV_IDMA_INTR_CAUSE_REG, ~mask);
+		val = mv_dma_read(mv_dev, MV_IDMA_INTR_MASK_REG);
+		val |= mask;
+		mv_dma_write(mv_dev, MV_IDMA_INTR_MASK_REG, val);
+
+		/* make sure channel byte count is empty */
+		mv_dma_write(mv_dev, MV_IDMA_BYTE_COUNT_REG(i), 0);
+
+		/* register channel irq */
+		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+		BUG_ON(!res);
+		sprintf(mv_dev->chan_name[i], "mv_dma_chan%d", i);
+
+		if ((ret = request_irq(res->start, mv_dma_done_interrupt, 0,
+				       mv_dev->chan_name[i], mv_chan))) {
+			printk(KERN_ERR PFX "request irq for dma failed\n");
+			goto fail;
+		}
+        }
+
+	if ((ret = fbxdmamux_register_device(&mv_dev->common))) {
+		printk(KERN_ERR PFX "unable to register dma device\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	if (mv_dev) {
+		if (mv_dev->base)
+			iounmap(mv_dev->base);
+
+		for (i = 0; i < CHANNEL_COUNT; i++) {
+			struct mvdma_chan *mv_chan;
+			mv_chan = &mv_dev->chandata[i];
+
+			if (!mv_chan->descs)
+				continue;
+			dma_free_coherent(&pdev->dev, DESC_SIZE,
+					  mv_chan->descs, mv_chan->descs_hw);
+			res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+			free_irq(res->start, mv_chan);
+		}
+	}
+	kfree(mv_dev);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+/*
+ * device remove callback
+ */
+static int mv_dma_remove(struct platform_device *pdev)
+{
+	struct mvdma_dev *mv_dev;
+	int i;
+
+	mv_dev = platform_get_drvdata(pdev);
+	fbxdmamux_unregister_device(&mv_dev->common);
+	for (i = 0; i < CHANNEL_COUNT; i++) {
+		struct mvdma_chan *mv_chan;
+		struct resource *res;
+		mv_chan = &mv_dev->chandata[i];
+		dma_free_coherent(&pdev->dev, DESC_SIZE,
+				  mv_chan->descs, mv_chan->descs_hw);
+		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+		free_irq(res->start, mv_chan);
+		tasklet_kill(&mv_chan->dmadone_tasklet);
+	}
+	iounmap(mv_dev->base);
+	kfree(mv_dev);
+	return 0;
+}
+
+
+struct platform_driver mv88fxx81_dma_driver =
+{
+	.probe	= mv_dma_probe,
+	.remove	= mv_dma_remove,
+	.driver	= {
+		.name	= "mv88fxx81_dma",
+	},
+};
+
+/*
+ * module entry
+ */
+static int __init mv88fxx81_dma_init_module(void)
+{
+	return platform_driver_register(&mv88fxx81_dma_driver);
+}
+
+/*
+ * module exit
+ */
+static void __exit mv88fxx81_dma_exit_module(void)
+{
+	platform_driver_unregister(&mv88fxx81_dma_driver);
+}
+
+module_init(mv88fxx81_dma_init_module);
+module_exit(mv88fxx81_dma_exit_module);
+
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxdmamux./fbxdmamux_priv.h linux-2.6.20.14-fbx/drivers/fbxdmamux/fbxdmamux_priv.h
--- linux-2.6.20.14-fbx/drivers/fbxdmamux./fbxdmamux_priv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxdmamux/fbxdmamux_priv.h	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,29 @@
+
+#ifndef FBXDMAMUX_PRIV_H_
+#define FBXDMAMUX_PRIV_H_
+
+#define REQ_PRIV_ALIGN	8
+
+#define req_priv(x)	(void *)(((long)x + sizeof (*x) + \
+				REQ_PRIV_ALIGN) & ~REQ_PRIV_ALIGN)
+
+struct fbxdmamux_device
+{
+	struct device *dev;
+	unsigned int req_priv_size;
+
+	const char *(*get_name)(struct fbxdmamux_device *dma_dev);
+	int (*submit)(struct fbxdmamux_device *dma_dev,
+		      struct fbxdmamux_req *req);
+	void (*flush_channel)(struct fbxdmamux_device *dma_dev,
+			      unsigned int cookie);
+};
+
+int fbxdmamux_register_device(struct fbxdmamux_device *dev);
+
+void fbxdmamux_unregister_device(struct fbxdmamux_device *dev);
+
+void fbxdmamux_complete(struct fbxdmamux_device *dev,
+			struct fbxdmamux_req *req, int error);
+
+#endif /* !FBXDMAMUX_PRIV_H_ */
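A hardware backend built on this private header only has to fill a
struct fbxdmamux_device and register it; the sketch below compresses what
fbxdmamux_mv88fxx81.c above does in full. The "foo" names are invented for
illustration and this block is not part of the patch:

/*
 * backend sketch (illustration only): fill the ops and register. The
 * per-request private area is reached through req_priv() once the core
 * has sized the pool with req_priv_size.
 */
#include <linux/fbxdmamux.h>
#include "fbxdmamux_priv.h"

struct foo_req {
	unsigned int flags;
};

static const char *foo_get_name(struct fbxdmamux_device *dma_dev)
{
	return "foo_dma";
}

static int foo_submit(struct fbxdmamux_device *dma_dev,
		      struct fbxdmamux_req *req)
{
	struct foo_req *priv = req_priv(req);

	priv->flags = 0;
	/* program the hardware with req->hw_src, req->hw_dst, req->len;
	 * the completion path later calls
	 * fbxdmamux_complete(dma_dev, req, error) */
	return 0;
}

static void foo_flush_channel(struct fbxdmamux_device *dma_dev,
			      unsigned int cookie)
{
	/* drain the channel matching this cookie */
}

static struct fbxdmamux_device foo_dev = {
	.req_priv_size	= sizeof (struct foo_req),
	.get_name	= foo_get_name,
	.submit		= foo_submit,
	.flush_channel	= foo_flush_channel,
};

/* probe code would set foo_dev.dev = &pdev->dev and then call
 * fbxdmamux_register_device(&foo_dev); */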
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxdmamux./Kconfig linux-2.6.20.14-fbx/drivers/fbxdmamux/Kconfig
--- linux-2.6.20.14-fbx/drivers/fbxdmamux./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxdmamux/Kconfig	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,22 @@
+
+menu "Freebox DMA muxing support"
+
+config FREEBOX_DMAMUX
+	bool "Support for fbxdmamux"
+
+config FREEBOX_DMAMUX_MAX_PRIO
+	int "Number of priorities allowed"
+	default 1
+	depends on FREEBOX_DMAMUX
+
+comment "DMA devices"
+
+config BCM963XX_DMAMUX
+	tristate "Broadcom 963xx DMA support"
+	depends on FREEBOX_DMAMUX && BCM963XX && !BCM96358
+
+config MV88FXX81_DMAMUX
+	tristate "Marvell mv88fxx81 internal DMA engine support"
+	depends on FREEBOX_DMAMUX && ARCH_MV88FXX81
+
+endmenu
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxdmamux./Makefile linux-2.6.20.14-fbx/drivers/fbxdmamux/Makefile
--- linux-2.6.20.14-fbx/drivers/fbxdmamux./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxdmamux/Makefile	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,4 @@
+
+obj-$(CONFIG_FREEBOX_DMAMUX) += fbxdmamux.o
+obj-$(CONFIG_BCM963XX_DMAMUX) += fbxdmamux_bcm963xx.o
+obj-$(CONFIG_MV88FXX81_DMAMUX) += fbxdmamux_mv88fxx81.o
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxgpio./fbxgpio_core.c linux-2.6.20.14-fbx/drivers/fbxgpio/fbxgpio_core.c
--- linux-2.6.20.14-fbx/drivers/fbxgpio./fbxgpio_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxgpio/fbxgpio_core.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,351 @@
+/*
+ * fbxgpio_core.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Feb 21 18:12:14 2007
+ * Freebox SA
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/fbxgpio_core.h>
+
+#define PFX	"fbxgpio_core: "
+
+/* #define DEBUG */
+#ifdef DEBUG
+#define dprint(Fmt, Arg...)	printk(PFX Fmt, Arg)
+#else
+#define dprint(Fmt, Arg...)	do { } while (0)
+#endif
+
+/*
+ * show direction for the gpio associated with class_device dev.
+ */
+static ssize_t
+show_direction(struct class_device *dev, char *buf)
+{
+	int dir;
+	struct fbxgpio_pin *p;
+	int ret = 0;
+
+	p = dev->class_data;
+
+	if (p->ops->get_direction)
+		dir = p->ops->get_direction(p->pin_num);
+	else
+		dir = p->direction;
+
+	switch (dir) {
+	case GPIO_DIR_IN:
+		ret += sprintf(buf, "input\n");
+		break;
+	case GPIO_DIR_OUT:
+		ret += sprintf(buf, "output\n");
+		break;
+	default:
+		ret += sprintf(buf, "unknown\n");
+		break;
+	}
+	return ret;
+}
+
+/*
+ * store direction. return -EINVAL if direction string is bad. return
+ * -EPERM if flag FBXGPIO_PIN_DIR_RW is not set in flags.
+ */
+static ssize_t
+store_direction(struct class_device *dev, const char *buf, size_t count)
+{
+	int dir;
+	struct fbxgpio_pin *p;
+	int match_len = 0;
+	int i;
+	static const char *word_match[] = {
+		[GPIO_DIR_IN] = "input",
+		[GPIO_DIR_OUT] = "output",
+	};
+
+	if (*buf == ' ' || *buf == '\t' || *buf == '\r' || *buf == '\n')
+		/* silently eat any spaces/tab/linefeed/carriagereturn */
+		return 1;
+
+	p = dev->class_data;
+	if (!(p->flags & FBXGPIO_PIN_DIR_RW)) {
+		dprint("pin %s direction is read only.\n", p->pin_name);
+		return -EPERM;
+	}
+	dir = 0;
+	for (i = 0; i < 2; ++i) {
+		if (count >= strlen(word_match[i]) &&
+		    !strncmp(buf, word_match[i], strlen(word_match[i]))) {
+			dir = i;
+			match_len = strlen(word_match[i]);
+			break ;
+		}
+	}
+	if (i == 2)
+		return -EINVAL;
+
+	p->ops->set_direction(p->pin_num, dir);
+	return match_len;
+}
+
+/*
+ * show input data for input gpio pins.
+ */
+static ssize_t
+show_datain(struct class_device *dev, char *buf)
+{
+	int val;
+	struct fbxgpio_pin *p;
+
+	p = dev->class_data;
+	if (p->direction == GPIO_DIR_OUT)
+		return -EINVAL;
+	val = p->ops->get_datain(p->pin_num);
+
+	if (p->flags & FBXGPIO_PIN_REVERSE_POL)
+		val = 1 - val;
+	return sprintf(buf, "%i\n", val);
+}
+
+/*
+ * show output data for output gpio pins.
+ */
+static ssize_t
+show_dataout(struct class_device *dev, char *buf)
+{
+	int val;
+	struct fbxgpio_pin *p;
+
+	p = dev->class_data;
+	if (p->direction == GPIO_DIR_IN)
+		return -EINVAL;
+	if (p->ops->get_dataout)
+		val = p->ops->get_dataout(p->pin_num);
+	else
+		val = p->cur_dataout;
+
+	if (p->flags & FBXGPIO_PIN_REVERSE_POL)
+		val = 1 - val;
+	return sprintf(buf, "%i\n", val);
+}
+
+/*
+ * store new dataout value for output gpio pins.
+ */
+static ssize_t
+store_dataout(struct class_device *dev, const char *buf, size_t count)
+{
+	int val;
+	struct fbxgpio_pin *p;
+
+	if (*buf == ' ' || *buf == '\t' || *buf == '\r' || *buf == '\n')
+		/* silently eat any spaces/tab/linefeed/carriagereturn */
+		return 1;
+
+	p = dev->class_data;
+
+	if (p->direction != GPIO_DIR_OUT)
+		return -EINVAL;
+
+	switch (*buf) {
+	case '0':
+		val = 0;
+		break ;
+	case '1':
+		val = 1;
+		break ;
+	default:
+		return -EINVAL;
+	}
+
+	p->cur_dataout = val;
+
+	if (p->flags & FBXGPIO_PIN_REVERSE_POL)
+		val = 1 - val;
+	p->ops->set_dataout(p->pin_num, val);
+	return 1;
+}
+
+/*
+ * show pin number associated with gpio pin.
+ */
+static ssize_t
+show_pinnum(struct class_device *dev, char *buf)
+{
+	struct fbxgpio_pin *p;
+
+	p = dev->class_data;
+	return sprintf(buf, "%i\n", p->pin_num);
+}
+
+/*
+ * attribute list associated with each class device.
+ */
+static struct class_device_attribute gpio_attributes[] = {
+	__ATTR(direction, 0600, show_direction, store_direction),
+	__ATTR(data_in,   0400, show_datain, NULL),
+	__ATTR(data_out,  0600, show_dataout, store_dataout),
+	__ATTR(pin_num,   0400, show_pinnum, NULL),
+	{},
+};
+
+struct class fbxgpio_class =
+{
+	.name			= "fbxgpio",
+	.release		= NULL,
+	.class_dev_attrs	= gpio_attributes,
+};
+
+/*
+ * create a class device for given pin. associate the gpio_attributes
+ * array to the class device.
+ */
+int
+fbxgpio_register_pin(struct fbxgpio_pin *pin)
+{
+	int err;
+	char buf[32];
+
+	dprint("registering pin %s\n", pin->pin_name);
+
+	/* ensure ops is valid */
+	if (pin->ops == NULL) {
+		printk(KERN_ERR PFX "no operation set for pin %s\n",
+		       pin->pin_name);
+		return -EINVAL;
+	}
+
+	/* create and register class device */
+	pin->class_dev = kzalloc(sizeof (*pin->class_dev), GFP_KERNEL);
+	if (pin->class_dev == NULL)
+		return -ENOMEM;
+
+	class_device_initialize(pin->class_dev);
+	pin->class_dev->class = &fbxgpio_class;
+	pin->class_dev->class_data = pin;
+	strlcpy(pin->class_dev->class_id, pin->pin_name, BUS_ID_SIZE);
+	err = class_device_add(pin->class_dev);
+	if (err) {
+		printk(KERN_ERR PFX "%i: unable to register fbxgpio "
+		       "device %s\n", err, pin->pin_name);
+		kfree(pin->class_dev);
+		pin->class_dev = NULL;
+		return err;
+	}
+
+	/* ensure pin direction matches hardware state */
+	if (pin->ops->get_direction &&
+	    pin->direction != pin->ops->get_direction(pin->pin_num)) {
+		printk(KERN_WARNING PFX "pin %s default direction does not "
+		       "match current hardware state, fixing.\n",
+		       pin->pin_name);
+		pin->ops->set_direction(pin->pin_num, pin->direction);
+	}
+
+	/* symlink $pin->pin_name to gpio_$pin->pin_num */
+	snprintf(buf, sizeof (buf), "%i", pin->pin_num);
+	err = sysfs_create_link(pin->class_dev->kobj.parent,
+				&pin->class_dev->kobj, buf);
+	if (err) {
+		printk(KERN_WARNING PFX "unable to link gpio number to "
+		       "symbolic name directory (err %i).\n", err);
+	}
+
+	return 0;
+}
+
+void
+fbxgpio_unregister_pin(struct fbxgpio_pin *pin)
+{
+	char buf[32];
+
+	dprint("unregistering pin %s\n", pin->pin_name);
+
+	/* remove link */
+	snprintf(buf, sizeof (buf), "%i", pin->pin_num);
+	sysfs_remove_link(pin->class_dev->kobj.parent, buf);
+
+	/* remove class device */
+	class_device_del(pin->class_dev);
+	kfree(pin->class_dev);
+	pin->class_dev = NULL;
+}
+
+int
+fbxgpio_platform_probe(struct platform_device *dev)
+{
+	int err = 0;
+	struct fbxgpio_pin *p;
+
+	p = dev->dev.platform_data;
+	while (p->pin_name) {
+		err = fbxgpio_register_pin(p);
+		if (err)
+			return err;
+		++p;
+	}
+	return 0;
+}
+
+int
+fbxgpio_platform_remove(struct platform_device *dev)
+{
+	struct fbxgpio_pin *p;
+
+	p = dev->dev.platform_data;
+	while (p->pin_name) {
+		fbxgpio_unregister_pin(p);
+		++p;
+	}
+	return 0;
+}
+
+struct platform_driver fbxgpio_platform_driver =
+{
+	.probe	= fbxgpio_platform_probe,
+	.remove	= fbxgpio_platform_remove,
+	.driver	= {
+		.name	= "fbxgpio",
+	}
+};
+
+
+
+int __init
+fbxgpio_init(void)
+{
+	int err;
+
+	err = class_register(&fbxgpio_class);
+	if (err) {
+		printk(KERN_ERR PFX "unable to register fbxgpio class.\n");
+		return err;
+	}
+
+	err = platform_driver_register(&fbxgpio_platform_driver);
+	if (err) {
+		printk(KERN_ERR PFX "unable to register fbxgpio driver.\n");
+		class_unregister(&fbxgpio_class);
+		return err;
+	}
+	return 0;
+}
+
+void __exit
+fbxgpio_exit(void)
+{
+	platform_driver_unregister(&fbxgpio_platform_driver);
+	class_unregister(&fbxgpio_class);
+}
+
+subsys_initcall(fbxgpio_init);
+module_exit(fbxgpio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nicolas.schichan@freebox.fr>");
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxgpio./Kconfig linux-2.6.20.14-fbx/drivers/fbxgpio/Kconfig
--- linux-2.6.20.14-fbx/drivers/fbxgpio./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxgpio/Kconfig	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,7 @@
+menu "Freebox GPIO"
+
+config FREEBOX_GPIO
+	tristate "Freebox GPIO control interface."
+	default n
+
+endmenu
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxgpio./Makefile linux-2.6.20.14-fbx/drivers/fbxgpio/Makefile
--- linux-2.6.20.14-fbx/drivers/fbxgpio./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxgpio/Makefile	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,9 @@
+##
+##  Makefile for linux-freebox
+##  Created by <nschichan@freebox.fr> on Wed Feb 21 18:08:48 2007
+##  Freebox SA
+##
+
+obj-$(CONFIG_FREEBOX_GPIO)	+= fbxgpio_core.o
+
+EXTRA_CFLAGS += -Werror
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_blk_dev.c linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_blk_dev.c
--- linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_blk_dev.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_blk_dev.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,368 @@
+/*
+ * Freebox Memory Technology Device r/o block device interface.
+ *
+ * Interface to the flash via a  block device. A mapping is done using
+ * minor  number  and partitions  so  a  maximum  of 8  partitions  is
+ * possible per device.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/kthread.h>
+
+#include <asm/atomic.h>
+#include <asm/io.h>
+
+#include <linux/fbxmtd.h>
+
+#include "fbxmtd_priv.h"
+#include "fbxmtd_blk_dev.h"
+
+#define PFX			"fbxmtd_blk: "
+#define FBXMTD_BLK_MAJOR	252
+#define PART_PER_DEVICE		FBXMTD_MAX_PART
+
+
+static struct semaphore disks_mutex;
+static struct list_head disks;
+
+/*
+ * list helpers
+ */
+static int fbxmtd_add_disk(struct fbxmtd_blk_disk *d)
+{
+	down(&disks_mutex);
+	list_add_tail(&d->list, &disks);
+	up(&disks_mutex);
+
+	return 0;
+}
+
+static void _fbxmtd_put_disk(struct fbxmtd_blk_disk *d)
+{
+	if (!atomic_dec_and_test(&d->refcount))
+		return;
+
+	fbxmtd_put_part(d->part);
+	if (d->worker)
+		kthread_stop(d->worker);
+	if (d->gd) {
+		del_gendisk(d->gd);
+		put_disk(d->gd);
+	}
+	if (d->queue)
+		blk_cleanup_queue(d->queue);
+	list_del(&d->list);
+	kfree(d);
+}
+
+static void fbxmtd_put_disk(struct fbxmtd_blk_disk *d)
+{
+	down(&disks_mutex);
+	_fbxmtd_put_disk(d);
+	up(&disks_mutex);
+}
+
+static void fbxmtd_put_disk_by_part(struct fbxmtd_part *part)
+{
+	struct fbxmtd_blk_disk *d;
+
+	down(&disks_mutex);
+	list_for_each_entry(d, &disks, list) {
+		if (d->part == part) {
+			_fbxmtd_put_disk(d);
+			break;
+		}
+	}
+	up(&disks_mutex);
+}
+
+
+/*
+ * blk funcs
+ */
+static int transfer(struct fbxmtd_part *part, unsigned long sector,
+		    unsigned long nsect, char *buffer, int write,
+		    int can_sleep)
+{
+	unsigned long offset = sector * 512;
+	unsigned long nbytes = nsect * 512;
+
+	/* delay write requests, as they will sleep, but try to
+	 * process read requests now if possible */
+	if (!can_sleep) {
+		if (write)
+			return -EWOULDBLOCK;
+		return fbxmtd_read_part(part, offset, buffer, nbytes, 0);
+	}
+
+	/* we can sleep */
+	if (write)
+		return fbxmtd_write_part(part, offset, buffer, nbytes);
+	return fbxmtd_read_part(part, offset, buffer, nbytes, 1);
+}
+
+/*
+ * worker thread main func
+ */
+static int kworkerthread(void *data)
+{
+	struct fbxmtd_blk_disk *disk;
+	struct request_queue *q;
+
+	disk = (struct fbxmtd_blk_disk *)data;
+	q = disk->queue;
+
+	/* we hold the queue lock until there are no more requests to
+	 * process */
+	spin_lock_irq(&disk->lock);
+	while (!kthread_should_stop()) {
+		struct request *req;
+		int ret;
+
+		/* process next request */
+		if (!(req = elv_next_request(q))) {
+			/* nothing to do, sleep */
+			spin_unlock_irq(&disk->lock);
+			wait_event_interruptible(disk->worker_wq,
+						 disk->work_to_do == 1 ||
+						 kthread_should_stop());
+			disk->work_to_do = 0;
+			spin_lock_irq(&disk->lock);
+			continue;
+		}
+
+		/* process the  request, release queue lock  as we may
+		 * sleep */
+		spin_unlock_irq(&disk->lock);
+		ret = transfer(disk->part, req->sector,
+			       req->current_nr_sectors, req->buffer,
+			       rq_data_dir(req), 1);
+		end_request(req, ret >= 0 ? 1 : 0);
+		spin_lock_irq(&disk->lock);
+	}
+	spin_unlock_irq(&disk->lock);
+
+	return 0;
+}
+
+
+static void blk_request(request_queue_t *q)
+{
+	struct fbxmtd_blk_disk *disk = q->queuedata;
+
+	/* wake up the worker thread to process the request */
+	disk->work_to_do = 1;
+	wake_up(&disk->worker_wq);
+}
+
+static int blk_open(struct inode *inode, struct file *filep)
+{
+	struct fbxmtd_blk_disk *disk;
+
+	disk = (struct fbxmtd_blk_disk *)inode->i_bdev->bd_disk->private_data;
+	filep->private_data = disk;
+
+	atomic_inc(&disk->refcount);
+
+	return 0;
+}
+
+static int blk_release(struct inode *inode, struct file *filep)
+{
+	struct fbxmtd_blk_disk *disk;
+
+	disk = (struct fbxmtd_blk_disk *)inode->i_bdev->bd_disk->private_data;
+	fbxmtd_put_disk(disk);
+
+	return 0;
+}
+
+struct block_device_operations fbxmtd_blk_ops = {
+	.open = blk_open,
+	.release = blk_release,
+	.owner = THIS_MODULE,
+};
+
+
+/*
+ * setup a fbxmtd blk disk for the given partition
+ */
+static int setup_disk(struct fbxmtd_part *part)
+{
+	struct fbxmtd_blk_disk *disk;
+
+	if (!(disk = kmalloc(sizeof (struct fbxmtd_blk_disk), GFP_KERNEL)))
+		return 1;
+
+	memset(disk, 0, sizeof (struct fbxmtd_blk_disk));
+	disk->part = part;
+	snprintf(disk->name, sizeof (disk->name), "%s/%s", part->dev->name,
+		 part->name);
+	INIT_LIST_HEAD(&disk->list);
+	spin_lock_init(&disk->lock);
+	init_waitqueue_head(&disk->worker_wq);
+	atomic_set(&disk->refcount, 1);
+
+
+	/* alloc queue */
+	disk->queue = blk_init_queue(blk_request, &disk->lock);
+	if (disk->queue == NULL) {
+		printk(KERN_ERR PFX "blk_init_queue failed !\n");
+		goto free;
+	}
+	blk_queue_hardsect_size(disk->queue, 512);
+	blk_queue_max_phys_segments(disk->queue, 1);
+	disk->queue->queuedata = disk;
+
+	/* alloc gendisk */
+	if (!(disk->gd = alloc_disk(1))) {
+		printk(KERN_ERR PFX "alloc_disk failed !\n");
+		goto free;
+	}
+	snprintf(disk->gd->disk_name, sizeof (disk->gd->disk_name),
+		 "fbxmtdblk%c%d", disk->part->dev->idx + 'a', part->idx);
+
+	disk->gd->major = FBXMTD_BLK_MAJOR;
+	disk->gd->first_minor = disk->part->dev->idx * PART_PER_DEVICE +
+		part->idx;
+
+	disk->gd->queue = disk->queue;
+	disk->gd->fops = &fbxmtd_blk_ops;
+	disk->gd->private_data = disk;
+	set_capacity(disk->gd, (part->size / 512) == 0 ? 1 : part->size / 512);
+
+	/* create worker kthread */
+	disk->worker = kthread_create(kworkerthread, disk,
+				      "kfbxmtdblk/%s", part->name);
+	if (IS_ERR(disk->worker)) {
+		disk->worker = NULL;
+		printk(KERN_ERR PFX "Error creating worker kthread\n");
+		goto free;
+	}
+
+	/* add gen disk */
+	add_disk(disk->gd);
+
+	/* add disk to the list */
+	if (fbxmtd_add_disk(disk))
+		goto free;
+
+	wake_up_process(disk->worker);
+	return 0;
+free:
+	if (disk->worker)
+		kthread_stop(disk->worker);
+	if (disk->gd) {
+		del_gendisk(disk->gd);
+		put_disk(disk->gd);
+	}
+	if (disk->queue)
+		blk_cleanup_queue(disk->queue);
+	kfree(disk);
+	return 1;
+}
+
+
+static void fbxmtd_notifier_cb(void *cb_data, struct fbxmtd_part *part,
+			       uint32_t event)
+{
+	if (event == FBXMTD_EVENT_DEAD) {
+		fbxmtd_put_disk_by_part(part);
+	} else {
+		/* event == FBXMTD_EVENT_ADD */
+		if (part->size < 512 || part->size % 512) {
+			printk(KERN_INFO PFX "skipped too small or non "
+			       "512-byte-aligned partition %s\n", part->name);
+			return;
+		}
+		if (!setup_disk(part))
+			atomic_inc(&part->dev->refcount);
+	}
+}
+
+static int foreach_part_cb(void *cb_data, struct fbxmtd_part *part)
+{
+	if (part->size < 512 || part->size % 512) {
+		printk(KERN_INFO PFX "skipped too small partition %s\n",
+		       part->name);
+		return 0;
+	}
+
+	if (!setup_disk(part)) {
+		/* keep a reference on part */
+		atomic_inc(&part->dev->refcount);
+		return 0;
+	}
+	return 1;
+}
+
+
+static int __init fbxmtd_blk_init(void)
+{
+	struct fbxmtd_blk_disk *d, *d2;
+
+	printk(KERN_INFO PFX "Freebox MTD block device access support\n");
+
+	INIT_LIST_HEAD(&disks);
+	init_MUTEX(&disks_mutex);
+
+	/* register blk device */
+	if (register_blkdev(FBXMTD_BLK_MAJOR, "fbxmtd") < 0) {
+		printk(KERN_ERR PFX "Unable to get blkdev major %d\n",
+		       FBXMTD_BLK_MAJOR);
+		return -ENODEV;
+	}
+
+	/* register notifier for future addition of mtddevice */
+	if (fbxmtd_register_notifier(fbxmtd_notifier_cb, NULL,
+				     FBXMTD_EVENT_DEAD | FBXMTD_EVENT_PART)) {
+		printk(KERN_ERR PFX "Unable to register fbxmtd notifier\n");
+		goto err;
+	}
+
+	/* create disk for existing partitions */
+	if (fbxmtd_foreach_part(foreach_part_cb, NULL))
+		goto err;
+
+	return 0;
+
+err:
+	unregister_blkdev(FBXMTD_BLK_MAJOR, "fbxmtd");
+	fbxmtd_unregister_notifier(fbxmtd_notifier_cb);
+	/* put all devices */
+	down(&disks_mutex);
+	list_for_each_entry_safe(d, d2, &disks, list) {
+		_fbxmtd_put_disk(d);
+	}
+	up(&disks_mutex);
+	return -ENODEV;
+}
+
+static void __exit fbxmtd_blk_exit(void)
+{
+	struct fbxmtd_blk_disk *d, *d2;
+
+	fbxmtd_unregister_notifier(fbxmtd_notifier_cb);
+
+	/* put all devices */
+	down(&disks_mutex);
+	list_for_each_entry_safe(d, d2, &disks, list) {
+		_fbxmtd_put_disk(d);
+	}
+	up(&disks_mutex);
+
+	unregister_blkdev(FBXMTD_BLK_MAJOR, "fbxmtd");
+}
+
+
+module_init(fbxmtd_blk_init);
+module_exit(fbxmtd_blk_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_blk_dev.h linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_blk_dev.h
--- linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_blk_dev.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_blk_dev.h	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,22 @@
+
+#ifndef FBXMTD_BLK_DEV_H_
+# define FBXMTD_BLK_DEV_H_
+
+#include <linux/list.h>
+#include <asm/atomic.h>
+
+struct fbxmtd_blk_disk
+{
+	char name[32];
+	struct fbxmtd_part *part;
+	struct list_head list;
+	atomic_t refcount;
+	spinlock_t lock;
+	struct request_queue *queue;
+	struct gendisk *gd;
+	struct task_struct *worker;
+	wait_queue_head_t worker_wq;
+	int work_to_do;
+};
+
+#endif /* !FBXMTD_BLK_DEV_H_ */
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_char_dev.c linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_char_dev.c
--- linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_char_dev.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_char_dev.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,220 @@
+/*
+ * Freebox Memory Technology Device character device interface.
+ *
+ * Interface to  the flash via a  character device. A  mapping is done
+ * using minor  number and  partitions so a maximum  of 8  partitions is
+ * possible per device.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+
+#include <linux/fbxmtd.h>
+
+#include "fbxmtd_priv.h"
+
+#define PFX			"fbxmtd_chr: "
+#define FBXMTD_CHAR_MAJOR	251
+#define PART_PER_DEVICE		FBXMTD_MAX_PART
+#define MAX_KMALLOC_SIZE	0x20000
+
+/*
+ * open callback
+ * map the minor to the associated device / partition and keep track of it
+ */
+static int fbxmtd_open(struct inode *inode, struct file *file)
+{
+	struct fbxmtd_part *part;
+	int minor, dev_idx, part_idx;
+
+	/* find partition associated if any */
+	minor = MINOR(inode->i_rdev);
+	dev_idx = minor / PART_PER_DEVICE;
+	part_idx = minor % PART_PER_DEVICE;
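+	/*
+	 * illustrative mapping, not part of the original code: assuming
+	 * PART_PER_DEVICE (i.e. FBXMTD_MAX_PART) is 8, minor 10 selects
+	 * device 1, partition 2.
+	 */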
+
+	/* get associated partition */
+	part = fbxmtd_get_part(dev_idx, part_idx);
+	if (!part)
+		return -ENODEV;
+	file->private_data = part;
+
+	return 0;
+}
+
+/*
+ * release callback
+ * put reference to the partition we have
+ */
+static int fbxmtd_release(struct inode *inode, struct file *file)
+{
+	struct fbxmtd_part *part;
+
+	part = (struct fbxmtd_part *)file->private_data;
+	fbxmtd_put_part(part);
+
+	return 0;
+}
+
+/*
+ * read callback
+ * adjust offset and count and forward request to the core
+ */
+
+static ssize_t fbxmtd_read(struct file *file, char *buffer, size_t count,
+			   loff_t *ppos)
+{
+	struct fbxmtd_part *part;
+	size_t total_len;
+	int len;
+	char *kbuf;
+
+	part = (struct fbxmtd_part *)file->private_data;
+
+	/* adjust size if count is too big */
+	if (*ppos >= part->size)
+		return 0;
+	if (count > part->size - *ppos)
+		count = part->size - *ppos;
+
+	if (count > MAX_KMALLOC_SIZE)
+		len = MAX_KMALLOC_SIZE;
+	else
+		len = count;
+
+	if (!(kbuf = kmalloc(len, GFP_KERNEL)))
+		return -ENOMEM;
+
+	total_len = 0;
+	while (count) {
+
+		if (need_resched())
+			yield();
+
+		if (count > MAX_KMALLOC_SIZE)
+			len = MAX_KMALLOC_SIZE;
+		else
+			len = count;
+
+		len = fbxmtd_read_part(part, *ppos, kbuf, len, 1);
+		if (len <= 0) {
+			kfree(kbuf);
+			return len;
+		}
+
+		if (len) {
+			if (copy_to_user(buffer, kbuf, len)) {
+				kfree(kbuf);
+				return -EFAULT;
+			}
+		}
+
+		count -= len;
+		buffer += len;
+		*ppos += len;
+		total_len += len;
+	}
+
+	kfree(kbuf);
+	return total_len;
+}
+
+
+/*
+ * write callback
+ * adjust offset and count and forward request to the core
+ */
+static ssize_t fbxmtd_write(struct file *file, const char *buffer,
+			    size_t count, loff_t *ppos)
+{
+	struct fbxmtd_part *part;
+	size_t total_len;
+	char *kbuf;
+	int len;
+
+	part = (struct fbxmtd_part *)file->private_data;
+
+	/* write access outside the partition region is an error */
+	if (*ppos >= part->size)
+		return -EINVAL;
+	if (count > part->size - *ppos)
+		return -EFBIG;
+
+	if (count > MAX_KMALLOC_SIZE)
+		len = MAX_KMALLOC_SIZE;
+	else
+		len = count;
+
+	if (!(kbuf = kmalloc(len, GFP_KERNEL)))
+		return -ENOMEM;
+
+	total_len = 0;
+	while (count) {
+
+		if (need_resched())
+			yield();
+
+		if (count > MAX_KMALLOC_SIZE)
+			len = MAX_KMALLOC_SIZE;
+		else
+			len = count;
+
+		if (copy_from_user(kbuf, buffer, len)) {
+			kfree(kbuf);
+			return -EFAULT;
+		}
+
+		len = fbxmtd_write_part(part, *ppos, kbuf, len);
+		if (len <= 0) {
+			kfree(kbuf);
+			return len;
+		}
+
+		count -= len;
+		buffer += len;
+		*ppos += len;
+		total_len += len;
+	}
+
+	kfree(kbuf);
+	return total_len;
+
+}
+
+
+static struct file_operations fbxmtd_fops = {
+	.open = fbxmtd_open,
+	.release = fbxmtd_release,
+	.read = fbxmtd_read,
+	.write = fbxmtd_write,
+	.owner = THIS_MODULE,
+};
+
+static int __init fbxmtd_chr_init(void)
+{
+	printk(KERN_INFO PFX "Freebox MTD character device access support\n");
+
+	/* register char device */
+	if (register_chrdev(FBXMTD_CHAR_MAJOR, "fbxmtd", &fbxmtd_fops) < 0) {
+		printk(KERN_ERR PFX "Unable to get chrdev major %d\n",
+		       FBXMTD_CHAR_MAJOR);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void __exit fbxmtd_chr_exit(void)
+{
+	unregister_chrdev(FBXMTD_CHAR_MAJOR, "fbxmtd");
+}
+
+
+module_init(fbxmtd_chr_init);
+module_exit(fbxmtd_chr_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_core_amd.c linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_core_amd.c
--- linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_core_amd.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_core_amd.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,622 @@
+/*
+ *
+ * Support for AMD compatible flash for Freebox MTD
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#include <linux/fbxmtd.h>
+
+#include "fbxmtd_priv.h"
+#include "fbxmtd_core_amd.h"
+
+#define PFX		"fbxmtd_amd: "
+#define POLL_TIMEOUT	(10 * HZ)
+
+#define SZ_1K				0x00000400
+#define SZ_2K				0x00000800
+#define SZ_4K				0x00001000
+#define SZ_8K				0x00002000
+#define SZ_16K				0x00004000
+#define SZ_32K				0x00008000
+#define SZ_64K				0x00010000
+#define SZ_128K				0x00020000
+#define SZ_256K				0x00040000
+#define SZ_512K				0x00080000
+#define SZ_1M				0x00100000
+#define SZ_2M				0x00200000
+#define SZ_4M				0x00400000
+#define SZ_8M				0x00800000
+#define SZ_16M				0x01000000
+#define SZ_32M				0x02000000
+#define SZ_64M				0x04000000
+#define SZ_128M				0x08000000
+#define SZ_256M				0x10000000
+#define SZ_512M				0x20000000
+
+/*
+ * List of known flash we support
+ */
+static const struct amd_flash_info amd_flash_infos[] = {
+
+	{
+		.mfr_id = MANUFACTURER_AMD,
+		.dev_id = AM29LV040B,
+		.name = "AMD AM29LV040B",
+		.size = SZ_512K,
+		.use_dq5 = 1,
+		.use_ext_dev_id = 0,
+		.need_tpoll_delay = 1,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_64K, .count = 8 },
+			{ .count = 0 },
+		},
+	 },
+
+	{
+		.mfr_id = MANUFACTURER_AMD,
+		.dev_id = SPANSION_GENERIC,
+		.ext_dev_id = { 0x221A, 0x2200 },
+		.name = "AMD AM29LV320MB / S29GL032[AN]R4 (bottom)",
+		.size = SZ_4M,
+		.use_dq5 = 1,
+		.use_ext_dev_id = 1,
+		.need_tpoll_delay = 1,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_8K, .count = 8 },
+			{ .offset = 0x10000, .size = SZ_64K, .count = 63 },
+			{ .count = 0 },
+		},
+	 },
+
+	{
+		.mfr_id = MANUFACTURER_SPANSION,
+		.dev_id = SPANSION_GENERIC,
+		.ext_dev_id = { 0x2210, 0x2200 },
+		.name = "SPANSION S29GL064[AMN]R4 (bottom)",
+		.size = SZ_8M,
+		.use_dq5 = 1,
+		.need_tpoll_delay = 1,
+		.use_ext_dev_id = 1,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_8K, .count = 8 },
+			{ .offset = 0x10000, .size = SZ_64K, .count = 126 },
+			{ .count = 0 },
+		},
+	 },
+
+	{
+		.mfr_id = MANUFACTURER_SPANSION,
+		.dev_id = SPANSION_GENERIC,
+		.ext_dev_id = { 0x2222, 0x2201 },
+		.name = "SPANSION S29GL256[PN]",
+		.size = SZ_32M,
+		.use_dq5 = 1,
+		.use_ext_dev_id = 1,
+		.use_write_buffer = 1,
+		.write_buffer_size = 32,
+		.need_tpoll_delay = 1,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_128K, .count = 256 },
+			{ .count = 0 },
+		},
+	 },
+
+	{
+		.mfr_id = MANUFACTURER_SPANSION,
+		.dev_id = SPANSION_GENERIC,
+		.ext_dev_id = { 0x2221, 0x2201 },
+		.name = "SPANSION S29GL128P",
+		.size = SZ_16M,
+		.use_dq5 = 1,
+		.use_ext_dev_id = 1,
+		.use_write_buffer = 1,
+		.write_buffer_size = 32,
+		.need_tpoll_delay = 1,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_128K, .count = 128 },
+		},
+	},
+
+	{
+		.mfr_id = MANUFACTURER_ATMEL,
+		.dev_id = AT49BV322A,
+		.name = "ATMEL AT49BV322A (bottom)",
+		.size = SZ_4M,
+		.use_dq5 = 1,
+		.need_tpoll_delay = 0,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_8K, .count = 8 },
+			{ .offset = 0x10000, .size = SZ_64K, .count = 63 },
+			{ .count = 0 },
+		},
+	},
+
+
+	{
+		.mfr_id = MANUFACTURER_SST,
+		.dev_id = SST39VF3201,
+		.name = "SST 39VF3201",
+		.size = SZ_4M,
+		.use_dq5 = 0,
+		.need_tpoll_delay = 0,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_4K, .count = 1024 },
+			{ .count = 0 },
+		},
+	},
+
+	{
+		.mfr_id = MANUFACTURER_SST,
+		.dev_id = SST39VF3202_REVB,
+		.name = "SST 39VF3202 (rev B)",
+		.size = SZ_4M,
+		.use_dq5 = 0,
+		.need_tpoll_delay = 0,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_64K, .count = 64 },
+			{ .count = 0 },
+		},
+	},
+
+	{
+		.mfr_id = MANUFACTURER_SST,
+		.dev_id = SST39VF6401,
+		.name = "SST 39VF6401",
+		.size = SZ_8M,
+		.use_dq5 = 0,
+		.need_tpoll_delay = 0,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_4K, .count = 2048 },
+			{ .count = 0 },
+		},
+	},
+
+	{
+		.mfr_id = MANUFACTURER_SST,
+		.dev_id = SST39VF6401_REVB,
+		.name = "SST 39VF6401 (rev B)",
+		.size = SZ_8M,
+		.use_dq5 = 0,
+		.need_tpoll_delay = 0,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_64K, .count = 128 },
+			{ .count = 0 },
+		},
+	},
+
+
+	{
+		.mfr_id = MANUFACTURER_MACRONIX,
+		.dev_id = MX29LV320DT,
+		.name = "MX 29LV320DT (top)",
+		.size = SZ_4M,
+		.use_dq5 = 1,
+		.need_tpoll_delay = 0,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_64K, .count = 63 },
+			{ .offset = 0x3f0000, .size = SZ_8K, .count = 8 },
+			{ .count = 0 },
+		},
+	},
+
+	{
+		.mfr_id = MANUFACTURER_SPANSION,
+		.dev_id = S29AL032D_M3,
+		.name = "SPANSION S29AL032D_M3",
+		.size = SZ_4M,
+		.use_dq5 = 1,
+		.need_tpoll_delay = 1,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_64K, .count = 63 },
+			{ .offset = 0x3f0000, .size = SZ_8K, .count = 8 },
+			{ .count = 0 },
+		},
+	 },
+
+	{
+		.mfr_id = MANUFACTURER_NUMONIX,
+		.dev_id = SPANSION_GENERIC,
+		.ext_dev_id = { 0x2222, 0x2201 },
+		.name = "NUMONIX M29EW256",
+		.size = SZ_32M,
+		.use_dq5 = 1,
+		.use_ext_dev_id = 1,
+		.use_write_buffer = 1,
+		.write_buffer_size = 256,
+		.need_tpoll_delay = 0,
+
+		.regions = {
+			{ .offset = 0x0, .size = SZ_128K, .count = 256 },
+			{ .count = 0 },
+		},
+	 },
+
+	/* end of list */
+	{
+		.mfr_id = 0,
+	},
+};
+
+
+/*
+ * send given command to flash wrt its width
+ */
+static uint32_t __amd_swizzle_addr(struct fbxmtd_dev_map *map, uint32_t addr)
+{
+	return addr << (map->flash_width - 1);
+}
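+
+/*
+ * Illustrative example (editorial note, not part of the original patch):
+ * on a 16-bit flash (flash_width == 2), the unlock address 0x5555 is
+ * shifted left by one, giving byte offset 0xAAAA on the bus.
+ */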
+
+static void __amd_unlock(struct fbxmtd_dev_map *map)
+{
+	fbxmtd_bus_width_write(map, __amd_swizzle_addr(map, ADDR_UNLOCK1),
+			       CMD_UNLOCK_DATA_1);
+	fbxmtd_bus_width_write(map, __amd_swizzle_addr(map, ADDR_UNLOCK2),
+			       CMD_UNLOCK_DATA_2);
+}
+
+static void amd_reset(struct fbxmtd_dev_map *map)
+{
+	fbxmtd_bus_width_write(map, 0, CMD_RESET_DATA);
+}
+
+static void amd_cmd(struct fbxmtd_dev_map *map, uint32_t cmd)
+{
+	__amd_unlock(map);
+	fbxmtd_bus_width_write(map, __amd_swizzle_addr(map, ADDR_UNLOCK1),
+			       cmd);
+}
+
+static void amd_cmd_addr(struct fbxmtd_dev_map *map, uint32_t addr,
+			 uint32_t cmd)
+{
+	__amd_unlock(map);
+	fbxmtd_bus_width_write(map, addr, cmd);
+}
+
+/*
+ * poll status bit waiting for operation to finish
+ */
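+/*
+ * (editorial note, not part of the original patch: this looks like the
+ * standard AMD DQ7/DQ5 data-polling scheme; DQ7 (bit 0x80) reads back
+ * inverted until the operation completes, and DQ5 (bit 0x20) going high
+ * while DQ7 still mismatches signals an internal timeout/error.)
+ */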
+static int amd_status_poll(struct fbxmtd_dev *dev, uint32_t offset,
+			   uint16_t data)
+{
+	struct amd_flash_info *info;
+	unsigned long timeout;
+
+	info = (struct amd_flash_info *)dev->priv_data;
+	if (info->need_tpoll_delay)
+		udelay(5);
+
+	timeout = jiffies + POLL_TIMEOUT;
+	do {
+		uint16_t flag;
+
+		flag = fbxmtd_bus_width_read(&dev->map, offset);
+		if ((flag & 0x80) == (data & 0x80)) {
+			return 0;
+		}
+
+		if (info->use_dq5 && (flag & 0x20)) {
+			flag = fbxmtd_bus_width_read(&dev->map, offset);
+			return ((flag & 0x80) == (data & 0x80)) ? 0 : 1;
+		}
+
+		if (need_resched())
+			yield();
+
+	} while (time_before(jiffies, timeout));
+
+	printk(KERN_ERR PFX "poll: timeout !\n");
+	return -ETIMEDOUT;
+}
+
+/*
+ * erase chip
+ */
+static int amd_erase_chip(struct fbxmtd_dev *dev)
+{
+	printk(KERN_INFO PFX "Erasing chip...\n");
+
+	amd_cmd(&dev->map, CMD_UNLOCK_ERASE);
+	amd_cmd(&dev->map, CMD_CHIP_ERASE);
+
+	if (amd_status_poll(dev, 0, 0xffff) < 0) {
+		printk(KERN_ERR PFX "Chip erase failed at 0x%.8x\n",
+		       (uint32_t)dev->map.base);
+		amd_reset(&dev->map);
+		return -EIO;
+	}
+	amd_reset(&dev->map);
+
+	return 0;
+}
+
+/*
+ * erase sector at given offset
+ */
+static int amd_erase_sector(struct fbxmtd_dev *dev, uint32_t offset)
+{
+	struct amd_flash_info *info;
+
+	info = (struct amd_flash_info *)dev->priv_data;
+
+	amd_cmd(&dev->map, CMD_UNLOCK_ERASE);
+	amd_cmd_addr(&dev->map, offset, CMD_SECTOR_ERASE);
+
+	if (amd_status_poll(dev, offset, 0xffff) < 0) {
+		printk(KERN_ERR PFX "Sector erase failed at 0x%.8x\n",
+		       (uint32_t)(dev->map.base + offset));
+		amd_reset(&dev->map);
+		return -EIO;
+	}
+	amd_reset(&dev->map);
+
+	return 0;
+}
+
+/*
+ * program data in buf at requested offset using the write buffer method.
+ * count is assumed to be a multiple of write_buffer_size.
+ */
+static int amd_program_buffer(struct fbxmtd_dev *dev, uint32_t offset,
+			      const uint8_t *buf, size_t count)
+{
+	struct amd_flash_info *info;
+	int status = 0;
+	uint32_t sec_offset;
+
+	info = (struct amd_flash_info *)dev->priv_data;
+
+	sec_offset = offset;
+	while (count > 0) {
+		unsigned int word_count;
+		uint32_t data, last_offset;
+		int i;
+
+		/* send write to buffer at sector address */
+		amd_cmd_addr(&dev->map, sec_offset, CMD_WRITE_TO_BUFFER);
+
+		/* send number of words to be programmed minus 1 */
+		word_count = info->write_buffer_size / dev->map.flash_width;
+		fbxmtd_bus_width_write(&dev->map, sec_offset, word_count - 1);
+
+		data = 0;
+		for (i = 0; i < word_count; i++) {
+			const uint8_t *p;
+
+			/* prevent unaligned access on buf */
+			p = buf + (i * dev->map.flash_width);
+			data = fbxmtd_get_bus_word(&dev->map, p);
+
+			/* try to program requested data */
+			fbxmtd_bus_width_write(&dev->map, offset + i * 2, data);
+		}
+
+		/* send program buffer at sector address */
+		fbxmtd_bus_width_write(&dev->map,
+				       sec_offset, CMD_PROGRAM_BUFFER);
+
+		/* poll on last offset / data */
+		last_offset = offset + info->write_buffer_size -
+			dev->map.flash_width;
+		status = amd_status_poll(dev, last_offset, data);
+
+		if (status < 0) {
+			printk(KERN_ERR PFX "write buffer failed at offset "
+			       "0x%08x\n", offset);
+			amd_cmd(&dev->map, CMD_WRITE_TO_BUFFER_RESET);
+			return status;
+		}
+
+		offset += info->write_buffer_size;
+		buf += info->write_buffer_size;
+		count -= info->write_buffer_size;
+	}
+
+	amd_reset(&dev->map);
+
+	return 0;
+}
+
+/*
+ * program data in buf at requested offset
+ */
+static int amd_program(struct fbxmtd_dev *dev, uint32_t offset,
+		       const uint8_t *buf, size_t count)
+{
+	struct amd_flash_info *info;
+	int status = 0;
+
+	/* sanity check */
+	if (offset % dev->map.flash_width) {
+		printk(KERN_ERR PFX "program offset must be bus aligned !\n");
+		return -EIO;
+	}
+
+	if (count % dev->map.flash_width) {
+		printk(KERN_ERR PFX "program count must be bus aligned !\n");
+		return -EIO;
+	}
+
+	info = (struct amd_flash_info *)dev->priv_data;
+
+	/* if write buffer programming is supported and the byte count
+	 * is a multiple of the write buffer size, use it */
+	if (info->use_write_buffer && (count % info->write_buffer_size) == 0)
+		return amd_program_buffer(dev, offset, buf, count);
+
+	while (count > 0) {
+		uint32_t data;
+
+		/* prevent unaligned access on buf */
+		data = fbxmtd_get_bus_word(&dev->map, buf);
+
+		/* no need to program in case all bits are set */
+		if (fbxmtd_bus_word_equal(&dev->map, data, 0xffffffff))
+			goto next;
+
+		/* try to program requested data */
+		amd_cmd(&dev->map, CMD_PROGRAM_UNLOCK_DATA);
+		fbxmtd_bus_width_write(&dev->map, offset, data);
+		status = amd_status_poll(dev, offset, data);
+		if (status < 0) {
+			printk(KERN_ERR PFX "program failed at offset "
+			       "0x%08x\n", offset);
+			amd_reset(&dev->map);
+			return status;
+		}
+
+next:
+		offset += dev->map.flash_width;
+		buf += dev->map.flash_width;
+		count -= dev->map.flash_width;
+	}
+	amd_reset(&dev->map);
+
+	return 0;
+}
+
+/*
+ * Probe amd flash and return flash structure if found in table
+ */
+static const struct amd_flash_info *amd_probe(struct fbxmtd_dev_map *map)
+{
+	uint32_t mfr_id;
+	uint32_t dev_id;
+	uint32_t ext_id[2];
+	int i;
+
+	/* recover from bad state */
+	amd_cmd(map, CMD_WRITE_TO_BUFFER_RESET);
+	amd_reset(map);
+
+	/* read flash id */
+	amd_cmd(map, CMD_AUTOSELECT_DATA);
+	mfr_id = fbxmtd_bus_width_read(map, map->flash_width *
+				       ADDR_MANUFACTURER);
+	dev_id = fbxmtd_bus_width_read(map, map->flash_width *
+				       ADDR_DEVICE_ID);
+	ext_id[0] = fbxmtd_bus_width_read(map, map->flash_width *
+					  ADDR_EXT_DEVICE_ID);
+	ext_id[1] = fbxmtd_bus_width_read(map, map->flash_width *
+					  (ADDR_EXT_DEVICE_ID + 1));
+
+	/* return to read mode */
+	amd_reset(map);
+
+	for (i = 0; amd_flash_infos[i].mfr_id != 0; i++) {
+		const struct amd_flash_info *info = &amd_flash_infos[i];
+
+		if (mfr_id == info->mfr_id && dev_id == info->dev_id) {
+
+			if (info->use_ext_dev_id &&
+			    (ext_id[0] != info->ext_dev_id[0] ||
+			     ext_id[1] != info->ext_dev_id[1]))
+				continue;
+
+			printk(KERN_INFO PFX "Probed %s flash at 0x%08x, "
+			       "%d bit, size %ukB\n",
+			       info->name, (uint32_t)map->base,
+			       8 * (1 << (map->flash_width - 1)),
+			       info->size / 1024);
+			if (info->use_write_buffer)
+				printk(KERN_INFO PFX
+				       " -> using write buffer programming "
+				       "(%u bytes)\n",
+				       info->write_buffer_size);
+			return info;
+		}
+	}
+
+	if (mfr_id != 0xff && dev_id != 0xff) {
+		printk(KERN_NOTICE PFX "unknown flash at 0x%08x (man:%04x "
+		       "dev:%04x)\n", (uint32_t)map->base, mfr_id, dev_id);
+	}
+	return NULL;
+}
+
+/*
+ * return region information of device
+ */
+static struct fbxmtd_region *
+amd_get_region(struct fbxmtd_dev *dev)
+{
+	struct amd_flash_info *info;
+
+	info = (struct amd_flash_info *)dev->priv_data;
+	return info->regions;
+}
+
+/*
+ * return the total size of the device
+ */
+static uint32_t
+amd_get_size(struct fbxmtd_dev *dev)
+{
+	struct amd_flash_info *info;
+
+	info = (struct amd_flash_info *)dev->priv_data;
+	return info->size;
+}
+
+/*
+ * probe for an AMD flash and create mtd device if found
+ */
+struct fbxmtd_dev *fbxmtd_core_amd_probe(dma_addr_t base_phys,
+					 unsigned int flash_width)
+{
+	const struct amd_flash_info *info;
+	struct fbxmtd_dev_map map;
+	struct fbxmtd_dev *dev;
+	uint8_t *base_remap;
+
+	/* temporary remap base until we know full size */
+	if (!(base_remap = ioremap((unsigned long)base_phys, 0x20000))) {
+		printk(KERN_ERR PFX "first ioremap failed\n");
+		return NULL;
+	}
+
+	/* create temporary mapping during probe */
+	map.base = base_remap;
+	map.base_phys = base_phys;
+	map.flash_width = flash_width;
+
+	if ((info = amd_probe(&map)) == NULL) {
+		iounmap(base_remap);
+		return NULL;
+	}
+	iounmap(base_remap);
+
+	if ((dev = kmalloc(sizeof (struct fbxmtd_dev), GFP_KERNEL)) == NULL)
+		return NULL;
+
+	memset(dev, 0, sizeof (struct fbxmtd_dev));
+
+	dev->erase = amd_erase_sector;
+	dev->program = amd_program;
+	dev->chip_erase = amd_erase_chip;
+	dev->get_region_info = amd_get_region;
+	dev->get_size = amd_get_size;
+	dev->priv_data = (void *)info;
+
+	return dev;
+}
+
+EXPORT_SYMBOL(fbxmtd_core_amd_probe);
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_core_amd.h linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_core_amd.h
--- linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_core_amd.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_core_amd.h	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,82 @@
+
+#ifndef FBXMTD_CORE_AMD_H_
+# define FBXMTD_CORE_AMD_H_
+
+#define ADDR_MANUFACTURER		0x0000
+#define ADDR_DEVICE_ID			0x0001
+#define ADDR_SECTOR_LOCK		0x0002
+#define ADDR_HANDSHAKE			0x0003
+#define ADDR_EXT_DEVICE_ID		0x000E
+#define ADDR_UNLOCK1			0x5555
+#define ADDR_UNLOCK2			0xAAAA
+
+#define CMD_CHIP_ERASE			0x0010
+#define CMD_UNLOCK_BYPASS_MODE		0x0020
+#define CMD_SECTOR_ERASE		0x0030
+#define CMD_UNLOCK_DATA_1		0x00AA
+#define CMD_UNLOCK_DATA_2		0x0055
+#define CMD_UNLOCK_SECTOR		0x0060
+#define CMD_UNLOCK_ERASE		0x0080
+#define CMD_AUTOSELECT_DATA		0x0090
+#define CMD_PROGRAM_UNLOCK_DATA		0x00A0
+#define CMD_WRITE_TO_BUFFER		0x0025
+#define CMD_PROGRAM_BUFFER		0x0029
+#define CMD_SET_CONFIG			0x00D0
+#define CMD_RESET_DATA			0x00F0
+#define CMD_WRITE_TO_BUFFER_RESET	0x00F0
+
+#define D0_MASK				(0x0001 << 0)
+#define D1_MASK				(0x0001 << 1)
+#define D2_MASK				(0x0001 << 2)
+#define D3_MASK				(0x0001 << 3)
+#define D4_MASK				(0x0001 << 4)
+#define D5_MASK				(0x0001 << 5)
+#define D6_MASK				(0x0001 << 6)
+#define D7_MASK				(0x0001 << 7)
+
+
+#define MANUFACTURER_AMD		0x0001
+#define MANUFACTURER_FUJITSU		0x0004
+#define MANUFACTURER_ATMEL		0x001f
+#define MANUFACTURER_SPANSION		0x0001
+#define MANUFACTURER_ST			0x0020
+#define MANUFACTURER_NUMONIX		0x0089
+#define MANUFACTURER_MACRONIX		0x00C2
+#define MANUFACTURER_SST		0x00bf
+
+/* ATMEL */
+#define AT49BV322A			0x00c8
+
+/* Macronix */
+#define MX29LV320DT			0x22a7
+#define MX29LV320DB			0x22a8
+
+/* Spansion/AMD */
+#define AM29LV040B			0x004f
+#define SPANSION_GENERIC		0x227E
+#define S29AL032D_M3			0x22F6
+
+/* SST */
+#define SST39VF3201			0x235B
+#define SST39VF3202_REVB		0x235C
+#define SST39VF6401			0x236B
+#define SST39VF6401_REVB		0x236D
+
+#define MAX_REGION	4
+
+struct amd_flash_info
+{
+	uint16_t mfr_id;
+	uint16_t dev_id;
+	uint16_t ext_dev_id[2];
+	char *name;
+	uint32_t size;
+	int use_dq5;
+	int use_ext_dev_id;
+	int use_write_buffer;
+	unsigned int write_buffer_size;
+	int need_tpoll_delay;
+	struct fbxmtd_region regions[MAX_REGION + 1];
+};
+
+#endif /* !FBXMTD_CORE_AMD_H_ */
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_core.c linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_core.c
--- linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_core.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,845 @@
+/*
+ * Freebox Memory Technology Device driver
+ *
+ * Allow transparent access to flash (handle read/modify/erase/write)
+ * and partitioning of the device. Read/write accesses are mutually
+ * exclusive.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/fbxdmamux.h>
+#include <asm/io.h>
+
+#include <linux/fbxmtd.h>
+
+#include "fbxmtd_priv.h"
+
+#define PFX	"fbxmtd: "
+
+/*
+ * notifier stuff
+ */
+static struct list_head notifiers;
+static struct semaphore notifiers_mutex;
+
+static void fbxmtd_run_notifier(struct fbxmtd_part *part, uint32_t event)
+{
+	struct fbxmtd_notifier *n;
+
+	down(&notifiers_mutex);
+	list_for_each_entry(n, &notifiers, list) {
+		if (n->event_mask & event) {
+			n->cb(n->cb_data, part, event);
+		}
+	}
+	up(&notifiers_mutex);
+}
+
+int fbxmtd_register_notifier(void (*cb)(void *, struct fbxmtd_part *,
+					uint32_t),
+			     void *cb_data, uint32_t mask)
+{
+	struct fbxmtd_notifier *n;
+
+	if (!(n = kmalloc(sizeof (struct fbxmtd_notifier), GFP_KERNEL)))
+		return 1;
+	n->cb = cb;
+	n->cb_data = cb_data;
+	n->event_mask = mask;
+	INIT_LIST_HEAD(&n->list);
+
+	down(&notifiers_mutex);
+	list_add_tail(&n->list, &notifiers);
+	up(&notifiers_mutex);
+
+	return 0;
+}
+
+void fbxmtd_unregister_notifier(void (*cb)(void *, struct fbxmtd_part *,
+					   uint32_t))
+{
+	struct fbxmtd_notifier *n, *n2;
+
+	down(&notifiers_mutex);
+	list_for_each_entry_safe(n, n2, &notifiers, list) {
+		if (cb == n->cb) {
+			list_del(&n->list);
+			kfree(n);
+		}
+	}
+	up(&notifiers_mutex);
+}
+
+
+/*
+ * mtd device list
+ */
+static struct fbxmtd_dev *mtddevs[FBXMTD_MAX_DEVICES];
+static int mtddevs_count = 0;
+static struct semaphore mtddevs_mutex;
+
+/*
+ * helpers for list access; they assume the list lock is held
+ */
+static int _add_device(struct fbxmtd_dev *dev)
+{
+	struct fbxmtd_dev *p;
+	int	i, first;
+
+	/* check for room */
+	if (mtddevs_count == FBXMTD_MAX_DEVICES) {
+		printk(KERN_ERR PFX "FBXMTD_MAX_DEVICES reached\n");
+		return 1;
+	}
+
+	/* check for name clash */
+	first = -1;
+	for (i = 0; i < FBXMTD_MAX_DEVICES; i++) {
+		p = mtddevs[i];
+		if (p == NULL) {
+			/* first empty slot */
+			if (first == -1)
+				first = i;
+			continue;
+		}
+		if (!strcmp(p->name, dev->name)) {
+			printk(KERN_ERR PFX "duplicate device name "
+			       "\"%s\"\n", p->name);
+			return 1;
+		}
+	}
+
+	mtddevs[first] = dev;
+	dev->idx = first;
+	mtddevs_count++;
+	__module_get(THIS_MODULE);
+	init_MUTEX(&dev->sem);
+	atomic_set(&dev->refcount, 1);
+
+	return 0;
+}
+
+static struct fbxmtd_part *_get_part(unsigned int dev_idx,
+				     unsigned int part_idx)
+{
+	struct fbxmtd_dev *p;
+
+	/* find device */
+	p = mtddevs[dev_idx];
+
+	if (!p || p->dead)
+		return NULL;
+
+	/* find partition */
+	if (part_idx >= p->part_count) {
+		return NULL;
+	}
+
+	atomic_inc(&p->refcount);
+	return &p->parts[part_idx];
+}
+
+static struct fbxmtd_part *_get_part_by_name(unsigned int dev_idx,
+					     const char *part_name)
+{
+	struct fbxmtd_dev *p;
+	int i;
+
+	/* find device */
+	p = mtddevs[dev_idx];
+
+	if (!p || p->dead)
+		return NULL;
+
+	/* find partition */
+	for (i = 0; i < p->part_count; i++) {
+		if (!strcmp(p->parts[i].name, part_name)) {
+			atomic_inc(&p->refcount);
+			return &p->parts[i];
+		}
+	}
+	return NULL;
+}
+
+
+static void _put_device(struct fbxmtd_dev *dev)
+{
+	int i;
+
+	if (!atomic_dec_and_test(&dev->refcount))
+		return;
+
+	/* out of the list */
+	for (i = 0; i < FBXMTD_MAX_DEVICES; i++) {
+		if (mtddevs[i] == dev) {
+			mtddevs[i] = NULL;
+			break;
+		}
+	}
+	mtddevs_count--;
+
+	/* free it */
+	for (i = 0; i < dev->part_count; i++) {
+		if (dev->parts[i].name)
+			kfree(dev->parts[i].name);
+	}
+
+	if (dev->map.base) {
+		iounmap(dev->map.base);
+	}
+	kfree(dev->name);
+	kfree(dev);
+	module_put(THIS_MODULE);
+}
+
+/*
+ * list accessor
+ */
+static int fbxmtd_add_device(struct fbxmtd_dev *dev)
+{
+	int res;
+
+	down(&mtddevs_mutex);
+	res = _add_device(dev);
+	up(&mtddevs_mutex);
+	return res;
+}
+
+struct fbxmtd_part *fbxmtd_get_part(unsigned int dev_idx,
+				    unsigned int part_idx)
+{
+	struct fbxmtd_part *part;
+
+	if (down_interruptible(&mtddevs_mutex))
+		return NULL;
+	part = _get_part(dev_idx, part_idx);
+	up(&mtddevs_mutex);
+	return part;
+}
+
+struct fbxmtd_part *fbxmtd_get_part_by_name(unsigned int dev_idx,
+					    const char *part_name)
+{
+	struct fbxmtd_part *part;
+
+	if (down_interruptible(&mtddevs_mutex))
+		return NULL;
+	part = _get_part_by_name(dev_idx, part_name);
+	up(&mtddevs_mutex);
+	return part;
+}
+
+void fbxmtd_put_device(struct fbxmtd_dev *dev)
+{
+	down(&mtddevs_mutex);
+	_put_device(dev);
+	up(&mtddevs_mutex);
+}
+
+void fbxmtd_put_part(struct fbxmtd_part *part)
+{
+	down(&mtddevs_mutex);
+	_put_device(part->dev);
+	up(&mtddevs_mutex);
+}
+
+/*
+ * call the given callback for each existing partition; if cb returns
+ * true, a reference on the partition is kept for it
+ */
+int fbxmtd_foreach_part(int (cb)(void *, struct fbxmtd_part *),
+			void *cb_data)
+{
+	int i, j;
+
+	down(&mtddevs_mutex);
+
+	for (i = 0; i < FBXMTD_MAX_DEVICES; i++) {
+		struct fbxmtd_dev *p;
+
+		p = mtddevs[i];
+
+		if (!p || p->dead)
+			continue;
+
+		for (j = 0; j < p->part_count; j++)
+			if (cb(cb_data, &p->parts[j])) {
+				up(&mtddevs_mutex);
+				return 1;
+			}
+	}
+	up(&mtddevs_mutex);
+	return 0;
+}
+
+
+/*
+ * read from fbxmtd device
+ */
+int fbxmtd_read_dev(struct fbxmtd_dev *dev, uint32_t offset, char *buffer,
+		    unsigned int count)
+{
+	uint32_t size;
+
+	/* eof if device is dead */
+	if (dev->dead)
+		return 0;
+
+	size = dev->get_size(dev);
+
+	/* access outside the device range is an error */
+	if (offset > size || count > size - offset)
+		return -EINVAL;
+
+	if (down_interruptible(&dev->sem))
+		return -ERESTARTSYS;
+
+	memcpy_fromio(buffer, dev->map.base + offset, count);
+	up(&dev->sem);
+
+	return count;
+}
+
+/*
+ * find sector or next sector boundary for current offset
+ */
+static int find_sector_boundary(struct fbxmtd_dev *dev,
+				uint32_t offset, int want_next_sector,
+				uint32_t *boundary)
+{
+	struct fbxmtd_region *regs;
+	uint32_t r_offset, i;
+
+	regs = dev->get_region_info(dev);
+
+	r_offset = 0;
+	for (i = 0; regs[i].size != 0; i++) {
+		unsigned int sector;
+
+		if (offset < regs[i].offset ||
+		    (offset >= regs[i].offset + regs[i].count * regs[i].size))
+			continue;
+
+		/* this is the right region, get sector number */
+		sector = (offset - regs[i].offset) / regs[i].size;
+
+		if (want_next_sector) {
+			/* check if this was last sector of region */
+			if (sector < regs[i].count - 1)
+				sector++;
+			else {
+				/* yes, we want first sector of next
+				 * region if it exists */
+				i++;
+				if (!regs[i].size)
+					return -EINVAL;
+				sector = 0;
+			}
+		}
+
+		*boundary = regs[i].offset + sector * regs[i].size;
+		return 0;
+	}
+
+	/* offset is outside flash */
+	return -EINVAL;
+}
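+
+/*
+ * Worked example (illustrative, not part of the original patch): for a
+ * region starting at 0x10000 made of 64kB sectors, offset 0x25000 lies
+ * in the sector starting at 0x20000; the next sector boundary is
+ * 0x30000.
+ */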
+
+int fbxmtd_find_sector_boundary(struct fbxmtd_dev *dev,
+				uint32_t offset, uint32_t *boundary)
+{
+	return find_sector_boundary(dev, offset, 0, boundary);
+}
+
+int fbxmtd_find_next_sector_boundary(struct fbxmtd_dev *dev,
+				     uint32_t offset, uint32_t *boundary)
+{
+	return find_sector_boundary(dev, offset, 1, boundary);
+}
+
+
+#ifdef CONFIG_FREEBOX_MTD_USE_DMAMUX
+static int dma_transfer(uint8_t *dst, dma_addr_t hw_src, unsigned int count)
+{
+	struct fbxdmamux_req *req;
+
+	req = fbxdmamux_req_from_pool();
+	if (!req)
+		return 1;
+
+	req->chan_cookie = 0;
+	req->priority = 0;
+	req->hw_src = hw_src;
+	req->virt_dst = dst;
+	req->len = count;
+	req->flags = FBXDMAMUX_FLAG_SRC_HW;
+	return fbxdmamux_submit_and_sleep(req, HZ * 60);
+}
+#endif
+
+/*
+ * read from fbxmtd partition
+ */
+int fbxmtd_read_part(struct fbxmtd_part *part, uint32_t offset, char *buffer,
+		     unsigned int count, int can_sleep)
+{
+	/* eof if device is dead */
+	if (part->dev->dead)
+		return 0;
+
+	/* access outside the partition range is an error */
+	if (offset > part->size || count > part->size - offset)
+		return -EINVAL;
+
+	if (!can_sleep) {
+		if (down_trylock(&part->dev->sem))
+			return -EWOULDBLOCK;
+	} else {
+		if (down_interruptible(&part->dev->sem))
+			return -ERESTARTSYS;
+	}
+	/* assume we can use it as memory mapped io */
+	offset += part->offset;
+
+#ifdef CONFIG_FREEBOX_MTD_USE_DMAMUX
+	/* use dma */
+	if (!can_sleep) {
+		up(&part->dev->sem);
+		return -EWOULDBLOCK;
+	}
+
+	/* if dma transfer is not possible or failed, fallback on memcpy */
+	if (virt_addr_valid(buffer)
+	    && dma_transfer(buffer, (part->dev->map.base_phys + offset),
+			    count) == 0) {
+		up(&part->dev->sem);
+		return count;
+	}
+	/* fallback */
+#endif
+
+	/* use memcpy */
+	memcpy_fromio(buffer, part->dev->map.base + offset, count);
+	up(&part->dev->sem);
+
+	return count;
+}
+
+
+/*
+ * write to fbxmtd partition
+ */
+int fbxmtd_write_part(struct fbxmtd_part *part, uint32_t offset, char *buffer,
+		      unsigned int count)
+{
+	struct fbxmtd_dev *dev;
+	struct fbxmtd_region *regs;
+	unsigned int total_len;
+
+	/* eof if device is dead */
+	if (part->dev->dead)
+		return 0;
+
+	/* check partition is rw */
+	if (part->rw == 0)
+		return -EBADF;
+
+	/* access outside the range of the partition is an error */
+	if (offset >= part->size)
+		return -EINVAL;
+	if (count > part->size - offset)
+		return -EFBIG;
+
+	/* fetch flash region information */
+	dev = part->dev;
+	regs = dev->get_region_info(dev);
+
+	/* calculate real offset in device */
+	offset = offset + part->offset;
+
+	/* start read/erase/modify/write loop */
+	total_len = 0;
+	while (count > 0) {
+		int		i, j, res, erase_needed, program_needed;
+		int		after_all_one;
+		uint32_t	r_offset, size;
+		uint8_t		*tbuf;
+
+		/* find flash region for this offset */
+		r_offset = 0;
+		for (i = 0; regs[i].size != 0; i++) {
+			if (offset < regs[i].offset ||
+			    (offset >= regs[i].offset +
+			     regs[i].count * regs[i].size))
+				continue;
+			/* this is the right region, get sector number */
+			res = (offset - regs[i].offset) / regs[i].size;
+			r_offset = regs[i].offset + res * regs[i].size;
+			break;
+		}
+
+		if (!regs[i].size) {
+			/* oops, no region found, region desc must be wrong */
+			printk(KERN_ERR PFX "%s/%s: couldn't find associated "
+			       "region for offset %08x\n", dev->name,
+			       part->name, offset);
+			return -EIO;
+		}
+
+		/* read the current data */
+		if (!(tbuf = kmalloc(regs[i].size, GFP_KERNEL)))
+			return -ENOMEM;
+
+		/* size is the part of the data we will overwrite */
+		size = r_offset + regs[i].size - offset;
+		if (count < size)
+			size = count;
+
+		if (down_interruptible(&dev->sem))
+			return -ERESTARTSYS;
+
+		/* read whole sector */
+		memcpy_fromio(tbuf, dev->map.base + r_offset, regs[i].size);
+
+		/*
+		 * check whether we need to erase the sector; this is
+		 * the case if at least one bit of the modified data
+		 * has to change from 0 to 1.
+		 *
+		 * we use a simple XOR/AND to test this: a XOR between
+		 * old and new data gives us the bits that have been
+		 * toggled.
+		 *
+		 * If a logical AND between this and the original data
+		 * gives the same value, then all toggled bits were set
+		 * in the original data, and no erase is needed.
+		 *
+		 * orig ^ new = toggled
+		 * orig & toggled = common_modified_bits
+		 * (common_modified_bits == toggled) -> no erase needed
+		 */
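+		/*
+		 * Worked byte example (illustrative, not from the
+		 * original code):
+		 *   orig = 0xF0, new = 0xB0 -> toggled = 0x40,
+		 *   orig & toggled = 0x40 == toggled: only 1->0
+		 *   transitions, programming alone is enough.
+		 *   orig = 0xF0, new = 0xF8 -> toggled = 0x08,
+		 *   orig & toggled = 0x00 != toggled: a 0->1
+		 *   transition is needed, the sector must be erased.
+		 */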
+		erase_needed = 0;
+		program_needed = 0;
+		after_all_one = 1;
+
+		for (j = 0; j < size; j++) {
+			uint8_t before, after, toggled;
+
+			before = tbuf[(offset - r_offset) + j];
+			after = buffer[j];
+
+			/*
+			 * remember if any new byte value is to be
+			 * different from 0xff
+			 */
+			if (after != 0xff)
+				after_all_one = 0;
+
+			if (before != after) {
+				/*
+				 * we want to change the byte value
+				 */
+				if (after == 0xff) {
+					/*
+					 * we want to change it to
+					 * 0xff, the only way to do
+					 * this is to erase the sector
+					 */
+					erase_needed = 1;
+				} else {
+					/*
+					 * we want to set the byte to
+					 * something other than 0xff;
+					 * this implies programming the
+					 * sector, and may also imply
+					 * erasing if some bits need
+					 * to be changed from 0 to 1
+					 */
+					program_needed = 1;
+					toggled = before ^ after;
+					if ((before & toggled) != toggled)
+						erase_needed = 1;
+				}
+			}
+
+			/*
+			 * if we decide that erasing the sector is
+			 * needed then all previous data will be lost;
+			 * thus if any new byte is to be set to
+			 * anything other than 0xff, we have to
+			 * program after erasing.
+			 */
+			if (erase_needed && !after_all_one)
+				program_needed = 1;
+
+			if (program_needed && erase_needed)
+				break;
+		}
+
+		/* modify data */
+		memcpy(tbuf + (offset - r_offset), buffer, size);
+
+		/* erase sector if needed */
+		if (erase_needed && dev->erase(dev, r_offset)) {
+			up(&dev->sem);
+			kfree(tbuf);
+			printk(KERN_ERR PFX "%s/%s: erase failed at "
+			       "[0x%08x/%d]\n", dev->name,
+			       part->name, r_offset, regs[i].size);
+			return -EIO;
+		}
+
+		/* program it with the new data */
+		if (program_needed &&
+		    dev->program(dev, r_offset, tbuf, regs[i].size)) {
+			up(&dev->sem);
+			kfree(tbuf);
+			printk(KERN_ERR PFX "%s/%s: program failed at "
+			       "[0x%08x/%d]\n", dev->name,
+			       part->name, r_offset, regs[i].size);
+			return -EIO;
+		}
+
+		up(&dev->sem);
+		kfree(tbuf);
+
+		if (printk_ratelimit())
+			printk(KERN_DEBUG PFX "%s/%s: %s%s%s%soffset=0x%08x "
+			       "segment=[0x%08x/%d]\n", dev->name,
+			       part->name,
+			       erase_needed ? "ERASE" : "",
+			       (erase_needed && program_needed) ? "/" : "",
+			       program_needed ? "PRGM" : "",
+			       (erase_needed || program_needed) ? " " : "",
+			       offset, r_offset, regs[i].size);
+
+		count -= size;
+		total_len += size;
+		offset += size;
+		buffer += size;
+
+		/* give readers a chance */
+		if (need_resched())
+			yield();
+	}
+
+	return total_len;
+}
+
+
+/*
+ * add partitions to fbxmtd device
+ */
+int fbxmtd_set_partitions(struct fbxmtd_dev *dev, struct fbxmtd_part *parts,
+			  unsigned int count)
+{
+	unsigned int	i, j, size;
+	int res;
+
+	/* grab the device list lock, so nobody can get the device while
+	 * we are changing its partitions */
+	down(&mtddevs_mutex);
+	res = 0;
+
+	if (dev->dead) {
+		res = -ENOENT;
+		goto out;
+	}
+
+	/* can't (re)partition while device is in use */
+	if (atomic_read(&dev->refcount) > 1) {
+		printk(KERN_ERR PFX "device \"%s\" is busy\n", dev->name);
+		res = -EBUSY;
+		goto out;
+	}
+
+	/* check partitions */
+	size = dev->get_size(dev);
+	for (i = 0; i < count; i++) {
+
+		if (!parts[i].name[0]) {
+			printk(KERN_ERR PFX "invalid partition %d name\n", i);
+			res = -EINVAL;
+			goto out;
+		}
+
+		/* (size == - 1) means greatest possible size */
+		if (parts[i].size == -1)
+			parts[i].size = size - parts[i].offset;
+
+		if (parts[i].offset % 2) {
+			printk(KERN_ERR PFX "odd partition %d offset\n", i);
+			res = -EINVAL;
+			goto out;
+		}
+
+		if (parts[i].size % 2) {
+			printk(KERN_ERR PFX "odd partition %d size\n", i);
+			res = -EINVAL;
+			goto out;
+		}
+
+		if (parts[i].offset + parts[i].size > size) {
+			printk(KERN_ERR PFX "partition %d size too big\n", i);
+			res = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* check that all partitions have different names */
+	for (i = 0; i < count; i++) {
+		for (j = 0; j < count; j++) {
+			if (i == j)
+				continue;
+			if (!strcmp(parts[i].name, parts[j].name)) {
+				printk(KERN_ERR PFX "duplicate partition "
+				       "name: %s\n", parts[i].name);
+				res = -EINVAL;
+				goto out;
+			}
+		}
+	}
+
+	/* partitions seem ok, replace the previous ones with the new ones */
+	for (i = 0; i < dev->part_count; i++) {
+		if (dev->parts[i].name) {
+			kfree(dev->parts[i].name);
+			dev->parts[i].name = NULL;
+		}
+	}
+
+	for (i = 0; i < count; i++) {
+		dev->parts[i] = parts[i];
+		dev->parts[i].name = kstrdup(parts[i].name, GFP_KERNEL);
+		dev->parts[i].dev = dev;
+		dev->parts[i].idx = i;
+	}
+	dev->part_count = count;
+
+out:
+	up(&mtddevs_mutex);
+
+	/* notify partition change; only the caller may change partitions
+	 * again, so it is safe to read the partition table from this
+	 * event */
+	for (i = 0; i < dev->part_count; i++)
+		fbxmtd_run_notifier(&dev->parts[i], FBXMTD_EVENT_ADD);
+
+	return res;
+}
+
+/*
+ * ask for device removal
+ */
+void fbxmtd_mark_dead_dev(struct fbxmtd_dev *dev)
+{
+	int i;
+
+	dev->dead = 1;
+	/* notify dead device */
+	for (i = 0; i < dev->part_count; i++)
+		fbxmtd_run_notifier(&dev->parts[i], FBXMTD_EVENT_DEAD);
+}
+
+
+/*
+ * probe for an fbxmtd device at specified address
+ */
+struct fbxmtd_dev *fbxmtd_probe(const char *name, dma_addr_t base_phys,
+				unsigned int flash_width)
+{
+	struct fbxmtd_dev *dev;
+
+	/* probe using all the methods we know */
+#ifdef CONFIG_FREEBOX_MTD_BACKEND_AMD
+	if ((dev = fbxmtd_core_amd_probe(base_phys, flash_width)))
+		goto found;
+#endif
+#ifdef CONFIG_FREEBOX_MTD_BACKEND_INTEL
+	if ((dev = fbxmtd_core_intel_probe(base_phys, flash_width)))
+		goto found;
+#endif
+	/* nothing found */
+	return NULL;
+
+found:
+	/* add the device */
+	if (!(dev->name = kstrdup(name, GFP_KERNEL))) {
+		printk(KERN_ERR PFX "kstrdup failed\n");
+		goto free;
+	}
+
+	/* create final flash mapping */
+	dev->map.base_phys = base_phys;
+	dev->map.flash_width = flash_width;
+	if (!(dev->map.base = ioremap_nocache((unsigned long)base_phys,
+					      dev->get_size(dev)))) {
+		printk(KERN_ERR PFX "ioremap failed\n");
+		goto free;
+	}
+
+	if (fbxmtd_add_device(dev)) {
+		printk(KERN_ERR PFX "can't add device \"%s\"\n", name);
+		goto free;
+	}
+
+	return dev;
+free:
+	if (dev->name)
+		kfree(dev->name);
+	if (dev->map.base)
+		iounmap(dev->map.base);
+	kfree(dev);
+	return NULL;
+}
+
+
+static int __init fbxmtd_init(void)
+{
+	printk(KERN_INFO PFX "Freebox Memory Technology Device driver\n");
+#ifdef CONFIG_FREEBOX_MTD_USE_DMAMUX
+	printk(KERN_INFO PFX " -> using fbxdmamux for read transfer\n");
+#endif
+
+	INIT_LIST_HEAD(&notifiers);
+	init_MUTEX(&mtddevs_mutex);
+	init_MUTEX(&notifiers_mutex);
+
+	return 0;
+}
+
+static void __exit fbxmtd_exit(void)
+{
+}
+
+
+module_init(fbxmtd_init);
+module_exit(fbxmtd_exit);
+
+EXPORT_SYMBOL(fbxmtd_probe);
+EXPORT_SYMBOL(fbxmtd_mark_dead_dev);
+EXPORT_SYMBOL(fbxmtd_set_partitions);
+EXPORT_SYMBOL(fbxmtd_foreach_part);
+
+EXPORT_SYMBOL(fbxmtd_register_notifier);
+EXPORT_SYMBOL(fbxmtd_unregister_notifier);
+
+EXPORT_SYMBOL(fbxmtd_read_dev);
+EXPORT_SYMBOL(fbxmtd_read_part);
+EXPORT_SYMBOL(fbxmtd_write_part);
+
+EXPORT_SYMBOL(fbxmtd_find_sector_boundary);
+EXPORT_SYMBOL(fbxmtd_find_next_sector_boundary);
+
+EXPORT_SYMBOL(fbxmtd_get_part_by_name);
+EXPORT_SYMBOL(fbxmtd_get_part);
+EXPORT_SYMBOL(fbxmtd_put_part);
+EXPORT_SYMBOL(fbxmtd_put_device);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_core_io.c linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_core_io.c
--- linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_core_io.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_core_io.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,120 @@
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/io.h>
+
+#include <linux/fbxmtd.h>
+#include "fbxmtd_priv.h"
+
+
+#define FLASH_IO_READ_8(x)	__raw_readb((void *)x)
+#define FLASH_IO_READ_16(x)	__raw_readw((void *)x)
+#define FLASH_IO_READ_32(x)	__raw_readl((void *)x)
+
+#define FLASH_IO_WRITE_8(x,d)	__raw_writeb(d, (void *)x)
+#define FLASH_IO_WRITE_16(x,d)	__raw_writew(d, (void *)x)
+#define FLASH_IO_WRITE_32(x,d)	__raw_writel(d, (void *)x)
+
+/*
+ * read/write data by doing a flash bus width access
+ */
+uint32_t fbxmtd_bus_width_read(struct fbxmtd_dev_map *map,
+			       uint32_t offset)
+{
+	uint8_t *address;
+	uint32_t val;
+
+	address = map->base + offset;
+
+	switch (map->flash_width) {
+	case 1:
+		val = (uint32_t)FLASH_IO_READ_8(address);
+		break;
+
+	case 2:
+		val = (uint32_t)FLASH_IO_READ_16(address);
+		break;
+
+	case 4:
+		val = (uint32_t)FLASH_IO_READ_32(address);
+		break;
+
+	default:
+		printk(KERN_ERR "flash width not supported\n");
+		return 0;
+	}
+	return val;
+}
+
+void fbxmtd_bus_width_write(struct fbxmtd_dev_map *map, uint32_t offset,
+			    uint32_t data)
+{
+	uint8_t *address;
+
+	address = map->base + offset;
+
+	switch (map->flash_width) {
+	case 1:
+		FLASH_IO_WRITE_8(address, data);
+		break;
+
+	case 2:
+		FLASH_IO_WRITE_16(address, data);
+		break;
+
+	case 4:
+		FLASH_IO_WRITE_32(address, data);
+		break;
+
+	default:
+		printk(KERN_ERR "flash width not supported\n");
+		break;
+	}
+}
+
+uint32_t fbxmtd_get_bus_word(struct fbxmtd_dev_map *map, const uint8_t *buf)
+{
+	uint32_t val;
+
+	switch (map->flash_width) {
+	case 1:
+		val = buf[0];
+		break;
+
+	case 2:
+		val = (buf[0] << 8) | buf[1];
+		val = be16_to_cpu(val);
+		break;
+
+	case 4:
+		val = (buf[0] << 24) | (buf[1] << 16) |
+			(buf[2] << 8) | buf[3];
+		val = be32_to_cpu(val);
+		break;
+
+	default:
+		printk(KERN_ERR "flash width not supported\n");
+		return 0;
+	}
+
+	return val;
+}
+
+int fbxmtd_bus_word_equal(struct fbxmtd_dev_map *map, uint32_t d1,
+			  uint32_t d2)
+{
+	switch (map->flash_width) {
+	case 1:
+		return ((d1 & 0xff) == (d2 & 0xff));
+
+	case 2:
+		return ((d1 & 0xffff) == (d2 & 0xffff));
+
+	case 4:
+		return ((d1 & 0xffffffff) == (d2 & 0xffffffff));
+
+	default:
+		printk(KERN_ERR "flash width not supported\n");
+		return 0;
+	}
+}
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_map_drv_generic.c linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_map_drv_generic.c
--- linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_map_drv_generic.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_map_drv_generic.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,440 @@
+/*
+ * fbxmtd_map_drv_generic.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Thu Jan 18 23:00:15 2007
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/crc32.h>
+
+#include <fbximagetag.h>
+#include <linux/fbxmtd.h>
+
+#define PFX	"fbxmtd_map_drv_generic: "
+
+static struct fbxmtd_dev *dev = NULL;
+
+static struct fbxmtd_part partitions[FBXMTD_MAX_PART];
+uint32_t partition_count;
+
+struct cmplzma_header
+{
+	u32	dst;
+	u32	entry;
+	u32	len;
+};
+
+/*
+ * read the image tag at offset and set the partition offset/size for
+ * the romfs; this comes from the legacy fbxmtd map code.
+ */
+static void
+read_imagetag(struct fbxmtd_part *parent,
+	      struct fbxmtd_part *part,
+	      int check_crc, int skip_kernel)
+{
+	struct fbx_imagetag tag;
+	int ret;
+	int offset;
+
+	offset = parent->offset;
+
+	if (skip_kernel) {
+		/*
+		 * on fbx4, the CFE only knows how to boot a cmplzma
+		 * compressed kernel image.
+		 *
+		 * the bank1 partition starts with a compressed kernel
+		 * image, immediately followed by an image tag aligned
+		 * on a 4k page boundary. we skip the image here and
+		 * look for the image tag right after it.
+		 */
+		struct cmplzma_header hdr;
+
+		ret = fbxmtd_read_dev(dev, parent->offset, (u8*)&hdr,
+				      sizeof (hdr));
+		if (ret != sizeof (hdr))
+			printk(KERN_ERR PFX "%s: unable to read cmplzma "
+			       "header at 0x%08x\n", parent->name,
+			       parent->offset);
+
+		offset += be32_to_cpu(hdr.len);
+
+		/* align offset on a 4k page boundary. */
+		offset &= ~0xfff;
+		offset += 0x1000;
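+
+		/*
+		 * illustrative example, not from the original code: a
+		 * compressed image ending at 0x102340 gives an expected
+		 * tag offset of 0x103000 (note that an already aligned
+		 * offset is still bumped to the next 4k boundary).
+		 */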
+
+		printk(KERN_INFO PFX "image tag expected at 0x%08x\n", offset);
+	}
+
+	/* read the tag */
+	ret = fbxmtd_read_dev(dev, offset,
+			      (uint8_t *)&tag, sizeof (tag));
+	if (ret != sizeof(tag)) {
+		printk(KERN_ERR PFX "%s: unable to read image tag "
+		       "at 0x%08x\n", parent->name, offset);
+		return;
+	}
+
+	/* check the tag */
+	if (be32_to_cpu(tag.magic) != FBX_IMAGETAG_MAGIC) {
+		printk(KERN_NOTICE PFX "%s: invalid tag magic "
+		       "(0x%08x, expected 0x%08x)\n", parent->name,
+		       be32_to_cpu(tag.magic), FBX_IMAGETAG_MAGIC);
+		return;
+	}
+
+	if (check_crc) {
+		unsigned char buf[512];
+		int i, len;
+		unsigned int size, crc = 0;
+
+		/* calculate CRC32 of the whole image minus the crc
+		 * field itself */
+		size = be32_to_cpu(tag.total_size);
+		for (i = 0; i < size; i += 512) {
+			/* read some data */
+			len = (size - i) > 512 ? 512 : size - i;
+			ret = fbxmtd_read_dev(dev, offset + i,
+					      buf, len);
+			if (ret != len) {
+				printk(KERN_ERR PFX
+				       "%s: unable to check crc\n",
+				       parent->name);
+				return;
+			}
+
+			/* skip image tag CRC32 field */
+			if (i == 0)
+				crc = crc32(crc, buf + 4, len - 4);
+			else
+				crc = crc32(crc, buf, len);
+		}
+
+		if (crc != be32_to_cpu(tag.crc32)) {
+			printk(KERN_NOTICE PFX "%s: invalid image CRC "
+			       "(0x%08x, expected 0x%08x)\n", parent->name,
+			       crc, be32_to_cpu(tag.crc32));
+			return;
+		}
+		printk("CRC on %s ok.\n", part->name);
+	}
+
+	if (!(be32_to_cpu(tag.flags) & FBX_IMAGETAG_FLAGS_HAS_FS)) {
+		printk(KERN_NOTICE PFX "%s: image has no FS\n", parent->name);
+		goto end;
+	}
+
+	/* set right offset for fs */
+	part->offset = offset + be32_to_cpu(tag.fs_offset);
+	part->size = be32_to_cpu(tag.fs_size);
+	/* round an odd fs size up to an even value */
+	if (part->size % 2) {
+		part->size++;
+	}
+
+	/*
+	 * sanity check: check that fs partition ends before the
+	 * parent end.
+	 */
+	if (part->offset + part->size > parent->offset + parent->size) {
+		printk(KERN_ERR PFX "skipping partition %s, partition ends "
+		       "after parent end, makes no sense.\n", parent->name);
+		part->size = 0;
+		part->offset = 0;
+	}
+
+end:
+	tag.name[127] = 0;
+	tag.builder[31] = 0;
+	printk(KERN_INFO PFX "%s: tag \"%s\" by \"%s\"\n", parent->name,
+	       tag.name, tag.builder);
+}
+
+struct fbxmtd_platform_part *
+get_partition_by_name(const char *name, struct fbxmtd_platform_part *parts,
+		      unsigned int num)
+{
+	int i;
+
+	if (name == NULL)
+		return NULL;
+
+	for (i = 0; i < num; ++i) {
+		if (!strcmp(name, parts[i].name))
+			return &parts[i];
+	}
+	return NULL;
+}
+
+static int
+fbxmtd_map_drv_generic_probe(struct platform_device *pdev)
+{
+	int res;
+	uint32_t i;
+	struct fbxmtd_platform_data *pdat;
+	struct fbxmtd_platform_part *local_parts = NULL;
+	uint32_t size;
+
+	printk(KERN_DEBUG PFX "probe.\n");
+
+	pdat = pdev->dev.platform_data;
+
+	/* sanity check on platform data */
+	if (pdat == NULL) {
+		printk(KERN_ERR PFX "fbxmtd platform data is missing.\n");
+		return -ENODEV;
+	}
+
+	/* by default ... */
+	pdat->status = E_FBXMTD_FAULTY;
+	if (pdat->num_parts == 0 || pdat->parts == NULL) {
+		printk(KERN_ERR PFX "fbxmtd platform data is missing a "
+		       "partition table.\n");
+		return -ENODEV;
+	}
+
+	dev = fbxmtd_probe(pdat->name, pdat->base, pdat->width);
+	if (dev == NULL)
+		return -ENODEV;
+	size = dev->get_size(dev);
+	pdat->size = size;
+
+	printk(KERN_INFO PFX "flash size is %iM.\n", size >> 20);
+
+	/*
+	 * pdat->parts is const and that is a good thing. however we need
+	 * to change some fields, so kmalloc local_parts and memcpy into it.
+	 */
+	local_parts = kmalloc(pdat->num_parts * sizeof (*local_parts),
+			      GFP_KERNEL);
+	if (local_parts == NULL) {
+		res = -ENOMEM;
+		goto out_cleanup;
+	}
+	memcpy(local_parts, pdat->parts,
+	       pdat->num_parts * sizeof (*local_parts));
+
+	/*
+	 * if FBXMTD_PART_MAP_ALL is set on partition 0 then it has
+	 * offset 0 and covers the whole flash.
+	 */
+	if (local_parts[0].flags & FBXMTD_PART_MAP_ALL) {
+		local_parts[0].offset = 0;
+		local_parts[0].size = size;
+		printk(KERN_INFO PFX "partition `%s' covers the whole "
+		       "flash.\n", pdat->parts[0].name);
+	}
+
+	/*
+	 * adjust offset values depending on roffset.
+	 */
+	for (i = 0; i < pdat->num_parts; ++i) {
+		if (local_parts[i].roffset) {
+			if (local_parts[i].roffset >= size) {
+				printk(KERN_ERR PFX "partition %s roffset "
+				       "too big!\n", local_parts[i].name);
+				local_parts[i].offset = 0;
+				local_parts[i].size = 0;
+				continue ;
+			}
+			local_parts[i].offset = size - local_parts[i].roffset;
+		}
+	}
+
+	/*
+	 * handle FBXMTD_PART_AUTOSIZE, end partition at the starting
+	 * offset of the next one, or the end of the flash.
+	 */
+	for (i = 0; i < pdat->num_parts; ++i) {
+		struct fbxmtd_platform_part *align_part;
+
+		if ((local_parts[i].flags & FBXMTD_PART_AUTOSIZE) == 0)
+			continue ;
+		align_part = get_partition_by_name(local_parts[i].align_part,
+						   local_parts,
+						   pdat->num_parts);
+		if (align_part == NULL) {
+			printk(KERN_ERR PFX "%s: no partition to align "
+			       "with, align with flash end.\n",
+			       local_parts[i].name);
+			local_parts[i].size = size - local_parts[i].offset;
+			continue ;
+		}
+
+		if (local_parts[i].offset >= align_part->offset) {
+			printk(KERN_ERR PFX "%s starts after %s: unable to "
+			       "align.\n", local_parts[i].name,
+			       align_part->name);
+			continue ;
+		}
+		local_parts[i].size = align_part->offset -
+		  local_parts[i].offset;
+	}
+
+	/*
+	 * build the partition table from the platform partition table.
+	 * for partitions that have FBXMTD_PART_HAS_FS set, a partition
+	 * named $name_fs is added with the data found in the image
+	 * tag.
+	 */
+	for (partition_count = 0, i = 0; i < pdat->num_parts; ++i) {
+		const struct fbxmtd_platform_part *p;
+		char *name;
+
+		if (partition_count >= FBXMTD_MAX_PART) {
+			printk(KERN_ERR PFX "platform partition count too "
+			       "big.\n");
+			res = -EINVAL;
+			goto out_cleanup;
+		}
+		p = &local_parts[i];
+
+		/*
+		 * stop now if the partition is bigger than the
+		 * available flash size.
+		 */
+		if (p->offset + p->size > size)
+			break;
+
+		if (p->name) {
+			name = kstrdup(p->name, GFP_KERNEL);
+		} else {
+			/* set name to "part%d" */
+			name = kmalloc(8, GFP_KERNEL);
+		}
+		if (name == NULL) {
+			res = -ENOMEM;
+			goto out_cleanup;
+		}
+
+		if (!p->name)
+			sprintf(name, "part%d", partition_count);
+
+		partitions[partition_count].name = name;
+		partitions[partition_count].offset = p->offset;
+		partitions[partition_count].size = p->size;
+
+		if (p->flags & FBXMTD_PART_RW)
+			partitions[partition_count].rw = 1;
+		else
+			partitions[partition_count].rw = 0;
+
+
+		++partition_count;
+
+		if (p->flags & FBXMTD_PART_HAS_FS) {
+			if (partition_count >= FBXMTD_MAX_PART) {
+				printk(KERN_ERR PFX "platform partition "
+				       "count too big.\n");
+				res = -EINVAL;
+				goto out_cleanup;
+			}
+
+			name = kmalloc(strlen(p->name) + 4, GFP_KERNEL);
+			if (name == NULL) {
+				res = -ENOMEM;
+				goto out_cleanup;
+			}
+			snprintf(name, strlen(p->name) + 4, "%s_fs",
+				 p->name);
+
+			partitions[partition_count].name = name;
+			if (p->flags & FBXMTD_PART_IGNORE_TAG) {
+				partitions[partition_count].offset = 0;
+				partitions[partition_count].size = 0;
+			} else {
+				read_imagetag(&partitions[partition_count - 1],
+					      &partitions[partition_count],
+					      p->flags & FBXMTD_PART_CHECK_CRC,
+					      p->flags & FBXMTD_PART_SKIP_KERNEL);
+			}
+			++partition_count;
+		}
+	}
+
+	/*
+	 * print partition table.
+	 */
+	printk(PFX "partition table:\n");
+	for (i = 0; i < partition_count; ++i) {
+		struct fbxmtd_part *p;
+
+		p = &partitions[i];
+		printk("  %s(%i): %08x -> %08x (%iK), %s\n",
+		       p->name, i, p->offset,
+		       p->offset + p->size, p->size >> 10,
+		       p->rw ? "rw" : "ro");
+	}
+
+	res = fbxmtd_set_partitions(dev, partitions, partition_count);
+	if (res < 0) {
+		printk(KERN_ERR PFX "failed to set partition.\n");
+	} else {
+		pdat->core_dev = dev;
+		pdat->status = E_FBXMTD_PROBED;
+	}
+
+ out_cleanup:
+	if (local_parts)
+		kfree(local_parts);
+
+	for (i = 0; i < partition_count; ++i) {
+		if (partitions[i].name) {
+			kfree(partitions[i].name);
+			partitions[i].name = NULL;
+		}
+	}
+
+	if (dev && res < 0) {
+		fbxmtd_put_device(dev);
+		dev = NULL;
+	}
+	return res;
+}
+
+static int
+fbxmtd_map_drv_generic_remove(struct platform_device *pdev)
+{
+	if (dev) {
+		fbxmtd_mark_dead_dev(dev);
+		fbxmtd_put_device(dev);
+	}
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+
+static struct platform_driver fbxmtd_map_drv_generic =
+{
+	.probe	= fbxmtd_map_drv_generic_probe,
+	.remove	= fbxmtd_map_drv_generic_remove,
+	.driver = {
+		.owner	= THIS_MODULE,
+		.name	= "fbxmtd_map_drv",
+	},
+};
+
+int __init
+fbxmtd_map_drv_generic_init(void)
+{
+	printk(KERN_INFO PFX "2007, Freebox SA.\n");
+
+	platform_driver_register(&fbxmtd_map_drv_generic);
+	return 0;
+}
+
+void __exit
+fbxmtd_map_drv_generic_exit(void)
+{
+	platform_driver_unregister(&fbxmtd_map_drv_generic);
+}
+
+module_init(fbxmtd_map_drv_generic_init);
+module_exit(fbxmtd_map_drv_generic_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_priv.h linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_priv.h
--- linux-2.6.20.14-fbx/drivers/fbxmtd./fbxmtd_priv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/fbxmtd_priv.h	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,44 @@
+
+#ifndef FBXMTD_PRIV_H_
+# define FBXMTD_PRIV_H_
+
+/*
+ * notifier
+ */
+#define FBXMTD_EVENT_PART	(1 << 0)
+#define FBXMTD_EVENT_DEAD	(1 << 1)
+
+struct fbxmtd_notifier
+{
+	uint32_t		event_mask;
+	void			(*cb)(void *cb_data,
+				      struct fbxmtd_part *, uint32_t);
+	void			*cb_data;
+	struct list_head	list;
+};
+
+
+/*
+ * io helper used by backend
+ */
+uint32_t fbxmtd_bus_width_read(struct fbxmtd_dev_map *map,
+			       uint32_t offset);
+
+void fbxmtd_bus_width_write(struct fbxmtd_dev_map *map, uint32_t offset,
+			    uint32_t data);
+
+uint32_t fbxmtd_get_bus_word(struct fbxmtd_dev_map *map, const uint8_t *buf);
+
+int fbxmtd_bus_word_equal(struct fbxmtd_dev_map *map, uint32_t d1,
+			  uint32_t d2);
+
+
+/*
+ * backend chip handler
+ */
+struct fbxmtd_dev *fbxmtd_core_amd_probe(dma_addr_t base_phys,
+					 unsigned int flash_width);
+struct fbxmtd_dev *fbxmtd_core_intel_probe(dma_addr_t base_phys,
+					   unsigned int flash_width);
+
+#endif /* ! FBXMTD_PRIV_H_ */
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./Kconfig linux-2.6.20.14-fbx/drivers/fbxmtd/Kconfig
--- linux-2.6.20.14-fbx/drivers/fbxmtd./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/Kconfig	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,55 @@
+menu "Freebox Memory Technology Devices (FBXMTD)"
+
+comment "Core drivers"
+
+#
+# Freebox MTD
+#
+config FREEBOX_MTD
+	tristate "Freebox Memory Technology Devices (FBXMTD) support"
+	select FREEBOX_DRIVERS
+
+config FREEBOX_MTD_BACKEND_AMD
+	bool "Support for AMD compatible flash"
+	depends on FREEBOX_MTD
+
+config FREEBOX_MTD_BACKEND_INTEL
+	bool "Support for Intel Strataflash"
+	depends on FREEBOX_MTD
+
+config FREEBOX_MTD_USE_DMAMUX
+	bool "Use fbxdmamux for transfer"
+	depends on FREEBOX_DMAMUX && FREEBOX_MTD
+
+config FREEBOX_MTD_BLK
+	tristate "Block device access to fbxmtd"
+	depends on FREEBOX_MTD && BLOCK
+
+config FREEBOX_MTD_CHAR
+	tristate "Character device access to fbxmtd"
+	depends on FREEBOX_MTD
+
+
+comment "Mapping drivers"
+
+#
+# Generic mapping driver.
+#
+config FREEBOX_MTD_MAP_DRV_GENERIC
+	tristate "Generic mapping Driver."
+	depends on FREEBOX_MTD
+	select CRC32
+
+config FREEBOX_MTD_MAP_DRV_BCM963XX
+	tristate "Broadcom 963xx flash format"
+	depends on FREEBOX_MTD
+	select CRC32
+
+#
+# Freebox MTD Map Control interface
+#
+config FREEBOX_MTD_MAP_IOCTL
+	tristate "IOCTL control interface"
+	depends on FREEBOX_MTD_MAP_DRV_GENERIC
+
+endmenu
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxmtd./Makefile linux-2.6.20.14-fbx/drivers/fbxmtd/Makefile
--- linux-2.6.20.14-fbx/drivers/fbxmtd./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxmtd/Makefile	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,24 @@
+
+# core support
+obj-$(CONFIG_FREEBOX_MTD) += fbxmtd.o
+
+fbxmtd-objs += fbxmtd_core.o fbxmtd_core_io.o
+ifeq ($(CONFIG_FREEBOX_MTD_BACKEND_AMD),y)
+fbxmtd-objs += fbxmtd_core_amd.o
+endif
+
+ifeq ($(CONFIG_FREEBOX_MTD_BACKEND_INTEL),y)
+fbxmtd-objs += fbxmtd_core_intel.o
+endif
+
+# generic character device access support (r/w with read erase modify write)
+obj-$(CONFIG_FREEBOX_MTD_CHAR) += fbxmtd_char.o
+fbxmtd_char-objs += fbxmtd_char_dev.o
+
+# generic r/o block device access support
+obj-$(CONFIG_FREEBOX_MTD_BLK) += fbxmtd_blk.o
+fbxmtd_blk-objs += fbxmtd_blk_dev.o
+
+obj-$(CONFIG_FREEBOX_MTD_MAP_DRV_GENERIC) += fbxmtd_map_drv_generic.o
+obj-$(CONFIG_FREEBOX_MTD_MAP_DRV_BCM963XX) += fbxmtd_map_drv_bcm963xx.o
+obj-$(CONFIG_FREEBOX_MTD_MAP_IOCTL) += fbxmtd_map_ioctl.o
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxpanel./fbxpanel_anim.c linux-2.6.20.14-fbx/drivers/fbxpanel/fbxpanel_anim.c
--- linux-2.6.20.14-fbx/drivers/fbxpanel./fbxpanel_anim.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxpanel/fbxpanel_anim.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,389 @@
+/*
+ * fbxpanel_anim.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Thu Mar  8 16:48:56 2007
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+
+#include <asm/semaphore.h>
+
+#include <linux/fbxpanel.h>
+#include "fbxpanel_priv.h"
+
+#define	PFX	"fbxpanel_anim: "
+
+/* turning nibble */
+const static struct anim_frame anim1[] = {
+	{ { SEG_F | SEG_E, 0, 0, 0 } },
+	{ { SEG_F | SEG_A, 0, 0, 0 } },
+	{ { SEG_A, SEG_A, 0, 0 } },
+	{ { 0, SEG_A, SEG_A, 0 } },
+	{ { 0, 0, SEG_A, SEG_A } },
+	{ { 0, 0, 0, SEG_A | SEG_B } },
+	{ { 0, 0, 0, SEG_B | SEG_C } },
+	{ { 0, 0, 0, SEG_D | SEG_C } },
+	{ { 0, 0, SEG_D, SEG_D } },
+	{ { 0, SEG_D, SEG_D, 0 } },
+	{ { SEG_D, SEG_D, 0, 0 } },
+	{ { SEG_D | SEG_E, 0, 0, 0 } }
+};
+
+/* blinking ring */
+const static struct anim_frame anim2[] = {
+	{ { 0, 0, 0, 0 } },
+	{ { SEG_A | SEG_E | SEG_F | SEG_D, SEG_A | SEG_D,
+	    SEG_A | SEG_D,
+	    SEG_A | SEG_B | SEG_C | SEG_D } }
+};
+
+/* fixed ring */
+const static struct anim_frame anim3[] = {
+	{ { SEG_A | SEG_E | SEG_F | SEG_D,
+	    SEG_A | SEG_D, SEG_A | SEG_D,
+	    SEG_A | SEG_B | SEG_C | SEG_D } }
+};
+
+/* line moving up and down */
+const static struct anim_frame anim4[] = {
+	{ { SEG_A, SEG_A, SEG_A, SEG_A } },
+	{ { SEG_G, SEG_G, SEG_G, SEG_G } },
+	{ { SEG_D, SEG_D, SEG_D, SEG_D } },
+	{ { SEG_G, SEG_G, SEG_G, SEG_G } },
+};
+
+/* blinking 'PPP ' */
+const static struct anim_frame anim5[] = {
+	{ { 0, 0, 0, 0 } },
+	{ { SEG_A | SEG_F | SEG_B | SEG_G | SEG_E,
+	    SEG_A | SEG_F | SEG_B | SEG_G | SEG_E,
+	    SEG_A | SEG_F | SEG_B | SEG_G | SEG_E } }
+};
+
+#define LETTER_T	(SEG_F|SEG_E|SEG_D|SEG_G)
+#define LETTER_E	(SEG_A|SEG_F|SEG_E|SEG_D|SEG_G)
+#define LETTER_L	(SEG_F|SEG_E|SEG_D)
+#define LETTER_A	(SEG_A | SEG_B | SEG_C | SEG_E | SEG_F | SEG_G)
+#define LETTER_P	(SEG_A | SEG_B | SEG_E | SEG_F | SEG_G)
+#define LETTER_O	(SEG_A | SEG_B | SEG_C | SEG_D | SEG_E | SEG_F)
+
+#define LETTER_0 (SEG_A | SEG_B | SEG_C | SEG_D | SEG_E | SEG_F)
+#define LETTER_1 (SEG_B | SEG_C)
+#define LETTER_2 (SEG_A | SEG_B | SEG_G | SEG_E | SEG_D)
+
+/* TEL with line moving up and down */
+const static struct anim_frame anim6[] = {
+	{ { LETTER_T, LETTER_E, LETTER_L, SEG_A } },
+	{ { LETTER_T, LETTER_E, LETTER_L, SEG_G } },
+	{ { LETTER_T, LETTER_E, LETTER_L, SEG_D } },
+	{ { LETTER_T, LETTER_E, LETTER_L, SEG_G } },
+};
+
+/* flash bank0 of fiber part */
+const static struct anim_frame anim7[] = {
+	{ { LETTER_0, SEG_A, SEG_A, SEG_A } },
+	{ { LETTER_0, SEG_G, SEG_G, SEG_G } },
+	{ { LETTER_0, SEG_D, SEG_D, SEG_D } },
+	{ { LETTER_0, SEG_G, SEG_G, SEG_G } },
+};
+
+/* flash bank1 of fiber part */
+const static struct anim_frame anim8[] = {
+	{ { LETTER_1, SEG_A, SEG_A, SEG_A } },
+	{ { LETTER_1, SEG_G, SEG_G, SEG_G } },
+	{ { LETTER_1, SEG_D, SEG_D, SEG_D } },
+	{ { LETTER_1, SEG_G, SEG_G, SEG_G } },
+};
+
+/* flash bank1 of TV part */
+const static struct anim_frame anim9[] = {
+	{ { LETTER_2, SEG_A, SEG_A, SEG_A, } },
+	{ { LETTER_2, SEG_G, SEG_G, SEG_G, } },
+	{ { LETTER_2, SEG_D, SEG_D, SEG_D, } },
+	{ { LETTER_2, SEG_G, SEG_G, SEG_G, } },
+};
+
+/* download animation, optical part, bank0 */
+const static struct anim_frame anim10[] = {
+	{ { LETTER_0, SEG_G, 0, 0 } },
+	{ { LETTER_0, 0, SEG_G, 0 } },
+	{ { LETTER_0, 0, 0, SEG_G } },
+	{ { LETTER_0, 0, SEG_G, 0 } },
+};
+
+/* download animation, optical part bank1 */
+const static struct anim_frame anim11[] = {
+	{ { LETTER_1, SEG_G, 0, 0 } },
+	{ { LETTER_1, 0, SEG_G, 0 } },
+	{ { LETTER_1, 0, 0, SEG_G } },
+	{ { LETTER_1, 0, SEG_G, 0 } },
+};
+
+/* download animation, tv part */
+const static struct anim_frame anim12[] = {
+	{ { LETTER_2, SEG_G, 0, 0 } },
+	{ { LETTER_2, 0, SEG_G, 0 } },
+	{ { LETTER_2, 0, 0, SEG_G } },
+	{ { LETTER_2, 0, SEG_G, 0 } },
+};
+
+/* blinking APPL */
+const static struct anim_frame anim13[] = {
+	{ { LETTER_A, LETTER_P, LETTER_P, LETTER_L } },
+	{ { 0, 0, 0, 0 } },
+
+};
+
+struct anim
+{
+	const struct anim_frame *p;
+	unsigned int anim_size;
+	unsigned int anim_rate;
+};
+
+const static struct anim anims[] = {
+	{ anim1, sizeof (anim1) / sizeof (struct anim_frame), HZ / 2 },
+	{ anim1, sizeof (anim1) / sizeof (struct anim_frame), HZ / 10 },
+	{ anim2, sizeof (anim2) / sizeof (struct anim_frame), HZ / 2 },
+	{ anim3, sizeof (anim3) / sizeof (struct anim_frame), HZ },
+	{ anim4, sizeof (anim4) / sizeof (struct anim_frame), HZ },
+	{ anim5, sizeof (anim5) / sizeof (struct anim_frame), HZ / 2 },
+	{ anim6, sizeof (anim6) / sizeof (struct anim_frame), HZ / 2 },
+	{ anim7, sizeof (anim7) / sizeof (struct anim_frame), HZ },
+	{ anim8, sizeof (anim8) / sizeof (struct anim_frame), HZ },
+	{ anim9, sizeof (anim9) / sizeof (struct anim_frame), HZ },
+	{ anim10, sizeof (anim10) / sizeof (struct anim_frame), HZ },
+	{ anim11, sizeof (anim11) / sizeof (struct anim_frame), HZ },
+	{ anim12, sizeof (anim12) / sizeof (struct anim_frame), HZ },
+	{ anim13, sizeof (anim13) / sizeof (struct anim_frame), HZ / 2 },
+};
+
+enum {
+	E_ANIM_SLOW_SNAKE,
+	E_ANIM_FAST_SNAKE,
+	E_ANIM_BLINK_RING,
+	E_ANIM_FIXED_RING,
+	E_ANIM_LINE_UP_DOWN,
+	E_ANIM_PPP,
+	E_ANIM_TEL_FLASH,
+	E_ANIM_FLASH_BANK0,
+	E_ANIM_FLASH_BANK1,
+	E_ANIM_FLASH_TV,
+	E_ANIM_DOWNLOAD_BANK0,
+	E_ANIM_DOWNLOAD_BANK1,
+	E_ANIM_DOWNLOAD_TV,
+	E_ANIM_RECEIVE_PHONE_CALL,
+	E_ANIM_LAST,
+};
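+
+/*
+ * note: the index written to the "anim" sysfs attribute selects an
+ * entry in anims[] above, e.g. writing 2 selects E_ANIM_BLINK_RING.
+ */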
+
+/*
+ * handle animation here. we will be awoken if anyone changes the
+ * current animation number or if animation requires an update.
+ */
+static int
+kanimator_thread(void *data)
+{
+	struct fbxpanel *p;
+
+	p = data;
+
+	while (!kthread_should_stop()) {
+		int anim;
+		int i;
+
+		if (down_interruptible(&p->mutex))
+			continue ;
+
+		anim = p->current_anim;
+		if (anim == -1) {
+			/* nothing to animate, wait for anim change */
+			dprint("waiting for animation request ...\n");
+			up(&p->mutex);
+			wait_event_interruptible(p->animator_wq,
+						 p->current_anim != -1 ||
+						 kthread_should_stop());
+			continue ;
+		}
+
+		/* check for animation change */
+		if (p->last_anim != anim) {
+			p->last_anim = anim;
+
+			/*
+			 * do not set current_frame to 0 when possible
+			 * to allow smooth transitions from slow snake
+			 * to fast snake.
+			 */
+			if (anims[p->current_anim].anim_size <
+			    p->current_frame)
+				p->current_frame = 0;
+		}
+
+		if (++(p->current_frame) >= anims[anim].anim_size)
+			p->current_frame = 0;
+
+		/* update panel with current frame */
+		for (i = 0; i < 4; ++i) {
+			_fbxpanel_set_digit(p, i,
+					    p->frames[anim][p->current_frame].d[i]);
+		}
+
+		up(&p->mutex);
+		wait_event_interruptible_timeout(p->animator_wq,
+						 p->current_anim != anim,
+						 anims[anim].anim_rate);
+	}
+
+	return 0;
+}
+
+/*
+ * map generic bit values to offsets in device specific digit desc
+ * structure.
+ */
+static struct {
+	int gen_bit;
+	int offset;
+} bit_map[] = {
+	{ SEG_A,
+	  offsetof(struct digit_seg_desc, h_top) / sizeof (int) },
+	{ SEG_B,
+	  offsetof(struct digit_seg_desc, v_top_right) / sizeof (int) },
+	{ SEG_C,
+	  offsetof(struct digit_seg_desc, v_bottom_right) / sizeof (int) },
+	{ SEG_D,
+	  offsetof(struct digit_seg_desc, h_bottom) / sizeof (int) },
+	{ SEG_E,
+	  offsetof(struct digit_seg_desc, v_bottom_left) / sizeof (int)},
+	{ SEG_F,
+	  offsetof(struct digit_seg_desc, v_top_left) / sizeof (int) },
+	{ SEG_G,
+	  offsetof(struct digit_seg_desc, h_middle_left) / sizeof (int) },
+	{ SEG_G,
+	  offsetof(struct digit_seg_desc, h_middle_right) / sizeof (int)},
+};
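+
+/*
+ * e.g. with the pt6959 digit_seg_desc (h_top = 1 << 7), a SEG_A bit in
+ * a generic frame is translated by build_anim_frames() below into
+ * hardware bit (1 << 7) of the device specific frame.
+ */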
+
+/*
+ * setup p->frames so that it contains all device specific digit
+ * values directly usable by the driver callbacks.
+ */
+static int
+build_anim_frames(struct fbxpanel *p)
+{
+	int i;
+	int err;
+	int *desc;
+
+	desc = (int*)p->digit_seg_desc;
+
+	err = -ENOMEM;
+	p->frames = kzalloc(E_ANIM_LAST * sizeof (*p->frames), GFP_KERNEL);
+	if (p->frames == NULL)
+		goto out_error;
+
+	/* loop over animations */
+	for (i = 0; i < E_ANIM_LAST; ++i) {
+		int j;
+
+		p->frames[i] = kzalloc(anims[i].anim_size *
+				       sizeof (**p->frames),
+				       GFP_KERNEL);
+		if (p->frames[i] == NULL)
+			goto out_error;
+
+		/* loop over animation frames */
+		for (j = 0; j < anims[i].anim_size; ++j) {
+			int k;
+
+			/* loop over animation digits */
+			for (k = 0; k < 4; ++k) {
+				int l;
+
+				/* loop over digit bits */
+				for (l = 0; l < 8; ++l) {
+					if (anims[i].p[j].d[k] &
+					    bit_map[l].gen_bit)
+						p->frames[i][j].d[k] |=
+						  desc[bit_map[l].offset];
+				}
+			}
+		}
+	}
+	p->anim_count = E_ANIM_LAST;
+	return 0;
+
+ out_error:
+	if (p->frames) {
+		for (i = 0; i < E_ANIM_LAST; ++i) {
+			if (p->frames[i])
+				kfree(p->frames[i]);
+		}
+		kfree(p->frames);
+		p->frames = NULL;
+	}
+	return err;
+}
+
+/*
+ * free all data allocated in build_anim_frames.
+ */
+static void
+free_anim_frames(struct fbxpanel *p)
+{
+	int i;
+
+	for (i = 0; i < E_ANIM_LAST; ++i) {
+		if (p->frames[i])
+			kfree(p->frames[i]);
+	}
+	kfree(p->frames);
+	p->frames = NULL;
+}
+
+/*
+ * build animation frames and spawn a kthread for animation.
+ *
+ * NOTE: a kanimator process is spawned for each panel available on
+ * the system.
+ */
+int
+fbxpanel_animator_init(struct fbxpanel *p)
+{
+	int err;
+
+	err = build_anim_frames(p);
+	if (err)
+		goto out_error;
+
+	init_waitqueue_head(&p->animator_wq);
+	p->animator = kthread_create(kanimator_thread, p,
+				     "kanimator/%s", p->name);
+	if (IS_ERR(p->animator)) {
+		printk(KERN_ERR PFX
+		       "unable to create animator thread for %s\n", p->name);
+		err = PTR_ERR(p->animator);
+		p->animator = NULL;
+		goto out_error;
+	}
+	p->current_anim = -1;
+	wake_up_process(p->animator);
+	return 0;
+
+ out_error:
+	if (p->frames)
+		free_anim_frames(p);
+	if (p->animator)
+		kthread_stop(p->animator);
+	return err;
+}
+
+/*
+ * stop kanimator thread and free allocated animation frames.
+ */
+int
+fbxpanel_animator_exit(struct fbxpanel *p)
+{
+	kthread_stop(p->animator);
+	free_anim_frames(p);
+	return 0;
+}
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxpanel./fbxpanel_class.c linux-2.6.20.14-fbx/drivers/fbxpanel/fbxpanel_class.c
--- linux-2.6.20.14-fbx/drivers/fbxpanel./fbxpanel_class.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxpanel/fbxpanel_class.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,341 @@
+/*
+ * fbxpanel.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Mar  7 22:14:40 2007
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+
+#include <linux/fbxpanel.h>
+#include "fbxpanel_priv.h"
+
+#define PFX	"fbxpanel: "
+
+/*
+ * helpers with cache handling, must be called with the mutex
+ * held or when kanimator is not running!
+ */
+int
+_fbxpanel_set_digit(struct fbxpanel *p, int digit, uint16_t value)
+{
+	if (digit >= p->digit_count)
+		return -EINVAL;
+
+	if (p->digit_cache[digit] == value)
+		return 0;
+	p->digit_cache[digit] = value;
+	return p->set_digit(p, digit, value);
+}
+
+/*
+ * add a len parameter since sysfs buffers are not zero terminated.
+ */
+int
+_fbxpanel_set_string(struct fbxpanel *p, const char *buf, int len)
+{
+	int ret;
+	int i;
+	int err = 0;
+
+	/* disable potentially running animation */
+	p->current_anim = -1;
+
+	if (len > p->digit_count)
+		len = p->digit_count;
+
+	ret = 0;
+	for (i = 0; i < len; ++i) {
+		uint16_t digit;
+
+		digit = p->ascii_table[(uint8_t)buf[i]];
+		if (!digit)
+			digit = p->ascii_table[' '];
+
+		err = _fbxpanel_set_digit(p, i, digit);
+		if (err) {
+			dprint("%s: unable to send digit.\n", p->name);
+			ret = err;
+		}
+	}
+	return ret;
+}
+
+int
+_fbxpanel_colon_ctl(struct fbxpanel *p, int enable, int blink_msec)
+{
+	return p->set_colon_digit(p, enable, blink_msec);
+}
+
+/*
+ * display 88:88 to the front panel.
+ */
+int
+_fbxpanel_set_initial(struct fbxpanel *p)
+{
+	_fbxpanel_set_string(p, "8888", 4);
+	/* enable colon, no blink */
+	_fbxpanel_colon_ctl(p, 1, 0);
+	return 0;
+}
+
+/*
+ * terminate strings correctly since data coming from the lower layer
+ * is not zero terminated. result is stored in out.
+ */
+static void
+copyz(char *out, int out_count, const char *in, size_t in_count)
+{
+	int len;
+
+	if (in_count > out_count - 1)
+		len = out_count;
+	else
+		len = in_count + 1;
+	strlcpy(out, in, len);
+}
+
+/*
+ * sysfs callback: update front panel string.
+ */
+static ssize_t
+store_text(struct class_device *dev, const char *buf, size_t count)
+{
+	struct fbxpanel *panel;
+	int err;
+	int ret;
+
+	panel = dev->class_data;
+
+	if (panel == NULL)
+		/* dead device */
+		return -ENODEV;
+
+	if (down_interruptible(&panel->mutex))
+		return -ERESTARTSYS;
+
+	ret = count;
+	err = _fbxpanel_set_string(panel, buf, count);
+	if (err)
+		ret = err;
+
+	up(&panel->mutex);
+	return ret;
+}
+
+/*
+ * update animation number and wakeup kanimator thread.
+ */
+static ssize_t
+store_anim(struct class_device *dev, const char *buf, size_t count)
+{
+	struct fbxpanel *panel;
+	int val;
+	char str[16];
+
+	panel = dev->class_data;
+
+	if (panel == NULL)
+		/* dead device */
+		return -ENODEV;
+
+	if (down_interruptible(&panel->mutex))
+		return -ERESTARTSYS;
+
+	copyz(str, sizeof (str), buf, count);
+	val = simple_strtoul(str, NULL, 0);
+	if (val < panel->anim_count) {
+		panel->current_anim = val;
+		wake_up(&panel->animator_wq);
+	}
+
+	up(&panel->mutex);
+	return count;
+}
+
+static ssize_t
+store_colon_ctl(struct class_device *dev, const char *buf, size_t count)
+{
+	struct fbxpanel *panel;
+	int enable;
+	int msec_blink;
+	char *end;
+	char *zbuf = NULL;
+	int err = 0;
+
+	panel = dev->class_data;
+	if (panel == NULL)
+		/* dead device */
+		return -ENODEV;
+
+	if (down_interruptible(&panel->mutex))
+		return -ERESTARTSYS;
+
+
+	zbuf = kmalloc(count + 1, GFP_KERNEL);
+	if (zbuf == NULL) {
+		err = -ENOMEM;
+		goto out;
+	}
+	memcpy(zbuf, buf, count);
+	zbuf[count] = 0;
+
+	enable = simple_strtol(zbuf, &end, 10);
+	if (*end != ':') {
+		err = -EINVAL;
+		goto out;
+	}
+	msec_blink = simple_strtol(end + 1, &end, 10);
+	if (*end != 0 && *end != '\n') {
+		err = -EINVAL;
+		goto out;
+	}
+
+	_fbxpanel_colon_ctl(panel, enable, msec_blink);
+
+ out:
+	if (zbuf)
+		kfree(zbuf);
+	up(&panel->mutex);
+	if (err)
+		return err;
+	return count;
+}
+
+static struct class_device_attribute panel_attributes[] = {
+	__ATTR(anim, 0600, NULL, store_anim),
+	__ATTR(text, 0600, NULL, store_text),
+	__ATTR(colon_ctl, 0600, NULL, store_colon_ctl),
+	{ },
+};
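+
+/*
+ * userland usage sketch (the panel name below is the one registered by
+ * the pt6959 driver, other drivers use their own name):
+ *
+ *   echo HELO > /sys/class/fbxpanel/fbxo1a_panel/text
+ *   echo 2 > /sys/class/fbxpanel/fbxo1a_panel/anim
+ *   echo "1:500" > /sys/class/fbxpanel/fbxo1a_panel/colon_ctl
+ *
+ * colon_ctl takes "enable:msec_blink", so "1:500" enables the colon
+ * and makes it toggle every 500 ms.
+ */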
+
+static struct class fbxpanel_class =
+{
+	.name			= "fbxpanel",
+	.owner			= THIS_MODULE,
+	.class_dev_attrs	= panel_attributes,
+};
+
+/*
+ * called when there are no references left on the class device. only
+ * free dev, most of the cleanup has already been done in
+ * fbxpanel_unregister.
+ */
+static void
+fbxpanel_release(struct class_device *dev)
+{
+	kfree(dev);
+}
+
+/*
+ * create a new class_device named $panel->name and add it to the
+ * class layer.
+ *
+ * the attributes will so be available in
+ * /sys/class/fbxpanel/$panel->name/
+ */
+int
+fbxpanel_register(struct fbxpanel *panel)
+{
+	int err;
+
+	printk(KERN_INFO PFX "registering panel %s\n", panel->name);
+	panel->class_dev = kzalloc(sizeof (*panel->class_dev), GFP_KERNEL);
+	if (panel->class_dev == NULL) {
+		err = -ENOMEM;
+		printk(KERN_ERR PFX "unable to allocate class device.\n");
+		goto out_error;
+	}
+	class_device_initialize(panel->class_dev);
+	panel->class_dev->class = &fbxpanel_class;
+	panel->class_dev->class_data = panel;
+	panel->class_dev->release = fbxpanel_release;
+	strlcpy(panel->class_dev->class_id, panel->name, BUS_ID_SIZE);
+
+	err = -ENOMEM;
+	panel->digit_cache = kzalloc(panel->digit_count * sizeof (uint16_t),
+				     GFP_KERNEL);
+	if (panel->digit_cache == NULL)
+		goto out_error;
+
+	_fbxpanel_set_initial(panel);
+
+	err = fbxpanel_animator_init(panel);
+	if (err) {
+		printk(KERN_ERR PFX "unable to init animator for %s\n",
+		       panel->name);
+		goto out_error;
+	}
+
+	err = class_device_add(panel->class_dev);
+	if (err) {
+		printk(KERN_ERR PFX "unable to add panel %s\n",
+		       panel->class_dev->class_id);
+		goto out_animator;
+	}
+
+	return 0;
+
+ out_animator:
+	fbxpanel_animator_exit(panel);
+ out_error:
+	if (panel->digit_cache)
+		kfree(panel->digit_cache);
+	if (panel->class_dev)
+		kfree(panel->class_dev);
+	return err;
+}
+
+/*
+ * cleanup everything allocated in fbxpanel_register. allocated
+ * class_device will be freed in the release callback.
+ */
+int
+fbxpanel_unregister(struct fbxpanel *panel)
+{
+	printk(KERN_INFO PFX "unregistering panel %s\n", panel->name);
+
+	class_device_del(panel->class_dev);
+	panel->class_dev->class_data = NULL;
+	class_device_put(panel->class_dev);
+
+	fbxpanel_animator_exit(panel);
+	_fbxpanel_set_initial(panel);
+	kfree(panel->digit_cache);
+	return 0;
+}
+
+/*
+ * register the fbxpanel class.
+ */
+static int __init
+fbxpanel_class_init(void)
+{
+	int err;
+
+	printk(KERN_INFO PFX "2007, Freebox SA.\n");
+
+	err = class_register(&fbxpanel_class);
+	if (err) {
+		printk(KERN_ERR PFX "unable to register fbxpanel class.\n");
+		return err;
+	}
+	return 0;
+}
+
+/*
+ * unregister the fbxpanel class.
+ */
+static void __exit
+fbxpanel_class_exit(void)
+{
+	class_unregister(&fbxpanel_class);
+}
+
+subsys_initcall(fbxpanel_class_init);
+module_exit(fbxpanel_class_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Freebox Front Panel Class Device - www.freebox.fr");
+
+EXPORT_SYMBOL_GPL(fbxpanel_register);
+EXPORT_SYMBOL_GPL(fbxpanel_unregister);
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxpanel./fbxpanel_hw_pt6959.c linux-2.6.20.14-fbx/drivers/fbxpanel/fbxpanel_hw_pt6959.c
--- linux-2.6.20.14-fbx/drivers/fbxpanel./fbxpanel_hw_pt6959.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxpanel/fbxpanel_hw_pt6959.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,401 @@
+/*
+ * fbxpanel_hw_pt6959.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Mar 28 15:25:58 2007
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/fbxspi.h>
+#include <linux/sched.h>
+#include <linux/fbxpanel.h>
+#include <linux/kthread.h>
+
+#include "fbxpanel_hw_pt6959.h"
+
+#define PFX	"fbxpanel_hw_pt6959: "
+
+/*
+ * map ascii characters to digit value
+ */
+static const uint16_t ascii_to_panel[256] =
+{
+	[' '] = DIGIT_L(0, 0, 0, 0, 0, 0, 0, 0) | DIGIT_H(0, 0, 0, 0, 0, 0),
+	['('] = DIGIT_L(0, 0, 0, 1, 0, 0, 0, 0) | DIGIT_H(0, 0, 1, 0, 0, 0),
+	[')'] = DIGIT_L(0, 0, 1, 0, 0, 0, 0, 0) | DIGIT_H(0, 0, 0, 0, 0, 1),
+	['+'] = DIGIT_L(0, 1, 0, 0, 0, 0, 0, 0) | DIGIT_H(0, 1, 0, 1, 0, 0),
+	['-'] = DIGIT_L(0, 1, 0, 0, 0, 0, 0, 0) | DIGIT_H(0, 0, 0, 1, 0, 0),
+	['/'] = DIGIT_L(0, 0, 1, 0, 0, 0, 0, 0) | DIGIT_H(0, 0, 1, 0, 0, 0),
+	['\\'] = DIGIT_L(0, 0, 0, 1, 0, 0, 0, 0) | DIGIT_H(0, 0, 0, 0, 0, 1),
+	['0'] = DIGIT_L(1, 0, 0, 0, 0, 1, 1, 1) | DIGIT_H(1, 0, 0, 0, 1, 0),
+	['1'] = DIGIT_L(0, 0, 0, 0, 0, 0, 0, 1) | DIGIT_H(0, 0, 0, 0, 1, 0),
+	['2'] = DIGIT_L(1, 1, 0, 0, 0, 1, 1, 0) | DIGIT_H(0, 0, 0, 1, 1, 0),
+	['3'] = DIGIT_L(1, 1, 0, 0, 0, 1, 0, 1) | DIGIT_H(0, 0, 0, 1, 1, 0),
+	['4'] = DIGIT_L(0, 1, 0, 0, 0, 0, 0, 1) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['5'] = DIGIT_L(1, 1, 0, 0, 0, 1, 0, 1) | DIGIT_H(1, 0, 0, 1, 0, 0),
+	['6'] = DIGIT_L(1, 1, 0, 0, 0, 1, 1, 1) | DIGIT_H(1, 0, 0, 1, 0, 0),
+	['7'] = DIGIT_L(1, 0, 0, 0, 0, 0, 0, 1) | DIGIT_H(0, 0, 0, 0, 1, 0),
+	['8'] = DIGIT_L(1, 1, 0, 0, 0, 1, 1, 1) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['9'] = DIGIT_L(1, 1, 0, 0, 0, 1, 0, 1) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['<'] = DIGIT_L(0, 0, 0, 1, 0, 0, 0, 0) | DIGIT_H(0, 0, 1, 0, 0, 0),
+	['>'] = DIGIT_L(0, 0, 1, 0, 0, 0, 0, 0) | DIGIT_H(0, 0, 0, 0, 0, 1),
+	['A'] = DIGIT_L(1, 1, 0, 0, 0, 0, 1, 1) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['B'] = DIGIT_L(1, 0, 0, 0, 0, 1, 0, 1) | DIGIT_H(0, 1, 0, 1, 1, 0),
+	['C'] = DIGIT_L(1, 0, 0, 0, 0, 1, 1, 0) | DIGIT_H(1, 0, 0, 0, 0, 0),
+	['D'] = DIGIT_L(1, 0, 0, 0, 0, 1, 0, 1) | DIGIT_H(0, 1, 0, 0, 1, 0),
+	['E'] = DIGIT_L(1, 1, 0, 0, 0, 1, 1, 0) | DIGIT_H(1, 0, 0, 1, 0, 0),
+	['F'] = DIGIT_L(1, 1, 0, 0, 0, 0, 1, 0) | DIGIT_H(1, 0, 0, 1, 0, 0),
+	['G'] = DIGIT_L(1, 0, 0, 0, 0, 1, 1, 1) | DIGIT_H(1, 0, 0, 1, 0, 0),
+	['H'] = DIGIT_L(0, 1, 0, 0, 0, 0, 1, 1) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['I'] = DIGIT_L(1, 0, 0, 0, 0, 1, 0, 0) | DIGIT_H(0, 1, 0, 0, 0, 0),
+	['J'] = DIGIT_L(0, 0, 0, 0, 0, 1, 1, 1) | DIGIT_H(0, 0, 0, 0, 1, 0),
+	['K'] = DIGIT_L(0, 1, 0, 1, 0, 0, 1, 0) | DIGIT_H(1, 0, 1, 0, 0, 0),
+	['L'] = DIGIT_L(0, 0, 0, 0, 0, 1, 1, 0) | DIGIT_H(1, 0, 0, 0, 0, 0),
+	['M'] = DIGIT_L(0, 0, 0, 0, 0, 0, 1, 1) | DIGIT_H(1, 0, 1, 0, 1, 1),
+	['N'] = DIGIT_L(0, 0, 0, 1, 0, 0, 1, 1) | DIGIT_H(1, 0, 0, 0, 1, 1),
+	['O'] = DIGIT_L(1, 0, 0, 0, 0, 1, 1, 1) | DIGIT_H(1, 0, 0, 0, 1, 0),
+	['P'] = DIGIT_L(1, 1, 0, 0, 0, 0, 1, 0) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['Q'] = DIGIT_L(1, 0, 0, 1, 0, 1, 1, 1) | DIGIT_H(1, 0, 0, 0, 1, 0),
+	['R'] = DIGIT_L(1, 1, 0, 1, 0, 0, 1, 0) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['S'] = DIGIT_L(1, 1, 0, 0, 0, 1, 0, 1) | DIGIT_H(1, 0, 0, 1, 0, 0),
+	['T'] = DIGIT_L(1, 0, 0, 0, 0, 0, 0, 0) | DIGIT_H(0, 1, 0, 0, 0, 0),
+	['U'] = DIGIT_L(0, 0, 0, 0, 0, 1, 1, 1) | DIGIT_H(1, 0, 0, 0, 1, 0),
+	['V'] = DIGIT_L(0, 0, 0, 1, 0, 0, 0, 1) | DIGIT_H(0, 0, 0, 0, 1, 1),
+	['W'] = DIGIT_L(0, 0, 1, 1, 0, 0, 1, 1) | DIGIT_H(1, 0, 0, 0, 1, 0),
+	['X'] = DIGIT_L(0, 0, 1, 1, 0, 0, 0, 0) | DIGIT_H(0, 0, 1, 0, 0, 1),
+	['Y'] = DIGIT_L(0, 1, 0, 0, 0, 1, 0, 1) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['Z'] = DIGIT_L(1, 0, 1, 0, 0, 1, 0, 0) | DIGIT_H(0, 0, 1, 0, 0, 0),
+	['_'] = DIGIT_L(0, 0, 0, 0, 0, 1, 0, 0) | DIGIT_H(0, 0, 0, 0, 0, 0),
+	['a'] = DIGIT_L(0, 1, 0, 0, 0, 1, 1, 1) | DIGIT_H(0, 0, 0, 1, 0, 0),
+	['b'] = DIGIT_L(0, 1, 0, 0, 0, 1, 1, 1) | DIGIT_H(1, 0, 0, 1, 0, 0),
+	['c'] = DIGIT_L(0, 1, 0, 0, 0, 1, 1, 0) | DIGIT_H(0, 0, 0, 1, 0, 0),
+	['d'] = DIGIT_L(0, 1, 0, 0, 0, 1, 1, 1) | DIGIT_H(0, 0, 0, 1, 1, 0),
+	['e'] = DIGIT_L(1, 1, 0, 0, 0, 1, 1, 0) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['f'] = DIGIT_L(1, 1, 0, 0, 0, 0, 1, 0) | DIGIT_H(1, 0, 0, 0, 0, 0),
+	['g'] = DIGIT_L(1, 1, 0, 0, 0, 1, 0, 1) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['h'] = DIGIT_L(0, 1, 0, 0, 0, 0, 1, 1) | DIGIT_H(1, 0, 0, 1, 0, 0),
+	['i'] = DIGIT_L(0, 0, 0, 0, 0, 0, 0, 0) | DIGIT_H(0, 1, 0, 0, 0, 0),
+	['j'] = DIGIT_L(0, 0, 0, 0, 0, 1, 1, 1) | DIGIT_H(0, 0, 0, 0, 1, 0),
+	['k'] = DIGIT_L(0, 0, 0, 1, 0, 0, 0, 0) | DIGIT_H(0, 1, 0, 1, 0, 0),
+	['l'] = DIGIT_L(0, 0, 0, 0, 0, 0, 1, 0) | DIGIT_H(1, 0, 0, 0, 0, 0),
+	['m'] = DIGIT_L(0, 1, 0, 0, 0, 0, 1, 1) | DIGIT_H(0, 1, 0, 1, 0, 0),
+	['n'] = DIGIT_L(0, 1, 0, 0, 0, 0, 1, 1) | DIGIT_H(0, 0, 0, 1, 0, 0),
+	['o'] = DIGIT_L(0, 1, 0, 0, 0, 1, 1, 1) | DIGIT_H(0, 0, 0, 1, 0, 0),
+	['p'] = DIGIT_L(1, 1, 0, 0, 0, 0, 1, 0) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['q'] = DIGIT_L(1, 1, 0, 0, 0, 0, 0, 1) | DIGIT_H(1, 0, 0, 1, 1, 0),
+	['r'] = DIGIT_L(0, 1, 0, 0, 0, 0, 1, 0) | DIGIT_H(0, 0, 0, 0, 0, 0),
+	['s'] = DIGIT_L(1, 1, 0, 0, 0, 1, 0, 1) | DIGIT_H(1, 0, 0, 1, 0, 0),
+	['t'] = DIGIT_L(0, 1, 0, 0, 0, 1, 1, 0) | DIGIT_H(1, 0, 0, 0, 0, 0),
+	['u'] = DIGIT_L(0, 0, 0, 0, 0, 1, 1, 1) | DIGIT_H(0, 0, 0, 0, 0, 0),
+	['v'] = DIGIT_L(0, 0, 0, 1, 0, 0, 0, 1) | DIGIT_H(0, 0, 0, 0, 0, 0),
+	['w'] = DIGIT_L(0, 0, 1, 1, 0, 0, 1, 1) | DIGIT_H(0, 0, 0, 0, 0, 0),
+	['x'] = DIGIT_L(0, 1, 0, 1, 0, 0, 0, 0) | DIGIT_H(0, 1, 0, 1, 0, 0),
+	['y'] = DIGIT_L(0, 0, 1, 0, 0, 0, 0, 0) | DIGIT_H(0, 0, 1, 0, 0, 1),
+	['z'] = DIGIT_L(0, 1, 0, 0, 0, 1, 0, 0) | DIGIT_H(0, 1, 0, 1, 0, 0),
+	['|'] = DIGIT_L(0, 0, 0, 0, 0, 0, 0, 0) | DIGIT_H(0, 1, 0, 0, 0, 0),
+};
+
+/* #define __LIFA */
+static const int digit_layout[] = {
+#ifdef __LIFA
+	0, 1, 3, 2
+#else
+	3, 2, 1, 0,
+#endif
+};
+
+/*
+ * digit layout.
+ */
+static struct digit_seg_desc digit_seg_desc =
+{
+	.h_top		= (1 << 7),
+	.h_middle_left	= (1 << 6),
+	.h_middle_right	= (1 << 10),
+	.h_bottom	= (1 << 2),
+
+	.v_top_left	= (1 << 13),
+	.v_top_right	= (1 << 9),
+
+	.v_bottom_left	= (1 << 1),
+	.v_bottom_right	= (1 << 0),
+};
+
+int
+fbxpanel_hw_pt6959_set_digit(struct fbxpanel *p, int digit, uint16_t value)
+{
+	uint8_t cmd[4];
+	struct fbxpanel_pt6959_priv *priv;
+	int i;
+	int hw_digit;
+
+	priv = p->priv;
+
+
+	hw_digit = digit_layout[digit];
+	priv->vram[digit] = value;
+
+	for (i = 0; i < 2; ++i) {
+		cmd[0] = CMD_ADDR_SET << CMD_SHIFT;
+		cmd[0] |= hw_digit * 2 + i;
+		cmd[1] = (value >> (8 * i)) & 0xff;
+
+		if (fbxspi_write_then_read(priv->spi_dev, cmd, 2, NULL, 0) < 0)
+			return -EIO;
+	}
+	return 0;
+}
+
+#define BLINK_MASK	(1 << 3)
+#define COLON_DIGIT	1
+
+/*
+ * I really miss the PIC available on fbx5b, where it was possible to
+ * make the colon blink with the help of the pic. since we are on our
+ * own here, do it with a kthread.
+ * </mylife>
+ *
+ * we protect ourself from userland and kanimator using panel->mutex.
+ */
+static int
+blink_kthread(void *data)
+{
+	struct fbxpanel_pt6959_priv *priv;
+	struct fbxpanel *p;
+
+	p = data;
+	priv = p->priv;
+
+	while (!kthread_should_stop()) {
+		uint16_t val;
+		int blink_speed;
+
+		if (down_interruptible(&p->mutex))
+			continue ;
+
+		blink_speed = priv->blink_speed;
+
+		if (blink_speed == 0) {
+			up(&p->mutex);
+			wait_event_interruptible(priv->blink_wq,
+						 priv->blink_speed > 0 ||
+						 kthread_should_stop());
+			continue ;
+		}
+
+
+		if (priv->blink_state)
+			val = priv->vram[COLON_DIGIT] | BLINK_MASK;
+		else
+			val = priv->vram[COLON_DIGIT] & ~BLINK_MASK;
+		priv->blink_state = (priv->blink_state + 1) & 0x1;
+		fbxpanel_hw_pt6959_set_digit(p, COLON_DIGIT, val);
+
+		up(&p->mutex);
+		wait_event_interruptible_timeout(priv->blink_wq,
+						 priv->blink_speed !=
+						 blink_speed ||
+						 kthread_should_stop(),
+						 blink_speed * HZ / 1000);
+	}
+	return 0;
+}
+
+int
+fbxpanel_hw_pt6959_set_colon_digit(struct fbxpanel *p, int enable,
+				   int msec_blink)
+{
+	struct fbxpanel_pt6959_priv *priv;
+
+	priv = p->priv;
+
+	/* no blink or need to disable: handle it there */
+	if (msec_blink == 0 || enable == 0) {
+		uint16_t val;
+		val = priv->vram[COLON_DIGIT];
+		if (enable)
+			val |= BLINK_MASK;
+		else
+			val &= ~BLINK_MASK;
+		fbxpanel_hw_pt6959_set_digit(p, COLON_DIGIT, val);
+	}
+
+	/* change blink speed and wake up blink process anyway */
+	priv->blink_speed = msec_blink;
+	wake_up(&priv->blink_wq);
+	return 0;
+}
+
+int
+fbxpanel_hw_pt6959_probe(struct fbxspi_device *dev)
+{
+	int err;
+	int i;
+	struct fbxpanel *panel = NULL;
+	struct fbxpanel_pt6959_priv *priv = NULL;
+	uint8_t cmd;
+
+	printk(KERN_INFO PFX "probe.\n");
+	panel = kzalloc(sizeof (*panel), GFP_KERNEL);
+	if (panel == NULL) {
+		printk(KERN_ERR PFX "unable to allocate panel structure.\n");
+		err = -ENOMEM;
+		goto out_error;
+	}
+	init_MUTEX(&panel->mutex);
+
+	priv = kzalloc(sizeof (*priv), GFP_KERNEL);
+	if (priv == NULL) {
+		printk(KERN_ERR PFX "unable to allocate private panel "
+		       "structure.\n");
+		err = -ENOMEM;
+		goto out_error;
+	}
+
+	priv->spi_dev = dev;
+
+	panel->priv = priv;
+	panel->name = "fbxo1a_panel";
+	panel->set_digit = fbxpanel_hw_pt6959_set_digit;
+	panel->set_colon_digit = fbxpanel_hw_pt6959_set_colon_digit;
+
+	panel->digit_count = 4;
+	panel->ascii_table = ascii_to_panel;
+	panel->digit_seg_desc = &digit_seg_desc;
+
+	/* perform hardware init of the front panel controller */
+
+	/* set data mode: normal operation, auto increment addresses */
+	cmd = (CMD_DATA_CTL << CMD_SHIFT) | DC_FIXED_ADDRESS;
+	fbxspi_w8(priv->spi_dev, cmd);
+
+	/* set display mode */
+	cmd = (CMD_SET_DISPLAY_MODE << CMD_SHIFT) | DM_4DIGITS_14SEGS;
+	fbxspi_w8(priv->spi_dev, cmd);
+
+	/* clear internal RAM */
+	for (i = 0; i < 0xe; ++i) {
+		uint8_t cmd[2];
+
+		cmd[0] = CMD_ADDR_SET << CMD_SHIFT;
+		cmd[0] |= i;
+		cmd[1] = 0;
+		fbxspi_write_then_read(priv->spi_dev, cmd, 2, NULL, 0);
+	}
+
+	/* power on display */
+	cmd = (CMD_DISPLAY_CTL << CMD_SHIFT) | DISPLAY_ON | PULSE_WIDTH_14_16;
+	fbxspi_w8(priv->spi_dev, cmd);
+
+
+	init_waitqueue_head(&priv->blink_wq);
+	priv->blink_kthread = kthread_create(blink_kthread, panel,
+					     "blink/%s", panel->name);
+	if (IS_ERR(priv->blink_kthread)) {
+		printk(KERN_ERR PFX "unable to start blink kthread\n");
+		err = PTR_ERR(priv->blink_kthread);
+		priv->blink_kthread = NULL;
+		goto out_error;
+	}
+	wake_up_process(priv->blink_kthread);
+
+	err = fbxpanel_register(panel);
+	if (err) {
+		printk(KERN_ERR PFX "unable to register panel.\n");
+		goto out_kill_blink;
+	}
+
+#if 0
+	i = 0;
+	while (1) {
+		int k;
+		uint16_t val;
+
+		k = (i / 4) % 2;
+		if (k)
+			val = ascii_to_panel['X'];
+		else
+			val = ascii_to_panel['8'];
+		fbxpanel_hw_pt6959_set_digit(panel, 0, 0);
+		fbxpanel_hw_pt6959_set_digit(panel, 1, 0);
+		fbxpanel_hw_pt6959_set_digit(panel, 2, 0);
+		fbxpanel_hw_pt6959_set_digit(panel, 3, 0);
+		fbxpanel_hw_pt6959_set_digit(panel, i % 4, val);
+		i += 1;
+		msleep(1000);
+	}
+#endif
+	dev->drv->drvdata = panel;
+
+	return 0;
+
+ out_kill_blink:
+	kthread_stop(priv->blink_kthread);
+ out_error:
+	if (panel)
+		kfree(panel);
+	if (priv)
+		kfree(priv);
+	return err;
+}
+
+int
+fbxpanel_hw_pt6959_remove(struct fbxspi_device *dev)
+{
+	uint8_t cmd;
+	struct fbxpanel *panel;
+	struct fbxpanel_pt6959_priv *priv;
+	printk(KERN_INFO PFX "remove.\n");
+
+	panel = dev->drv->drvdata;
+	priv = panel->priv;
+
+
+	kthread_stop(priv->blink_kthread);
+
+	/* power off display */
+	cmd = (CMD_DISPLAY_CTL << CMD_SHIFT);
+	fbxspi_w8(priv->spi_dev, cmd);
+
+	fbxpanel_unregister(panel);
+	kfree(panel->priv);
+	kfree(panel);
+
+	return 0;
+}
+
+static struct fbxspi_driver pt6959_driver =
+{
+	.name		= "pt6959",
+	.probe		= fbxpanel_hw_pt6959_probe,
+	.remove		= fbxpanel_hw_pt6959_remove,
+};
+
+static int __init
+fbxpanel_hw_pt6959_init(void)
+{
+	int err;
+
+	printk(KERN_INFO PFX "2007, Freebox SA.\n");
+	err = fbxspi_register_driver(&pt6959_driver);
+	if (err) {
+		printk(KERN_ERR PFX "unable to register fbxspi driver %s.\n",
+		       pt6959_driver.name);
+		return err;
+	}
+	return 0;
+}
+
+static void __exit
+fbxpanel_hw_pt6959_exit(void)
+{
+	fbxspi_unregister_driver(&pt6959_driver);
+}
+
+module_init(fbxpanel_hw_pt6959_init);
+module_exit(fbxpanel_hw_pt6959_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Princeton 6959 Front Panel Driver - www.freebox.fr");
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxpanel./fbxpanel_hw_pt6959.h linux-2.6.20.14-fbx/drivers/fbxpanel/fbxpanel_hw_pt6959.h
--- linux-2.6.20.14-fbx/drivers/fbxpanel./fbxpanel_hw_pt6959.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxpanel/fbxpanel_hw_pt6959.h	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,70 @@
+/*
+ * fbxpanel_hw_pt6959.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Mar 28 16:50:00 2007
+ * Freebox SA
+ */
+
+#ifndef FBXPANEL_HW_PT6959_H
+# define FBXPANEL_HW_PT6959_H
+
+struct fbxspi_device;
+
+struct fbxpanel_pt6959_priv
+{
+	struct fbxspi_device *spi_dev;
+	uint16_t vram[0xe];
+
+	/* blink stuff */
+	struct task_struct *blink_kthread;
+	wait_queue_head_t blink_wq;
+	int blink_speed;
+	int blink_state; /* 0: off, 1: on */
+};
+
+/* */
+#define CMD_SHIFT	6
+
+/* PT6959 commands */
+#define CMD_SET_DISPLAY_MODE	0x0
+#define CMD_DATA_CTL		0x1
+#define CMD_DISPLAY_CTL		0x2
+#define CMD_ADDR_SET		0x3
+
+
+/* display mode parameters */
+#define DM_4DIGITS_14SEGS	0x0
+#define DM_5DIGITS_13SEGS	0x1
+#define DM_6DIGITS_12SEGS	0x2
+#define DM_7DIGITS_11SEGS	0x3
+
+/* data control parameters */
+#define DC_FIXED_ADDRESS	(1 << 2)
+#define DC_TEST_MODE		(1 << 3)
+
+/* display control parameters */
+#define PULSE_MASK		0x7
+
+#define PULSE_WIDTH_1_16	0x0
+#define PULSE_WIDTH_2_16	0x1
+#define PULSE_WIDTH_4_16	0x2
+#define PULSE_WIDTH_10_16	0x3
+#define PULSE_WIDTH_11_16	0x4
+#define PULSE_WIDTH_12_16	0x5
+#define PULSE_WIDTH_13_16	0x6
+#define PULSE_WIDTH_14_16	0x7
+
+#define DISPLAY_ON		(1 << 3)
+
+/*
+ * borrowed from tango2/fip.c
+ */
+#define DIGIT_L(b7,b6,b5,b4,b3,b2,b1,b0)	((b7 << 7) | (b6 << 6) | \
+						 (b5 << 5) | (b4 << 4) | \
+						 (b3 << 3) | (b2 << 2) | \
+						 (b1 << 1) | b0)
+
+#define DIGIT_H(b5,b4,b3,b2,b1,b0)		(((b5 << 5) | (b4 << 4) | \
+						  (b3 << 3) | (b2 << 2) | \
+						  (b1 << 1) | b0) << 8)
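+
+/*
+ * e.g. the ascii_to_panel entry for '1' is
+ * DIGIT_L(0, 0, 0, 0, 0, 0, 0, 1) | DIGIT_H(0, 0, 0, 0, 1, 0), i.e.
+ * bits 0 and 9, which digit_seg_desc maps to v_bottom_right and
+ * v_top_right: the two right hand segments of the digit.
+ */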
+
+#endif /* !FBXPANEL_HW_PT6959_H */
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxpanel./fbxpanel_priv.h linux-2.6.20.14-fbx/drivers/fbxpanel/fbxpanel_priv.h
--- linux-2.6.20.14-fbx/drivers/fbxpanel./fbxpanel_priv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxpanel/fbxpanel_priv.h	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,47 @@
+/*
+ * fbxpanel_priv.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Thu Mar  8 16:51:04 2007
+ * Freebox SA
+ */
+
+#ifndef __FBXPANEL_PRIV_H
+# define __FBXPANEL_PRIV_H
+
+struct fbxpanel;
+
+int fbxpanel_animator_init(struct fbxpanel *p);
+int fbxpanel_animator_exit(struct fbxpanel *p);
+
+/* debug stuff */
+/* #define DEBUG */
+#ifdef DEBUG
+#define dprint(Fmt, Arg...)	printk(PFX Fmt, ##Arg)
+#else
+#define dprint(Fmt, Arg...)	do { } while (0)
+#endif
+
+
+struct anim_frame
+{
+	uint8_t d[4];
+};
+
+struct dev_anim_frame
+{
+	uint16_t d[4];
+};
+
+#define SEG_A	(1 << 6)
+#define SEG_B	(1 << 5)
+#define SEG_C	(1 << 4)
+#define SEG_D	(1 << 3)
+#define SEG_E	(1 << 2)
+#define SEG_F	(1 << 1)
+#define SEG_G	(1 << 0)
+
+int _fbxpanel_set_digit(struct fbxpanel *p, int digit, uint16_t value);
+int _fbxpanel_set_state(struct fbxpanel *p, int digit, int value);
+int _fbxpanel_set_blink_speed(struct fbxpanel *p, uint32_t value);
+
+
+#endif /* !__FBXPANEL_PRIV_H */
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxpanel./Kconfig linux-2.6.20.14-fbx/drivers/fbxpanel/Kconfig
--- linux-2.6.20.14-fbx/drivers/fbxpanel./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxpanel/Kconfig	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,25 @@
+menu "Freebox Panel Support"
+
+config FREEBOX_PANEL
+	tristate "Freebox Panel Management"
+	default n
+
+config FREEBOX_PANEL_HW_PIC_FBX
+	tristate "I2C PIC-based Panel driver."
+	default n
+	depends on FREEBOX_PANEL
+	select I2C
+
+config FREEBOX_PANEL_HW_PT6959
+	tristate "Princeton 6959-based Panel driver."
+	default n
+	depends on FREEBOX_PANEL
+	select FREEBOX_SPI
+
+config FREEBOX_PANEL_HW_PT6311
+	tristate "Princeton 6311-based Panel driver."
+	default n
+	depends on FREEBOX_PANEL
+	select FREEBOX_SPI
+
+endmenu
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxpanel./Makefile linux-2.6.20.14-fbx/drivers/fbxpanel/Makefile
--- linux-2.6.20.14-fbx/drivers/fbxpanel./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxpanel/Makefile	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,9 @@
+obj-$(CONFIG_FREEBOX_PANEL)	+= fbxpanel.o
+
+fbxpanel-objs = fbxpanel_class.o fbxpanel_anim.o
+
+obj-$(CONFIG_FREEBOX_PANEL_HW_PIC_FBX)	+= fbxpanel_hw_pic_fbx.o
+obj-$(CONFIG_FREEBOX_PANEL_HW_PT6959)	+= fbxpanel_hw_pt6959.o
+obj-$(CONFIG_FREEBOX_PANEL_HW_PT6311)	+= fbxpanel_hw_pt6311.o
+
+EXTRA_CFLAGS += -Werror
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxspi./fbxspi.c linux-2.6.20.14-fbx/drivers/fbxspi/fbxspi.c
--- linux-2.6.20.14-fbx/drivers/fbxspi./fbxspi.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxspi/fbxspi.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,287 @@
+/*
+ * fbxspi.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Tue Mar 13 12:35:58 2007
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/fbxspi.h>
+
+#define PFX	"fbxspi: "
+
+/* debug stuff */
+/* #define DEBUG */
+#ifdef DEBUG
+#define dprint(Fmt, Arg...)	printk(PFX Fmt, ##Arg)
+#else
+#define dprint(Fmt, Arg...)	do { } while (0)
+#endif
+
+static DECLARE_MUTEX(list_lock);
+
+static LIST_HEAD(device_list);
+static LIST_HEAD(driver_list);
+static LIST_HEAD(master_list);
+
+/*
+ * walk the master list and try to match devices and drivers. if a match
+ * is successful, call probe on the device. if device is ok for this
+ * particular master, we are done for this device.
+ *
+ * MUST be called with list_lock HELD.
+ *
+ * if the device is unhappy with the selected master, it is the probe
+ * function's responsibility to return an error so that probe can be
+ * called with another master set in fbxspi_device->master.
+ */
+static void
+__match_and_probe(void)
+{
+	struct list_head *master_cur;
+	struct list_head *device_cur;
+	struct list_head *driver_cur;
+
+	/* walk each master */
+	list_for_each(master_cur, &master_list) {
+		struct fbxspi_master *master;
+
+		master = list_entry(master_cur, struct fbxspi_master, list);
+		dprint("now on master %s\n", master->name);
+
+		/* walk each device */
+		list_for_each(device_cur, &device_list) {
+			struct fbxspi_device *device;
+
+			device = list_entry(device_cur, struct fbxspi_device,
+					    list);
+			dprint(" now on device %s\n", device->name);
+			if (device->probe_done)
+				/* probe already done */
+				continue ;
+
+			/* set a valid master for probe function */
+			list_for_each(driver_cur, &driver_list) {
+				struct fbxspi_driver *driver;
+
+				driver = list_entry(driver_cur,
+						    struct fbxspi_driver,
+						    list);
+
+				/* set fields for probe function */
+				device->drv = driver;
+				driver->dev = device;
+
+				dprint("  now on driver %s\n",
+				       driver->name);
+				if (!driver->probe)
+					continue ;
+				device->master = master;
+				if (!strncmp(driver->name, device->name,
+					     BUS_ID_SIZE) &&
+				    !driver->probe(device)) {
+					dprint("match %s on master %s\n",
+					       device->name, master->name);
+					device->probe_done = 1;
+					__module_get(master->owner);
+					return;
+				} else {
+					device->drv = NULL;
+					driver->dev = NULL;
+					device->master = NULL;
+				}
+			}
+		}
+	}
+}
+
+/*
+ * check name uniqueness. add device to the end of the list. match with
+ * drivers if at least one master has been registered.
+ */
+int
+fbxspi_register_device(struct fbxspi_device *dev)
+{
+	struct list_head *l;
+
+	down(&list_lock);
+	list_for_each(l, &device_list) {
+		struct fbxspi_device *d;
+
+		d = list_entry(l, struct fbxspi_device, list);
+		if (!strncmp(d->name, dev->name, BUS_ID_SIZE)) {
+			dprint("spi device %s already exists.\n", dev->name);
+			up(&list_lock);
+			return -EEXIST;
+		}
+	}
+	list_add_tail(&dev->list, &device_list);
+	dev->probe_done = 0;
+	__match_and_probe();
+	up(&list_lock);
+	return 0;
+}
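+
+/*
+ * registration sketch (hypothetical board support code, only fields
+ * actually used by this layer are shown): a board declares a device
+ * whose name matches a driver name and registers it; probing happens
+ * once a matching driver and at least one master are present.
+ *
+ *	static struct fbxspi_device panel_spi_dev = {
+ *		.name		= "pt6959",
+ *		.max_speed_hz	= 1000000,
+ *	};
+ *
+ *	fbxspi_register_device(&panel_spi_dev);
+ */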
+
+/*
+ * check name uniqueness. add driver at the end of the list. match with
+ * devices if at least one master has been registered.
+ */
+int
+fbxspi_register_driver(struct fbxspi_driver *drv)
+{
+	struct list_head *l;
+
+	down(&list_lock);
+	list_for_each(l, &driver_list) {
+		struct fbxspi_driver *d;
+
+		d = list_entry(l, struct fbxspi_driver, list);
+		if (!strncmp(d->name, drv->name, BUS_ID_SIZE)) {
+			dprint("spi driver %s already exists.\n", drv->name);
+			up(&list_lock);
+			return -EEXIST;
+		}
+	}
+
+	list_add_tail(&drv->list, &driver_list);
+	__match_and_probe();
+	up(&list_lock);
+	return 0;
+}
+
+/*
+ * check name uniqueness, add master at the end of the list, try to match
+ * already registered devices and drivers.
+ */
+int
+fbxspi_register_master(struct fbxspi_master *master)
+{
+	struct list_head *l;
+
+	dprint("register_master.\n");
+	spin_lock_init(&master->lock);
+
+	down(&list_lock);
+	list_for_each(l, &master_list) {
+		struct fbxspi_master *m;
+
+		m = list_entry(l, struct fbxspi_master, list);
+		if (!strncmp(m->name, master->name, BUS_ID_SIZE)) {
+			dprint("spi master %s already exists.\n",
+			       master->name);
+			up(&list_lock);
+			return -EEXIST;
+		}
+	}
+	list_add_tail(&master->list, &master_list);
+	__match_and_probe();
+	up(&list_lock);
+	return 0;
+}
+
+/*
+ * unregister an SPI master. the __module_get and module_put calls done
+ * after probing / when unregistering drivers ensure that no device is
+ * still attached to the master being removed.
+ */
+void
+fbxspi_unregister_master(struct fbxspi_master *master)
+{
+	dprint("unregister_master.\n");
+
+	down(&list_lock);
+	list_del(&master->list);
+	up(&list_lock);
+}
+
+/*
+ * unregister an SPI driver.
+ */
+void
+fbxspi_unregister_driver(struct fbxspi_driver *drv)
+{
+	dprint("unregister_driver.\n");
+
+	down(&list_lock);
+
+	if (drv->remove && drv->dev && drv->dev->probe_done) {
+		module_put(drv->dev->master->owner);
+		drv->remove(drv->dev);
+		drv->dev->probe_done = 0;
+		drv->dev->master = NULL;
+		drv->dev = NULL;
+	}
+
+	list_del(&drv->list);
+	up(&list_lock);
+}
+
+/*
+ * spi transfer functions.
+ */
+int
+fbxspi_write_then_read(struct fbxspi_device *dev,
+		       const uint8_t *tx, uint32_t tx_count,
+		       uint8_t *rx, uint32_t rx_count)
+{
+	struct fbxspi_message msg = {0};
+	int err;
+
+	spin_lock_bh(&dev->master->lock);
+
+	err = dev->master->setup(dev);
+	if (err)
+		goto out;
+
+	if (tx && tx_count) {
+		msg.tx = tx;
+		msg.tx_count = tx_count;
+	}
+	if (rx && rx_count) {
+		msg.rx = rx;
+		msg.rx_count = rx_count;
+	}
+	if (!msg.rx && !msg.tx) {
+		/* nothing to transmit */
+		err = 0;
+		goto out;
+	}
+
+	err = dev->master->transfer(dev, &msg);
+
+	dev->master->cleanup(dev);
+ out:
+	spin_unlock_bh(&dev->master->lock);
+	return err;
+}
+
+
+static int __init
+fbxspi_core_init(void)
+{
+	printk(PFX "2007, Freebox SA\n");
+	return 0;
+}
+
+static void __exit
+fbxspi_core_exit(void)
+{
+}
+
+module_init(fbxspi_core_init);
+module_exit(fbxspi_core_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Freebox SPI core - www.freebox.fr");
+
+EXPORT_SYMBOL(fbxspi_register_driver);
+EXPORT_SYMBOL(fbxspi_register_device);
+EXPORT_SYMBOL(fbxspi_register_master);
+
+EXPORT_SYMBOL(fbxspi_unregister_driver);
+EXPORT_SYMBOL(fbxspi_unregister_master);
+
+EXPORT_SYMBOL(fbxspi_write_then_read);
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxspi./fbxspi_hw_mv88f5181.c linux-2.6.20.14-fbx/drivers/fbxspi/fbxspi_hw_mv88f5181.c
--- linux-2.6.20.14-fbx/drivers/fbxspi./fbxspi_hw_mv88f5181.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxspi/fbxspi_hw_mv88f5181.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,306 @@
+/*
+ * fbxspi_hw_mv88f5181.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Tue Mar 13 15:19:20 2007
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/fbxspi.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/arch/regs.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/devices.h>
+#include <asm/arch/timer.h>
+
+#include "fbxspi_hw_mv88f5181.h"
+
+# define PFX	"fbxspi_hw_mv88f5181: "
+
+/*
+ * read a register using the ioremapped register area.
+ */
+static inline uint32_t
+fbxspi_hw_readl(struct fbxspi_hw_mv88f5181_priv *priv, uint32_t reg)
+{
+	return readl(priv->io_base + reg - MV_SPI_REGS_BASE);
+}
+
+/*
+ * write a register using the ioremapped register area.
+ */
+static inline void
+fbxspi_hw_writel(struct fbxspi_hw_mv88f5181_priv *priv, uint32_t data,
+	      uint32_t reg)
+{
+	writel(data, priv->io_base + reg - MV_SPI_REGS_BASE);
+}
+
+/*
+ * compute best divisor for device, set chip select.
+ */
+static int
+fbxspi_mv88f5181_setup(struct fbxspi_device *dev)
+{
+	uint32_t val;
+	uint32_t divisor;
+	struct fbxspi_hw_mv88f5181_priv *priv;
+
+	priv = dev->master->priv;
+
+	/* move chip select */
+	if (dev->cs)
+		mv_set_gpio_dataout(priv->spi_gpio_cs, 1);
+	else
+		mv_set_gpio_dataout(priv->spi_gpio_cs, 0);
+
+	if (dev->chip_select_cb)
+		dev->chip_select_cb(dev, 1);
+
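+	/*
+	 * derive the divisor from TCLK so the device clock stays at or
+	 * below max_speed_hz (e.g. assuming a 166MHz TCLK and a device
+	 * limited to 25MHz, this gives 7, rounded up to 8 below).
+	 */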
+	divisor = tclk_get_rate() / dev->max_speed_hz + 1;
+	if (divisor & 0x1)
+		/* MV-S103480 says divisor must be an even value */
+		++divisor;
+
+	if (divisor > 0xff) {
+		printk(KERN_ERR PFX "%s: divisor %i is too high for "
+		       "required speed of %i Hz.\n", dev->name,
+		       divisor, dev->max_speed_hz);
+		return -EIO;
+	}
+
+	/* set divisor (use low divisor) */
+	val = fbxspi_hw_readl(priv, MV_SPI_CLK_PRESCALAR_REG);
+	val &= ~0xffff; /* clear all divisor */
+	val |= divisor | (divisor << 8); /* set hi & low divisor */
+	fbxspi_hw_writel(priv, val, MV_SPI_CLK_PRESCALAR_REG);
+
+	return 0;
+}
+
+/*
+ * poll MV_SPI_ACTIVE bit in MV_SPI_CTRL_REG.
+ */
+static inline void
+spi_poll(struct fbxspi_master *master)
+{
+	int loop = 50;
+
+	while (loop && fbxspi_hw_readl(master->priv, MV_SPI_CTRL_REG) &
+	       MV_SPI_ACTIVE) {
+		udelay(1);
+		--loop;
+	}
+	if (!loop)
+		printk(KERN_ERR PFX "SPI poll timeout!\n");
+}
+
+/*
+ * write tx buf to spi and read rx buf from spi. due to hw limitations
+ * we can only write a maximum of 4 bytes and read a maximum of 2 bytes.
+ */
+static int
+fbxspi_mv88f5181_transfer(struct fbxspi_device *dev,
+			  struct fbxspi_message *msg)
+{
+	int i;
+	uint32_t cmd_low;
+	uint32_t cmd_high;
+	uint32_t control;
+
+
+	spi_poll(dev->master);
+
+	if (msg->tx_count > 4 || msg->rx_count > 2)
+		/* hw won't allow more than 4 bytes to be sent at a
+		   time and more than 2 bytes to be received */
+		return -EMSGSIZE;
+
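+	/* pack tx bytes into the two command registers: bytes 0-1 go in
+	 * the low register, bytes 2-3 in the high one */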
+	cmd_low = cmd_high = 0;
+	for (i = 0; i < msg->tx_count; ++i) {
+		if (i < 2)
+			cmd_low |= msg->tx[i] << 8 * i;
+		else
+			cmd_high |= msg->tx[i] << 8 * (i - 2);
+	}
+	fbxspi_hw_writel(dev->master->priv, cmd_low, MV_SPI_CODEC_CMD_LO_REG);
+	fbxspi_hw_writel(dev->master->priv, cmd_high, MV_SPI_CODEC_CMD_HI_REG);
+
+	control = MV_SPI_TRANSFER_BYTES(msg->tx_count) |
+		MV_SPI_CLK_SPEED_LO_DIV;
+
+	if (dev->lsb_first)
+		control |= MV_SPI_ENDIANESS_LSB_MODE;
+
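+	/* for reads, also program the expected byte count and the CS
+	 * high count used during the read phase */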
+	if (msg->rx_count) {
+		control |= MV_SPI_RD_MODE;
+		control |= MV_SPI_READ_BYTES(msg->rx_count);
+		control |= (0x1FF << MV_SPI_CS_HI_CNT_VAL_RD_OFFS);
+	}
+
+	fbxspi_hw_writel(dev->master->priv, control, MV_SPI_CODEC_CTRL_REG);
+
+	fbxspi_hw_writel(dev->master->priv,
+			 fbxspi_hw_readl(dev->master->priv, MV_SPI_CTRL_REG) |
+			 MV_SPI_ACTIVE,
+			 MV_SPI_CTRL_REG);
+
+	spi_poll(dev->master);
+
+	for (i = 0; i < msg->rx_count; ++i)
+		msg->rx[i] = (fbxspi_hw_readl(dev->master->priv,
+					      MV_SPI_CODEC_READ_DATA_REG)
+			      >> (8 * i)) & 0xff;
+
+	return 0;
+}
+
+/* not really much to do for now. */
+static int
+fbxspi_mv88f5181_cleanup(struct fbxspi_device *dev)
+{
+	if (dev->chip_select_cb)
+		dev->chip_select_cb(dev, 0);
+	return 0;
+}
+
+static int
+fbxspi_mv88f5181_probe(struct platform_device *pdev)
+{
+	int err;
+	uint32_t val;
+	struct mv88f5181_spi_platform_data *pdat;
+	struct fbxspi_hw_mv88f5181_priv *priv = NULL;
+	struct fbxspi_master *master = NULL;
+	struct resource *res;
+
+	pdat = pdev->dev.platform_data;
+	if (pdat == NULL) {
+		err = -ENODEV;
+		goto out_error;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		err = -ENODEV;
+		goto out_error;
+	}
+
+	master = kzalloc(sizeof (*master), GFP_KERNEL);
+	if (master == NULL) {
+		err = -ENOMEM;
+		goto out_error;
+	}
+
+	priv = kzalloc(sizeof (*priv), GFP_KERNEL);
+	if (priv == NULL) {
+		err = -ENOMEM;
+		goto out_error;
+	}
+
+	priv->spi_gpio_cs = pdat->gpio_cs;
+	priv->io_base = ioremap(res->start, res->end - res->start + 1);
+	if (priv->io_base == NULL) {
+		err = -EIO;
+		goto out_error;
+	}
+
+
+	master->priv = priv;
+	master->num_cs = pdat->num_cs;
+	master->owner = THIS_MODULE;
+	master->setup = fbxspi_mv88f5181_setup;
+	master->transfer = fbxspi_mv88f5181_transfer;
+	master->cleanup = fbxspi_mv88f5181_cleanup;
+	strlcpy(master->name, pdev->name, BUS_ID_SIZE);
+
+
+	err = fbxspi_register_master(master);
+	if (err) {
+		printk(KERN_ERR PFX "unable to register master %s\n",
+		       master->name);
+		goto out_error;
+	}
+
+	platform_set_drvdata(pdev, master);
+
+	/* enable SPI operations */
+	val = fbxspi_hw_readl(priv, MV_SPI_GLOBAL_CTRL_REG);
+	val |= MV_SPI_GLOBAL_ENABLE;
+	fbxspi_hw_writel(priv, val, MV_SPI_GLOBAL_CTRL_REG);
+
+	/* set cs gpio to output */
+	mv_set_gpio_direction(priv->spi_gpio_cs, 0);
+
+	return 0;
+
+ out_error:
+	if (master)
+		kfree(master);
+	if (priv && priv->io_base)
+		iounmap(priv->io_base);
+	if (priv)
+		kfree(priv);
+	return err;
+}
+
+static int
+fbxspi_mv88f5181_remove(struct platform_device *pdev)
+{
+	uint32_t val;
+	struct fbxspi_master *master;
+	struct fbxspi_hw_mv88f5181_priv *priv;
+
+	master = platform_get_drvdata(pdev);
+	if (master == NULL)
+		return -ENODEV;
+	priv = master->priv;
+
+	/* Disable SPI operations */
+	val = fbxspi_hw_readl(priv, MV_SPI_GLOBAL_CTRL_REG);
+	val &= ~MV_SPI_GLOBAL_ENABLE;
+	fbxspi_hw_writel(priv, val, MV_SPI_GLOBAL_CTRL_REG);
+
+	/* unregister & free memory */
+	fbxspi_unregister_master(master);
+	iounmap(priv->io_base);
+	kfree(priv);
+	kfree(master);
+
+	return 0;
+}
+
+struct platform_driver fbxspi_mv88f5181_driver =
+{
+	.probe	= fbxspi_mv88f5181_probe,
+	.remove	= fbxspi_mv88f5181_remove,
+	.driver = {
+		.name	= "spi_mv88f5181",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init
+fbxspi_hw_mv88f5181_init(void)
+{
+	printk(PFX "2007, Freebox SA\n");
+	return platform_driver_register(&fbxspi_mv88f5181_driver);
+}
+
+static void __exit
+fbxspi_hw_mv88f5181_exit(void)
+{
+	platform_driver_unregister(&fbxspi_mv88f5181_driver);
+}
+
+module_init(fbxspi_hw_mv88f5181_init);
+module_exit(fbxspi_hw_mv88f5181_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Freebox SPI Marvell 88f5181 bits - www.freebox.fr");
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxspi./fbxspi_hw_mv88f5181.h linux-2.6.20.14-fbx/drivers/fbxspi/fbxspi_hw_mv88f5181.h
--- linux-2.6.20.14-fbx/drivers/fbxspi./fbxspi_hw_mv88f5181.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxspi/fbxspi_hw_mv88f5181.h	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,16 @@
+/*
+ * fbxspi_hw_mv88f5181.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Tue Mar 13 15:55:53 2007
+ * Freebox SA
+ */
+
+#ifndef __FBXSPI_HW_MV88F5181_H
+# define __FBXSPI_HW_MV88F5181_H
+
+struct fbxspi_hw_mv88f5181_priv
+{
+	void __iomem	*io_base;
+	int		spi_gpio_cs;
+};
+
+#endif /* !__FBXSPI_HW_MV88F5181_H */
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxspi./Kconfig linux-2.6.20.14-fbx/drivers/fbxspi/Kconfig
--- linux-2.6.20.14-fbx/drivers/fbxspi./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxspi/Kconfig	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,13 @@
+menu "Freebox SPI support"
+
+config FREEBOX_SPI
+	bool "Freebox SPI core"
+	default n
+
+config FREEBOX_SPI_HW_MV88F5181
+	tristate "Support Marvell 88F5181 SPI Hardware."
+	default n
+	depends on FREEBOX_SPI
+	depends on ARCH_MV88FXX81
+
+endmenu
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxspi./Makefile linux-2.6.20.14-fbx/drivers/fbxspi/Makefile
--- linux-2.6.20.14-fbx/drivers/fbxspi./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxspi/Makefile	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,10 @@
+##
+##  Makefile for linux-freebox
+##  Created by <nschichan@freebox.fr> on Tue Mar 13 12:35:37 2007
+##  Freebox SA
+##
+
+obj-$(CONFIG_FREEBOX_SPI)		+= fbxspi.o
+obj-$(CONFIG_FREEBOX_SPI_HW_MV88F5181)	+= fbxspi_hw_mv88f5181.o
+
+EXTRA_CFLAGS += -Werror
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxwatchdog./fbxwatchdog_core.c linux-2.6.20.14-fbx/drivers/fbxwatchdog/fbxwatchdog_core.c
--- linux-2.6.20.14-fbx/drivers/fbxwatchdog./fbxwatchdog_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxwatchdog/fbxwatchdog_core.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,328 @@
+/*
+ * fbxwatchdog.c for fbxwatchdog
+ * Created by <nschichan@freebox.fr> on Mon Jun 11 19:26:20 2007
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/reboot.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+
+#include <linux/fbxwatchdog.h>
+
+#define SOFTTIMER_FREQ	(HZ / 10)
+
+#define PFX "fbxwatchdog: "
+
+static ssize_t
+show_enabled(struct class_device *dev, char *buf)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = dev->class_data;
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%i\n", wdt->enabled);
+}
+
+/*
+ * start/stop watchdog depending on the value of the first character
+ * of buf. set countdown_min to a sane value.
+ */
+static ssize_t
+store_enabled(struct class_device *dev, const char *buf, size_t count)
+{
+	struct fbxwatchdog *wdt;
+	unsigned long flags;
+
+	wdt = dev->class_data;
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	if (count == 0)
+		return 0;
+
+
+	spin_lock_irqsave(&wdt->lock, flags);
+	switch (*buf) {
+	case '0':
+		if (wdt->enabled) {
+			wdt->enabled = 0;
+			wdt->wdt_stop(wdt);
+		}
+		break;
+
+	case '1':
+		if (!wdt->enabled) {
+			wdt->enabled = 1;
+			wdt->wdt_start(wdt);
+			wdt->countdown_min = INT_MAX;
+		}
+		break;
+
+	default:
+		break;
+	}
+	spin_unlock_irqrestore(&wdt->lock, flags);
+
+	return count;
+}
+
+static ssize_t
+show_countdown(struct class_device *dev, char *buf)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = dev->class_data;
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%i\n", wdt->countdown);
+}
+
+/*
+ * update watchdog countdown with the userland value given in buf.
+ */
+static ssize_t
+store_countdown(struct class_device *dev, const char *buf, size_t count)
+{
+	struct fbxwatchdog *wdt;
+	int countdown;
+	char *ptr;
+
+	wdt = dev->class_data;
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	if (count == 0)
+		return 0;
+
+	ptr = kzalloc(count + 1, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+	strlcpy(ptr, buf, count + 1);
+
+	countdown = simple_strtoul(ptr, NULL, 10);
+	wdt->countdown = countdown;
+	kfree(ptr);
+
+	return count;
+}
+
+static ssize_t
+show_countdown_min(struct class_device *dev, char *buf)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = dev->class_data;
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%i\n", wdt->countdown_min);
+}
+
+static struct class_device_attribute fbxwatchdog_attributes[] = {
+	__ATTR(enabled, 0600, show_enabled, store_enabled),
+	__ATTR(countdown, 0600, show_countdown, store_countdown),
+	__ATTR(countdown_min, 0400, show_countdown_min, NULL),
+	{ },
+};
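+
+/*
+ * the attributes above end up under /sys/class/fbxwatchdog/<name>/:
+ * for example "echo 1 > enabled" arms the watchdog and
+ * "echo 5000 > countdown" gives userland 5 seconds before the next
+ * refresh is due.
+ */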
+
+static struct class fbxwatchdog_class =
+{
+	.name			= "fbxwatchdog",
+	.owner			= THIS_MODULE,
+	.class_dev_attrs	= fbxwatchdog_attributes,
+};
+
+/*
+ * software timer callback: decrement countdown and update
+ * countdown_min if needed. this is called 10 times per second.
+ */
+static void fbxwatchdog_timer_cb(unsigned long data)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = (struct fbxwatchdog *)data;
+
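+	/* countdown is kept in milliseconds: each tick subtracts the
+	 * period of this soft timer (HZ / 10, i.e. about 100 ms) */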
+	if (wdt->enabled) {
+		wdt->countdown -= jiffies_to_msecs(SOFTTIMER_FREQ);
+		if (wdt->countdown < wdt->countdown_min)
+			wdt->countdown_min = wdt->countdown;
+	}
+
+	wdt->timer.expires = jiffies + SOFTTIMER_FREQ;
+	add_timer(&wdt->timer);
+}
+
+/*
+ * called from the half-life interrupt handler; panic if the countdown
+ * is too low (i.e. userland has not refreshed it before it reached
+ * 0).
+ */
+static void fbxwatchdog_halflife_cb(struct fbxwatchdog *wdt)
+{
+	if (wdt->countdown <= 0) {
+		wdt->wdt_stop(wdt);
+		panic("software fbxwatchdog triggered");
+	}
+}
+
+/*
+ * called when there are no references left on the class device. only
+ * free dev, most of the cleanup has already been done in
+ * fbxwatchdog_unregister.
+ */
+static void
+fbxwatchdog_release(struct class_device *dev)
+{
+	printk(KERN_DEBUG PFX "releasing dead device: %s\n", dev->class_id);
+	kfree(dev);
+}
+
+/*
+ * register a new watchdog device.
+ */
+int
+fbxwatchdog_register(struct fbxwatchdog *wdt)
+{
+	int err = 0;
+
+	if (wdt == NULL)
+		return -EFAULT;
+
+	printk(KERN_INFO PFX "registering watchdog %s\n", wdt->name);
+
+	wdt->class_dev = kzalloc(sizeof (struct class_device), GFP_KERNEL);
+	if (!wdt->class_dev) {
+		printk(KERN_ERR PFX "unable to allocate class device.\n");
+		err = -ENOMEM;
+		goto out_error;
+	}
+
+	class_device_initialize(wdt->class_dev);
+	wdt->class_dev->class = &fbxwatchdog_class;
+	wdt->class_dev->class_data = wdt;
+	wdt->class_dev->release = fbxwatchdog_release;
+	strlcpy(wdt->class_dev->class_id, wdt->name, BUS_ID_SIZE);
+
+	err = class_device_add(wdt->class_dev);
+	if (err) {
+		printk(KERN_ERR PFX "unable to register class device %s\n",
+		       wdt->name);
+		goto out_error;
+	}
+
+	/* start countdown soft timer */
+	init_timer(&wdt->timer);
+	wdt->timer.function = fbxwatchdog_timer_cb;
+	wdt->timer.data = (unsigned long)wdt;
+	wdt->timer.expires = jiffies + SOFTTIMER_FREQ;
+	add_timer(&wdt->timer);
+
+	spin_lock_init(&wdt->lock);
+
+	wdt->cb = fbxwatchdog_halflife_cb;
+	err = wdt->wdt_init(wdt);
+	if (err) {
+		printk(KERN_ERR PFX "unable to do low level init of "
+		       "watchdog %s.\n", wdt->name);
+		goto out_del_timer;
+	}
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+	err = fbxwatchdog_char_add(wdt);
+	if (err) {
+		printk(KERN_ERR PFX "unable to add %s to the fbxwatchdog char "
+		       "device interface.\n", wdt->name);
+		goto out_wdt_cleanup;
+	}
+#endif
+
+	return 0;
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+ out_wdt_cleanup:
+	wdt->wdt_cleanup(wdt);
+#endif
+
+ out_del_timer:
+	del_timer_sync(&wdt->timer);
+	class_device_del(wdt->class_dev);
+ out_error:
+	if (wdt->class_dev) {
+		class_device_put(wdt->class_dev);
+		wdt->class_dev = NULL;
+	}
+	return err;
+}
+
+int
+fbxwatchdog_unregister(struct fbxwatchdog *wdt)
+{
+	printk(KERN_INFO PFX "unregistering watchdog %s\n", wdt->name);
+
+	if (wdt->enabled) {
+		unsigned long flags;
+
+		printk(KERN_WARNING "removing enabled watchdog.\n");
+		spin_lock_irqsave(&wdt->lock, flags);
+		wdt->wdt_stop(wdt);
+		spin_unlock_irqrestore(&wdt->lock, flags);
+	}
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+	fbxwatchdog_char_remove(wdt);
+#endif
+	wdt->wdt_cleanup(wdt);
+	del_timer_sync(&wdt->timer);
+	wdt->class_dev->class_data = NULL;
+	class_device_del(wdt->class_dev);
+	class_device_put(wdt->class_dev);
+	return 0;
+}
+
+static int __init
+fbxwatchdog_init(void)
+{
+	int err;
+
+	printk(KERN_INFO PFX "2007, Freebox SA.\n");
+	err = class_register(&fbxwatchdog_class);
+	if (err) {
+		printk(KERN_ERR PFX "unable to register fbxwatchdog class.\n");
+		return err;
+	}
+
+	return 0;
+}
+
+static void __exit
+fbxwatchdog_exit(void)
+{
+	class_unregister(&fbxwatchdog_class);
+}
+
+
+EXPORT_SYMBOL_GPL(fbxwatchdog_register);
+EXPORT_SYMBOL_GPL(fbxwatchdog_unregister);
+
+module_init(fbxwatchdog_init);
+module_exit(fbxwatchdog_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Freebox Watchdog Core - www.freebox.fr");
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxwatchdog./fbxwatchdog_mv88f5181.c linux-2.6.20.14-fbx/drivers/fbxwatchdog/fbxwatchdog_mv88f5181.c
--- linux-2.6.20.14-fbx/drivers/fbxwatchdog./fbxwatchdog_mv88f5181.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxwatchdog/fbxwatchdog_mv88f5181.c	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,236 @@
+/*
+ * fbxwatchdog_mv88f5181.c for fbxwatchdog
+ * Created by <nschichan@freebox.fr> on Mon Jun 11 21:15:46 2007
+ * Freebox SA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/fbxwatchdog.h>
+
+#include <asm/arch/timer.h>
+#include <asm/arch/regs.h>
+#include <asm/arch/io.h>
+
+#define PFX "fbxwatchdog_mv88f5181: "
+
+struct fbxwatchdog_mv88f5181_priv
+{
+	void __iomem *base;
+	struct timer_list half_life_timer;
+};
+
+/*
+ * the mv88f5181 does not trigger an interrupt each time the watchdog
+ * reaches half of its countdown. we emulate this behaviour using a
+ * linux timer that fires every 500 msec.
+ */
+static void
+half_life_timer_cb(unsigned long data)
+{
+	struct fbxwatchdog *wdt;
+	struct fbxwatchdog_mv88f5181_priv *priv;
+	unsigned long flags;
+
+	wdt = (struct fbxwatchdog *)data;
+	priv = wdt->priv;
+
+	spin_lock_irqsave(&wdt->lock, flags);
+
+	timer_load_counter(TIMERWDT, tclk_get_rate());
+
+	if (wdt->cb)
+		wdt->cb(wdt);
+
+	priv->half_life_timer.expires = jiffies + HZ / 2;
+	add_timer(&priv->half_life_timer);
+
+	spin_unlock_irqrestore(&wdt->lock, flags);
+}
+
+/*
+ * setup half life timer.
+ */
+int
+mv88f5181_wdt_init(struct fbxwatchdog *wdt)
+{
+	struct fbxwatchdog_mv88f5181_priv *priv;
+
+	priv = wdt->priv;
+	init_timer(&priv->half_life_timer);
+	priv->half_life_timer.function = half_life_timer_cb;
+	priv->half_life_timer.data = (unsigned long)wdt;
+	return 0;
+}
+
+int
+mv88f5181_wdt_cleanup(struct fbxwatchdog *wdt)
+{
+	return 0;
+}
+
+int
+mv88f5181_wdt_start(struct fbxwatchdog *wdt)
+{
+	uint32_t val;
+	struct fbxwatchdog_mv88f5181_priv *priv;
+
+	printk(KERN_INFO PFX "starting watchdog ...\n");
+
+	priv = wdt->priv;
+	val = mv_readl(MV_TIMER_CTL_REG);
+	if (val & (1 << (2 * TIMERWDT))) {
+		printk(KERN_WARNING PFX "watchdog has been enabled by "
+		       "the bootloader!\n");
+	} else {
+		printk(KERN_DEBUG PFX "watchdog default boot state OK.\n");
+	}
+
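+	/* preload a full 1 second period (tclk_get_rate() ticks) while
+	 * the watchdog timer is still disabled */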
+	timer_load_counter(TIMERWDT, tclk_get_rate());
+	timer_enable(TIMERWDT, 0);
+
+	/*
+	 * thou shalt die here in great pain and suffering if thy
+	 * bootloader makes the watchdog timer underflow for some
+	 * reason.
+	 */
+	val = mv_readl(MV_CPU_RSTOUTN_MASK_REG);
+	val |= (1 << 1);
+	mv_writel(val, MV_CPU_RSTOUTN_MASK_REG);
+
+	/* watchdog will blow up after 1 second if not refreshed */
+	timer_load_counter(TIMERWDT, tclk_get_rate());
+	timer_enable(TIMERWDT, 1);
+
+	/* will fire every 500 ms */
+	priv->half_life_timer.expires = jiffies + HZ / 2;
+	add_timer(&priv->half_life_timer);
+
+	return 0;
+}
+
+int
+mv88f5181_wdt_stop(struct fbxwatchdog *wdt)
+{
+	uint32_t val;
+	struct fbxwatchdog_mv88f5181_priv *priv = NULL;
+
+	printk(KERN_INFO PFX "stopping watchdog ...\n");
+
+	priv = wdt->priv;
+	del_timer_sync(&priv->half_life_timer);
+
+	timer_enable(TIMERWDT, 0);
+	val = mv_readl(MV_CPU_RSTOUTN_MASK_REG);
+	val &= ~(1 << 1);
+	mv_writel(val, MV_CPU_RSTOUTN_MASK_REG);
+
+	return 0;
+}
+
+static int
+fbxwatchdog_platform_probe(struct platform_device *pdev)
+{
+	struct fbxwatchdog *wdt = NULL;
+	struct fbxwatchdog_mv88f5181_priv *priv = NULL;
+	int err = 0;
+
+	printk(PFX "probe\n");
+
+	wdt = kzalloc(sizeof (*wdt), GFP_KERNEL);
+	if (!wdt) {
+		printk(KERN_ERR PFX "unable to allocate memory for watchdog.\n");
+		err = -ENOMEM;
+		goto out_error;
+	}
+
+	priv = kzalloc(sizeof (*priv), GFP_KERNEL);
+	if (!priv) {
+		printk(KERN_ERR PFX "unable to allocate memory for private "
+		       "structure.\n");
+		err = -ENOMEM;
+		goto out_error;
+	}
+
+	wdt->priv = priv;
+	wdt->name = pdev->name;
+
+	wdt->wdt_init = mv88f5181_wdt_init;
+	wdt->wdt_cleanup = mv88f5181_wdt_cleanup;
+	wdt->wdt_start = mv88f5181_wdt_start;
+	wdt->wdt_stop = mv88f5181_wdt_stop;
+
+	err = fbxwatchdog_register(wdt);
+	if (err) {
+		printk(KERN_ERR PFX "unable to register watchdog %s\n",
+		       wdt->name);
+		goto out_error;
+	}
+
+	platform_set_drvdata(pdev, wdt);
+
+	return 0;
+
+ out_error:
+	if (wdt)
+		kfree(wdt);
+	if (priv)
+		kfree(priv);
+	return err;
+}
+
+/*
+ * unregister and free memory allocated by the probe function.
+ */
+static int
+fbxwatchdog_platform_remove(struct platform_device *pdev)
+{
+	struct fbxwatchdog *wdt;
+
+	printk(PFX "remove\n");
+	wdt = platform_get_drvdata(pdev);
+	if (!wdt) {
+		BUG();
+		return -ENODEV;
+	}
+
+	fbxwatchdog_unregister(wdt);
+	kfree(wdt->priv);
+	kfree(wdt);
+
+	return 0;
+}
+
+struct platform_driver fbxwatchdog_platform_driver =
+{
+	.probe	= fbxwatchdog_platform_probe,
+	.remove	= fbxwatchdog_platform_remove,
+	.driver	= {
+		.name	= "mv88f5181_wdt",
+	}
+};
+
+static int __init
+fbxwatchdog_mv88f5181_init(void)
+{
+	printk(KERN_INFO PFX "2007, Freebox SA.\n");
+	return platform_driver_register(&fbxwatchdog_platform_driver);
+}
+
+static void __exit
+fbxwatchdog_mv88f5181_exit(void)
+{
+	platform_driver_unregister(&fbxwatchdog_platform_driver);
+}
+
+module_init(fbxwatchdog_mv88f5181_init);
+module_exit(fbxwatchdog_mv88f5181_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Freebox Watchdog, mv88f5181 specific bits - www.freebox.fr");
+
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxwatchdog./Kconfig linux-2.6.20.14-fbx/drivers/fbxwatchdog/Kconfig
--- linux-2.6.20.14-fbx/drivers/fbxwatchdog./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxwatchdog/Kconfig	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,24 @@
+menu "Freebox Watchdog Support"
+
+config FREEBOX_WATCHDOG
+	tristate "Freebox Watchdog"
+	default n
+
+config FREEBOX_WATCHDOG_CHAR
+	bool "Freebox Watchdog char device interface."
+	depends on FREEBOX_WATCHDOG
+	default n
+
+config FREEBOX_WATCHDOG_MV88F5181
+	tristate "Marvell 88f5181 Freebox Watchdog support"
+	depends on FREEBOX_WATCHDOG
+	depends on ARCH_MV88FXX81
+	default n
+
+config FREEBOX_WATCHDOG_BCM963XX
+	tristate "Broadcom 963xx Freebox Watchdog support"
+	depends on FREEBOX_WATCHDOG
+	depends on BCM963XX
+	default n
+
+endmenu
diff -Nruw linux-2.6.20.14-fbx/drivers/fbxwatchdog./Makefile linux-2.6.20.14-fbx/drivers/fbxwatchdog/Makefile
--- linux-2.6.20.14-fbx/drivers/fbxwatchdog./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/fbxwatchdog/Makefile	2010-12-29 19:30:06.781440861 +0100
@@ -0,0 +1,10 @@
+
+obj-$(CONFIG_FREEBOX_WATCHDOG) += fbxwatchdog.o
+
+fbxwatchdog-objs = fbxwatchdog_core.o
+ifeq ($(CONFIG_FREEBOX_WATCHDOG_CHAR),y)
+fbxwatchdog-objs += fbxwatchdog_char.o
+endif
+
+obj-$(CONFIG_FREEBOX_WATCHDOG_MV88F5181) += fbxwatchdog_mv88f5181.o
+obj-$(CONFIG_FREEBOX_WATCHDOG_BCM963XX)	+= fbxwatchdog_bcm963xx.o
diff -Nruw linux-2.6.20.14-fbx/drivers/media/dvb/tango2./Kconfig linux-2.6.20.14-fbx/drivers/media/dvb/tango2/Kconfig
--- linux-2.6.20.14-fbx/drivers/media/dvb/tango2./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/media/dvb/tango2/Kconfig	2010-12-29 19:30:06.961441087 +0100
@@ -0,0 +1,11 @@
+config DVB_TANGO2
+	tristate "Tango2 DVB adapter"
+	depends on DVB_CORE && ARCH_FBX5_B
+	select I2C
+	select I2C_ALGOBIT
+	select DVB_TDA1004X
+	select DVB_PLL
+
+config DVB_TANGO2_TESTBED
+	bool "extended testing and useful error codes"
+	depends on DVB_TANGO2
diff -Nruw linux-2.6.20.14-fbx/drivers/media/dvb/tango2./Makefile linux-2.6.20.14-fbx/drivers/media/dvb/tango2/Makefile
--- linux-2.6.20.14-fbx/drivers/media/dvb/tango2./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/media/dvb/tango2/Makefile	2010-12-29 19:30:06.961441087 +0100
@@ -0,0 +1,5 @@
+obj-$(CONFIG_DVB_TANGO2) += tango2_dvb.o
+
+tango2_dvb-objs	:= tango2.o
+
+EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff -Nruw linux-2.6.20.14-fbx/drivers/misc/bcm963xx_dsl./Makefile linux-2.6.20.14-fbx/drivers/misc/bcm963xx_dsl/Makefile
--- linux-2.6.20.14-fbx/drivers/misc/bcm963xx_dsl./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/misc/bcm963xx_dsl/Makefile	2010-12-29 19:30:07.031445324 +0100
@@ -0,0 +1,13 @@
+obj-$(CONFIG_BCM963XX_DSL_ALT) += bcm963xx_dsl.o
+
+bcm963xx_dsl-objs +=	bcm_dsl.o		\
+			bcm_dsl_cdata.o		\
+			bcm_dsl_ovhdata.o
+
+bcm963xx_dsl-objs +=	adsl_fbxprocfs.o
+
+bcm963xx_dsl-objs +=	bcm_dsl_debug.o
+bcm963xx_dsl-objs +=	phy_strings.o
+bcm963xx_dsl-objs +=	g992_strings.o
+
+bcm963xx_dsl-objs +=	hdlc.o
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/drivers/misc/crash_zone.c	2010-12-29 19:30:07.031445324 +0100
@@ -0,0 +1,145 @@
+#include <linux/init.h>
+#include <linux/crash_zone.h>
+#include <linux/proc_fs.h>
+#include <asm/checksum.h>
+
+static unsigned char *dead_zone = NULL;
+static unsigned int dead_zone_size;
+
+#define CRASH_DATA_SIZE	(dead_zone_size - sizeof (struct crash_header))
+
+static inline unsigned short int csum_data(const unsigned char *buff, int len)
+{
+	return csum_fold(csum_partial(buff, len, 0));
+}
+
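+/*
+ * /proc/crash_zone read side: hand back the log saved by the panic
+ * notifier, but only if the magic, length and checksum all look sane.
+ */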
+static int crash_zone_read_proc(char *page, char **start, off_t off,
+				int count, int *eof, void *data)
+{
+	struct crash_header *crash_zone;
+	int datalen;
+
+	/* check crash zone */
+	if (!dead_zone) {
+		printk(KERN_DEBUG "dead zone unset\n");
+		goto empty;
+	}
+
+	crash_zone = (struct crash_header *)dead_zone;
+	if (crash_zone->magic != CRASH_MAGIC) {
+		printk(KERN_DEBUG "bad crash zone magic\n");
+		goto empty;
+	}
+
+	if (crash_zone->len > dead_zone_size) {
+		printk(KERN_DEBUG "bad crash zone len\n");
+		goto empty;
+	}
+
+	if (csum_data((unsigned char *)crash_zone, crash_zone->len)) {
+		printk(KERN_DEBUG "bad crash zone checksum\n");
+		goto empty;
+	}
+
+	/* copy crash data */
+	datalen = crash_zone->len - sizeof (struct crash_header);
+	if (off + count >= datalen) {
+		*eof = 1;
+		count = datalen - off;
+	}
+
+	*start = page;
+	if (count > 0) {
+		memcpy(page, &crash_zone->data + off, count);
+		return count;
+	}
+
+	return 0;
+
+empty:
+	*eof = 1;
+	return 0;
+}
+
+static int crash_zone_write_proc(struct file *file, const char *buffer,
+				 unsigned long count, void *data)
+{
+	struct crash_header *crash_zone;
+	int len;
+
+	if (!dead_zone)
+		return count;
+
+	/* empty the crash zone */
+	crash_zone = (struct crash_header *)dead_zone;
+	len = sizeof (struct crash_header);
+	crash_zone->magic = CRASH_MAGIC;
+	crash_zone->len = len;
+	crash_zone->checksum = 0;
+	crash_zone->checksum = csum_data((unsigned char *)crash_zone, len);
+
+	return count;
+}
+
+static int crash_zone_panic_event(struct notifier_block *self,
+				  unsigned long event, void *data)
+{
+	struct crash_header *crash_zone;
+	int data_len, len;
+
+	if (!dead_zone)
+		return NOTIFY_DONE;
+
+	crash_zone = (struct crash_header *)dead_zone;
+
+	/* copy current kernel log into crash zone */
+	data_len = dead_zone_size - sizeof (struct crash_header);
+	console_emergency_dump(&crash_zone->data, &data_len);
+
+	/* checksum area */
+	len = data_len + sizeof (struct crash_header);
+	crash_zone->magic = CRASH_MAGIC;
+	crash_zone->len = len;
+	crash_zone->checksum = 0;
+	crash_zone->checksum = csum_data((unsigned char *)crash_zone, len);
+	printk("Panic log saved in crash zone\n");
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block crash_panic_block = {
+	.notifier_call	= crash_zone_panic_event,
+	.next		= NULL,
+	.priority	= INT_MAX, /* try to do it first */
+};
+
+void __init crash_zone_set_param(unsigned char *zone, unsigned int size)
+{
+	dead_zone = zone;
+	dead_zone_size = size;
+}
+
+static int __init crash_zone_init(void)
+{
+	struct proc_dir_entry	*proc;
+
+	/* create crash proc entry */
+	proc = create_proc_entry("crash_zone", 0, NULL);
+	if (!proc)
+		return -ENOMEM;
+	proc->read_proc  = crash_zone_read_proc;
+	proc->write_proc = crash_zone_write_proc;
+
+	/* register panic notifier */
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &crash_panic_block);
+	return 0;
+}
+
+static void __exit crash_zone_exit(void)
+{
+	/* undo what crash_zone_init did */
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &crash_panic_block);
+	remove_proc_entry("crash_zone", NULL);
+}
+
+module_init(crash_zone_init);
+module_exit(crash_zone_exit);
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/drivers/net/mv88fxx81_eth.c	2010-12-29 19:30:07.131445252 +0100
@@ -0,0 +1,2194 @@
+/*
+ * Ethernet driver for gigabit ethernet mac in Marvell 88Fxx81 SOC
+ *
+ * This driver uses NAPI and interrupt coalescing.
+ *
+ * The driver won't  do any phy operation (probing,  polling, ...)  if
+ * it's configured to ignore the phy. This behaviour can be configured
+ * only at initialization  via platform data.  Note that  the phy code
+ * has _not_ been tested.
+ *
+ *
+ * Copyright (C) 2006 Maxime Bizon <mbizon@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <linux/net.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+
+#include <asm/io.h>
+#include <asm/arch/regs.h>
+#include <asm/arch/timer.h>
+
+#include "mv88fxx81_eth.h"
+
+#define PFX	"mv88fxx81_eth: "
+
+
+static char mv88fxx81_driver_name[] = "mv88fxx81_eth";
+static char mv88fxx81_driver_version[] = "1.0";
+
+/*
+ * read data from mdio
+ */
+static void eth_port_read_smi_reg(struct mv88fxx81_private *priv,
+				  unsigned int phy_addr,
+				  unsigned int phy_reg,
+				  unsigned int *value)
+{
+	int i;
+
+	/* wait for the SMI register to become available */
+	for (i = 0; mv_eth_read(priv, MV_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+		if (i == PHY_WAIT_ITERATIONS) {
+			printk(KERN_ERR PFX "PHY busy timeout, port %d\n",
+			       priv->port_num);
+			return;
+		}
+		udelay(PHY_WAIT_MICRO_SECONDS);
+	}
+
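+	/* issue the read: phy address in bits 16-20, register number in
+	 * bits 21-25, opcode in the low bits */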
+	mv_eth_write(priv, MV_ETH_SMI_REG,
+		     (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
+
+	/* now wait for the data to be valid */
+	for (i = 0; !(mv_eth_read(priv, MV_ETH_SMI_REG) &
+		      ETH_SMI_READ_VALID); i++) {
+		if (i == PHY_WAIT_ITERATIONS) {
+			printk(KERN_ERR PFX "PHY read timeout, port %d\n",
+			       priv->port_num);
+			return;
+		}
+		udelay(PHY_WAIT_MICRO_SECONDS);
+	}
+
+	*value = mv_eth_read(priv, MV_ETH_SMI_REG) & 0xffff;
+}
+
+/*
+ * write given data to given address on mdio
+ */
+static void eth_port_write_smi_reg(struct mv88fxx81_private *priv,
+				   unsigned int phy_addr,
+				   unsigned int phy_reg,
+				   unsigned int value)
+{
+	int i;
+
+	/* wait for the SMI register to become available */
+	for (i = 0; mv_eth_read(priv, MV_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+		if (i == PHY_WAIT_ITERATIONS) {
+			printk(KERN_ERR PFX "PHY busy timeout, port %d\n",
+			       priv->port_num);
+			return;
+		}
+		udelay(PHY_WAIT_MICRO_SECONDS);
+	}
+
+	mv_eth_write(priv, MV_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
+		     ETH_SMI_OPCODE_WRITE | (value & 0xffff));
+}
+
+/*
+ * look for a phy
+ */
+static int ethernet_phy_detect(struct mv88fxx81_private *priv)
+{
+	unsigned int val;
+	int auto_neg, phy_addr;
+
+	phy_addr = priv->mii.phy_id;
+
+	/* invert autoneg settings and see if the change takes effect */
+	eth_port_read_smi_reg(priv, phy_addr, MII_BMCR, &val);
+	auto_neg = val & BMCR_ANENABLE;
+	val ^= BMCR_ANENABLE;
+	eth_port_write_smi_reg(priv, phy_addr, MII_BMCR, val);
+
+	eth_port_read_smi_reg(priv, phy_addr, MII_BMCR, &val);
+	if ((val & BMCR_ANENABLE) == auto_neg)
+		return -ENODEV;
+
+	/* restore original settings */
+	val ^= BMCR_ANENABLE;
+	eth_port_write_smi_reg(priv, phy_addr, MII_BMCR, val);
+
+	return 0;
+}
+
+/*
+ * reset ethernet port PHY.
+ */
+static void ethernet_phy_reset(struct mv88fxx81_private *priv)
+{
+	unsigned int val, loop;
+	int phy_addr;
+
+	phy_addr = priv->mii.phy_id;
+
+	/* Reset the PHY */
+	eth_port_read_smi_reg(priv, phy_addr, MII_BMCR, &val);
+	val |= BMCR_RESET;
+	eth_port_write_smi_reg(priv, phy_addr, MII_BMCR, val);
+	udelay(100);
+
+	/* wait for PHY to come out of reset (1s max) */
+	loop = 1000;
+	while (loop) {
+		eth_port_read_smi_reg(priv, phy_addr, MII_BMCR, &val);
+		if (!(val & BMCR_RESET))
+			break;
+
+		msleep(1);
+		loop--;
+	}
+}
+
+/*
+ * enable tx operation
+ */
+static void mv88fxx81_eth_port_enable_tx(struct mv88fxx81_private *priv)
+{
+	mv_eth_write(priv, MV_ETH_TRANSMIT_QUEUE_COMMAND_REG(priv->port_num), 1);
+}
+
+/*
+ * disable tx operation and wait for tx fifo to reach empty state
+ */
+static int mv88fxx81_eth_port_disable_tx(struct mv88fxx81_private *priv)
+{
+	int enabled, port;
+
+	port = priv->port_num;
+
+	/* Stop Tx port activity. Check port Tx activity. */
+	enabled = mv_eth_read(priv, MV_ETH_TRANSMIT_QUEUE_COMMAND_REG(port));
+	enabled &= 0x1;
+
+	if (enabled) {
+		int loop;
+
+		/* Issue stop command for active queues only */
+		mv_eth_write(priv, MV_ETH_TRANSMIT_QUEUE_COMMAND_REG(port),
+			     (1 << 8));
+
+		/* Wait for all Tx activity to terminate. */
+		/* Check port cause register that all Tx queues are stopped */
+		loop = 1000;
+		while (mv_eth_read(priv, MV_ETH_TRANSMIT_QUEUE_COMMAND_REG(port))
+		       & 0x1 && loop-- > 0)
+			udelay(PHY_WAIT_MICRO_SECONDS);
+
+		if (!loop)
+			printk(KERN_ERR PFX "timeout waiting for tx operation "
+			       "to end\n");
+
+		/* Wait for Tx FIFO to empty */
+		loop = 1000;
+		while (mv_eth_read(priv, MV_ETH_PORT_STATUS_REG(port)) &
+		       MV_ETH_PORT_STATUS_TX_FIFO_EMPTY && loop-- > 0)
+			udelay(PHY_WAIT_MICRO_SECONDS);
+
+		if (!loop)
+			printk(KERN_ERR PFX "timeout waiting for tx fifo "
+			       "to empty\n");
+	}
+
+	return enabled;
+}
+
+/*
+ * Set  the port  serial configuration  register for  the speed/duplex
+ * mode.
+ */
+static void mv88fxx81_eth_update_pscr(struct net_device *dev,
+				      struct ethtool_cmd *ecmd)
+{
+	struct mv88fxx81_private *priv;
+	int port_num;
+	u32 o_pscr, n_pscr;
+
+	priv = netdev_priv(dev);
+	port_num = priv->port_num;
+	o_pscr = mv_eth_read(priv, MV_ETH_PORT_SERIAL_CONTROL_REG(port_num));
+	n_pscr = o_pscr;
+
+	/* clear speed, duplex and rx buffer size fields */
+	n_pscr &= ~(MV_ETH_SET_MII_SPEED_TO_100 |
+		   MV_ETH_SET_GMII_SPEED_TO_1000 |
+		   MV_ETH_SET_FULL_DUPLEX_MODE |
+		   MV_ETH_MAX_RX_PACKET_MASK);
+
+	if (ecmd->duplex == DUPLEX_FULL)
+		n_pscr |= MV_ETH_SET_FULL_DUPLEX_MODE;
+
+	if (ecmd->speed == SPEED_1000)
+		n_pscr |= MV_ETH_SET_GMII_SPEED_TO_1000 |
+			  MV_ETH_MAX_RX_PACKET_9700BYTE;
+	else {
+		if (ecmd->speed == SPEED_100)
+			n_pscr |= MV_ETH_SET_MII_SPEED_TO_100;
+		n_pscr |= MV_ETH_MAX_RX_PACKET_1522BYTE;
+	}
+
+	if (priv->ignore_phy)
+		n_pscr |= MV_ETH_FORCE_LINK_PASS;
+
+	if (n_pscr != o_pscr) {
+		if ((o_pscr & MV_ETH_SERIAL_PORT_ENABLE) == 0)
+			mv_eth_write(priv, MV_ETH_PORT_SERIAL_CONTROL_REG(
+					     port_num), n_pscr);
+		else {
+			int was_enabled;
+
+			was_enabled = mv88fxx81_eth_port_disable_tx(priv);
+
+			/* disable first */
+			o_pscr &= ~(MV_ETH_SERIAL_PORT_ENABLE |
+				    MV_ETH_FORCE_LINK_PASS);
+			mv_eth_write(priv, MV_ETH_PORT_SERIAL_CONTROL_REG(
+					     port_num), o_pscr);
+
+			/* update all bits */
+			mv_eth_write(priv, MV_ETH_PORT_SERIAL_CONTROL_REG(
+					     port_num),
+				     n_pscr & ~(MV_ETH_SERIAL_PORT_ENABLE));
+
+			/* enable */
+			mv_eth_write(priv, MV_ETH_PORT_SERIAL_CONTROL_REG(
+					     port_num), n_pscr);
+
+			if (was_enabled)
+				mv88fxx81_eth_port_enable_tx(priv);
+		}
+	}
+}
+
+
+/*
+ * set the  ethernet port PHY  address in mac  register, so it  can be
+ * polled by hw
+ */
+static void mv88fxx81_eth_update_phy_addr(struct mv88fxx81_private *priv,
+					  int phy_addr)
+{
+	int addr_shift;
+	u32 reg_data;
+
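+	/* each port has a 5 bit slot in the shared PHY address register */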
+	addr_shift = 5 * priv->port_num;
+	reg_data = mv_eth_read(priv, MV_ETH_PHY_ADDR_REG);
+	reg_data &= ~(0x1f << addr_shift);
+	reg_data |= (phy_addr & 0x1f) << addr_shift;
+	mv_eth_write(priv, MV_ETH_PHY_ADDR_REG, reg_data);
+}
+
+
+/*
+ * handle link change notification, called by schedule_work
+ */
+static void mv88fxx81_eth_handle_link_change(struct work_struct *ugly)
+{
+	struct mv88fxx81_private *priv;
+	struct net_device *dev;
+	struct ethtool_cmd cmd;
+
+	priv = container_of(ugly, struct mv88fxx81_private, link_update_task);
+	dev = priv->mii.dev;
+	mutex_lock(&priv->mii_mutex);
+
+	if (priv->ignore_phy || mii_link_ok(&priv->mii)) {
+		/* link is UP */
+
+		/* we may need to disable  tx, make sure we don't mess
+		 * with _xmit and tx_reclaim */
+		spin_lock_bh(&priv->tx_lock);
+
+		/* if we have a phy,  read the new settings and update
+		 * mac */
+		if (!priv->ignore_phy) {
+			mii_check_media(&priv->mii, 1, 0);
+			mii_ethtool_gset(&priv->mii, &cmd);
+			mv88fxx81_eth_update_pscr(dev, &cmd);
+		}
+
+		/* enable tx queue if hw queue is not full */
+		if (netif_queue_stopped(dev) && priv->tx_desc_count != 0)
+			netif_wake_queue(dev);
+
+		spin_unlock_bh(&priv->tx_lock);
+		netif_carrier_on(dev);
+	} else {
+		/* link is DOWN, stop queue */
+		if (!priv->ignore_phy)
+			mii_check_media(&priv->mii, 1, 0);
+		netif_stop_queue(dev);
+		netif_carrier_off(dev);
+	}
+
+	mutex_unlock(&priv->mii_mutex);
+}
+
+#ifdef CONFIG_SKB_RECYCLE
+static int mv88fxx81_skb_recycle(void *recycle_data, struct sk_buff *skb)
+{
+	struct mv88fxx81_private *priv;
+
+	priv = (struct mv88fxx81_private *)recycle_data;
+	if (skb_queue_len(&priv->skb_recycle_queue) >
+	    MV88FXX81_MAX_SKB_RECYCLE)
+		return 0;
+
+	skb_queue_head(&priv->skb_recycle_queue, skb);
+	return 1;
+}
+#endif
+
+/*
+ * rx queue refill
+ */
+static void mv88fxx81_eth_rx_refill_descs(struct net_device *dev)
+{
+	struct mv88fxx81_private *priv;
+	struct sk_buff *skb;
+	int unaligned;
+
+	priv = netdev_priv(dev);
+
+	while (priv->rx_desc_count < priv->rx_ring_size) {
+		struct eth_rx_desc *p_used_rx_desc;
+		int used_rx_desc;
+		dma_addr_t p;
+#ifdef CONFIG_SKB_RECYCLE
+		int cache_clean;
+
+		/* try to get an skb from recycle queue */
+		skb = skb_dequeue(&priv->skb_recycle_queue);
+		if (skb) {
+			cache_clean = skb->cache_clean;
+			skb_clean_state(skb);
+			skb_reserve(skb, NET_SKB_PAD);
+		} else {
+			cache_clean = 0;
+			skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
+			if (!skb)
+				break;
+			skb->recycle = mv88fxx81_skb_recycle;
+			skb->recycle_data = priv;
+		}
+#else
+		/* create a new skb  */
+		skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN);
+		if (!skb)
+			break;
+#endif
+
+		/* make sure it is 8 bytes aligned */
+		unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1);
+		if (unaligned)
+			skb_reserve(skb, ETH_DMA_ALIGN - unaligned);
+
+		/* get hardware address of skb data */
+#ifdef CONFIG_SKB_RECYCLE
+		if (cache_clean)
+			p = dma_map_single(NULL, skb->data, 0,
+					   DMA_FROM_DEVICE);
+		else
+#endif
+			p = dma_map_single(NULL, skb->data, ETH_RX_SKB_SIZE,
+					   DMA_FROM_DEVICE);
+
+		/* program next dma desc entry */
+		priv->rx_desc_count++;
+		used_rx_desc = priv->rx_used_desc;
+		p_used_rx_desc = &priv->p_rx_desc_area[used_rx_desc];
+		p_used_rx_desc->buf_ptr = p;
+		p_used_rx_desc->buf_size = ETH_RX_SKB_SIZE;
+
+		wmb();
+		p_used_rx_desc->cmd_sts =
+			ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
+		wmb();
+
+		/* keep track of skb */
+		priv->rx_skb[used_rx_desc] = skb;
+
+		/* the hw align the ip header by itself */
+		skb_reserve(skb, 2);
+
+		priv->rx_used_desc = (used_rx_desc + 1) % priv->rx_ring_size;
+	}
+
+	/*
+	 * If RX  ring is still empty,  set a timer  to try allocating
+	 * again at a later time.
+	 */
+	if (priv->rx_desc_count == 0) {
+		printk(KERN_INFO PFX "Rx ring is empty\n");
+		priv->rx_timeout.expires = jiffies + (HZ / 10);
+		add_timer(&priv->rx_timeout);
+	}
+}
+
+/*
+ * timer callback for queue refill in case of OOM
+ */
+static void mv88fxx81_eth_rx_refill_descs_timer_wrapper(unsigned long data)
+{
+	mv88fxx81_eth_rx_refill_descs((struct net_device *)data);
+}
+
+/*
+ * extract packets from receive queue
+ */
+static int mv88fxx81_eth_receive_queue(struct net_device *dev, int budget)
+{
+	struct mv88fxx81_private *priv;
+	int processed;
+
+	priv = netdev_priv(dev);
+	processed = 0;
+
+	do {
+		struct eth_rx_desc *p_curr_rx_desc;
+		struct sk_buff *skb;
+		u32 cmd_sts;
+		u32 byte_cnt;
+
+		p_curr_rx_desc = &priv->p_rx_desc_area[priv->rx_curr_desc];
+
+		/* read command status of descriptor */
+		cmd_sts = p_curr_rx_desc->cmd_sts;
+		if (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)
+			break;
+
+		/* ensure other fields of  descriptor will not be read
+		 * before command status */
+		rmb();
+
+		skb = priv->rx_skb[priv->rx_curr_desc];
+		priv->rx_skb[priv->rx_curr_desc] = NULL;
+
+		/* release dma mapping */
+		dma_unmap_single(NULL, p_curr_rx_desc->buf_ptr,
+				 skb->len + 2, DMA_FROM_DEVICE);
+
+		priv->rx_curr_desc = (priv->rx_curr_desc + 1) % priv->rx_ring_size;
+		priv->rx_desc_count--;
+		priv->stats.rx_packets++;
+		processed++;
+
+		/* remove  2  bytes junk  at  beginning  caused by  ip
+		 * header alignment */
+		byte_cnt = p_curr_rx_desc->byte_cnt - 2;
+		priv->stats.rx_bytes += byte_cnt;
+
+		/*
+		 * In case received a packet without first / last bits
+		 * on, packets needs to be dropped.
+		 */
+		if ((cmd_sts & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) !=
+		    (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) {
+			priv->stats.rx_dropped++;
+			dev_kfree_skb_irq(skb);
+			continue;
+		}
+
+		/* if the packet has error bit set, then drop it */
+		if ((cmd_sts & ETH_ERROR_SUMMARY)) {
+			priv->stats.rx_dropped++;
+			priv->stats.rx_errors++;
+			dev_kfree_skb_irq(skb);
+			continue;
+		}
+
+		/* strip CRC */
+		skb_put(skb, byte_cnt - 4);
+		skb->dev = dev;
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_receive_skb(skb);
+		dev->last_rx = jiffies;
+	} while (--budget > 0 && processed < priv->rx_ring_size);
+
+	if (processed)
+		mv88fxx81_eth_rx_refill_descs(dev);
+
+	return processed;
+}
+
+/*
+ * try or force reclaim of transmitted buffers
+ */
+static int mv88fxx81_eth_tx_reclaim(struct net_device *dev, int force)
+{
+	struct mv88fxx81_private *priv;
+	u32 cmd_sts;
+	int released;
+
+	priv = netdev_priv(dev);
+	released = 0;
+
+	while (priv->tx_desc_count < priv->tx_ring_size) {
+		struct eth_tx_desc *p_tx_used_desc;
+		struct sk_buff *skb;
+
+		/* We run in a  bh and fight against start_xmit, which
+		 * is called with bh disabled  */
+		spin_lock(&priv->tx_lock);
+
+		p_tx_used_desc = &priv->p_tx_desc_area[priv->tx_used_desc];
+		cmd_sts = p_tx_used_desc->cmd_sts;
+
+		if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) {
+			/* not owned, */
+			spin_unlock(&priv->tx_lock);
+			break;
+		}
+
+		/* ensure other  part of  the descriptor are  not read
+		 * before we check ownership */
+		rmb();
+
+		skb = priv->tx_skb[priv->tx_used_desc];
+		priv->tx_skb[priv->tx_used_desc] = NULL;
+		dma_unmap_single(NULL, p_tx_used_desc->buf_ptr, skb->len,
+				 DMA_TO_DEVICE);
+
+		priv->tx_used_desc = (priv->tx_used_desc + 1) % priv->tx_ring_size;
+		priv->tx_desc_count++;
+
+		spin_unlock(&priv->tx_lock);
+
+		if (cmd_sts & ETH_ERROR_SUMMARY)
+			priv->stats.tx_errors++;
+
+		dev_kfree_skb(skb);
+		released++;
+	}
+
+	if (netif_queue_stopped(dev) && released)
+		netif_wake_queue(dev);
+
+	return released;
+}
+
+/*
+ * Queue given skb for transmission
+ */
+static int mv88fxx81_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct eth_tx_desc *p_curr_tx_desc;
+	struct mv88fxx81_private *priv;
+	u32 cmd_sts;
+	int ret;
+	unsigned int flush_len;
+
+	priv = netdev_priv(dev);
+
+	/* lock against tx reclaim */
+	spin_lock(&priv->tx_lock);
+
+	/* make sure  the tx hw queue  is not full,  should not happen
+	 * since we stop queue before it's the case */
+	if (unlikely(!priv->tx_desc_count)) {
+		netif_stop_queue(dev);
+		printk(KERN_ERR PFX "bug: xmit called with no tx desc "
+		       "available\n");
+		ret = 1;
+		goto out_unlock;
+	}
+
+	/* point to the next available desc */
+	p_curr_tx_desc = &priv->p_tx_desc_area[priv->tx_curr_desc];
+
+	/* prepare command status value */
+	cmd_sts = ETH_TX_FIRST_DESC |
+		ETH_GEN_CRC |
+		ETH_BUFFER_OWNED_BY_DMA |
+		ETH_ZERO_PADDING |
+		ETH_TX_LAST_DESC |
+		ETH_TX_ENABLE_INTERRUPT;
+
+	/* Errata BTS #50, IHL must be 5 if no HW checksum */
+	cmd_sts |= 5 << ETH_TX_IHL_SHIFT;
+
+	priv->tx_skb[priv->tx_curr_desc] = skb;
+	p_curr_tx_desc->byte_cnt = skb->len;
+#ifndef CONFIG_IP_FFN
+	flush_len = skb->len;
+#else
+
+	/* don't flush past best case ethernet + IP + tcp header */
+#define MAX_FLUSH_LEN	(18 + 20 + sizeof (struct tcphdr))
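+	/* 18 is assumed to cover the 14 byte ethernet header plus room
+	 * for a VLAN tag, 20 a minimal IP header */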
+
+	if (skb->ffn_state == 2 && skb->len > MAX_FLUSH_LEN)
+		flush_len = MAX_FLUSH_LEN;
+	else
+		flush_len = skb->len;
+#endif
+	p_curr_tx_desc->buf_ptr = dma_map_single(NULL, skb->data, flush_len,
+						 DMA_TO_DEVICE);
+#ifdef CONFIG_SKB_RECYCLE
+	if (!skb->cloned && !skb_shared(skb))
+		skb->cache_clean = 1;
+	else
+		skb->cache_clean = 0;
+#endif
+
+	p_curr_tx_desc->l4i_chk = 0;
+
+	/* ensure all other descriptors are written before first cmd_sts */
+	wmb();
+	p_curr_tx_desc->cmd_sts = cmd_sts;
+
+	/* ensure all descriptors are written before poking hardware */
+	wmb();
+	mv88fxx81_eth_port_enable_tx(priv);
+
+	/* mark descriptor as used */
+	priv->tx_curr_desc = (priv->tx_curr_desc + 1) % priv->tx_ring_size;
+	priv->tx_desc_count--;
+
+	/* stop queue if no more desc available */
+	if (!priv->tx_desc_count)
+		netif_stop_queue(dev);
+
+	priv->stats.tx_bytes += skb->len;
+	priv->stats.tx_packets++;
+	dev->trans_start = jiffies;
+	ret = 0;
+
+out_unlock:
+	spin_unlock(&priv->tx_lock);
+	return ret;
+}
+
+/*
+ * poll, used by NAPI
+ */
+static int mv88fxx81_eth_poll(struct net_device *dev, int *budget)
+{
+	struct mv88fxx81_private *priv;
+	int orig_budget, rx_work_done, tx_work_done;
+
+	priv = netdev_priv(dev);
+
+	/* reclaim sent skb */
+	tx_work_done = mv88fxx81_eth_tx_reclaim(dev, 0);
+
+	orig_budget = *budget;
+	if (orig_budget > dev->quota)
+		orig_budget = dev->quota;
+	rx_work_done = mv88fxx81_eth_receive_queue(dev, orig_budget);
+	*budget -= rx_work_done;
+	dev->quota -= rx_work_done;
+
+	if (rx_work_done >= orig_budget || tx_work_done > 0) {
+		/* rx/tx queue is not yet empty */
+		return 1;
+	}
+
+	/* no  more packet  in rx/tx  queue, remove  device  from poll
+	 * queue */
+	netif_rx_complete(dev);
+
+	/* restore rx/tx interrupt */
+	mv_eth_write(priv, MV_ETH_INTERRUPT_MASK_REG(priv->port_num),
+		     MV_ETH_INT_MASK);
+	mv_eth_write(priv, MV_ETH_INTERRUPT_EXTEND_MASK_REG(priv->port_num),
+		     MV_ETH_INT_MASK_EXT);
+
+	return 0;
+}
+
+/*
+ * interrupt handler for ethernet unit irq (mainly memory errors)
+ */
+static irqreturn_t mv88fxx81_eth_unit_int_handler(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct mv88fxx81_private *priv;
+	u32 cause, address;
+
+	dev = (struct net_device *)dev_id;
+	priv = netdev_priv(dev);
+
+	/* read unit cause register */
+	cause = mv_eth_read(priv, MV_ETH_UNIT_INTERRUPT_CAUSE_REG);
+	cause &= MV_ETH_UNIT_INT_MASK;
+
+	if (!cause)
+		return IRQ_NONE;
+
+	/* clear cause */
+	mv_eth_write(priv, MV_ETH_UNIT_INTERRUPT_CAUSE_REG, ~cause);
+
+	/* read offending address */
+	address = mv_eth_read(priv, MV_ETH_UNIT_EUEA_REG);
+	if (cause & MV_ETH_UNIT_INT_ADDR_VIOLATION)
+		printk(KERN_ERR PFX "Window access violation "
+		       "for bus address 0x%08X\n", address);
+	if (cause & MV_ETH_UNIT_INT_ADDR_NOMATCH)
+		printk(KERN_ERR PFX "No window match for "
+		       "bus address 0x%08X\n", address);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * main interrupt handler
+ */
+static irqreturn_t mv88fxx81_eth_int_handler(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct mv88fxx81_private *priv;
+	unsigned int port_num;
+	unsigned int reg, reg_ext;
+	u32 eth_int_cause, eth_int_cause_ext = 0;
+
+	dev = (struct net_device *)dev_id;
+	priv = netdev_priv(dev);
+	port_num = priv->port_num;
+
+	/* read cause register */
+	reg = MV_ETH_INTERRUPT_CAUSE_REG(port_num);
+
+	/* mask wanted interrupt only */
+	eth_int_cause = mv_eth_read(priv, reg);
+	if (!eth_int_cause)
+		return IRQ_NONE;
+
+	/* acknowledge interrupt */
+	mv_eth_write(priv, reg, ~eth_int_cause);
+	eth_int_cause &= MV_ETH_INT_MASK;
+
+	/* check for extended cause */
+	if (eth_int_cause & MV_ETH_INT_EXTEND) {
+		reg_ext = MV_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num);
+		eth_int_cause_ext = mv_eth_read(priv, reg_ext);
+		eth_int_cause_ext &= MV_ETH_INT_MASK_EXT;
+
+		/* ack them */
+		mv_eth_write(priv, reg_ext, ~eth_int_cause_ext);
+	}
+
+	/* handle PHY status changed */
+	if (eth_int_cause_ext & MV_ETH_INTEXT_PHYSTC) {
+		/* schedule work to handle the link status change */
+		schedule_work(&priv->link_update_task);
+	}
+
+	/* rx or tx done interrupt */
+	if (!((eth_int_cause & MV_ETH_INT_RXMASK) ||
+	      (eth_int_cause_ext & MV_ETH_INT_TXMASK)))
+		return IRQ_HANDLED;
+
+	/* schedule poll to handle rx or tx done */
+	if (netif_rx_schedule_prep(dev)) {
+		/* tell system we have work to be done. */
+		__netif_rx_schedule(dev);
+	} else {
+		/* schedule  prep  will return  false  when device  is
+		 * going down */
+		if (netif_running(dev))
+			printk(KERN_ERR PFX "driver bug! rx/tx interrupt while "
+			       "in poll\n");
+	}
+
+	/* mask the rx/tx interrupt */
+	mv_eth_write(priv, MV_ETH_INTERRUPT_MASK_REG(priv->port_num), 0);
+	mv_eth_write(priv, MV_ETH_INTERRUPT_EXTEND_MASK_REG(priv->port_num), 0);
+
+	/* clear cause again to  prevent irq from triggering again, we
+	 * are going  to poll queue later  anyway so we  will not lose
+	 * anything */
+	mv_eth_write(priv, MV_ETH_INTERRUPT_CAUSE_REG(port_num),
+		     ~MV_ETH_INT_RXMASK);
+	mv_eth_write(priv, MV_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
+		     ~MV_ETH_INT_TXMASK);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * create the tx descriptor chain
+ */
+static void ether_init_tx_desc_ring(struct mv88fxx81_private *priv)
+{
+	struct eth_tx_desc *p_tx_desc;
+	int tx_desc_num, i;
+
+	tx_desc_num = priv->tx_ring_size;
+
+	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
+	p_tx_desc = (struct eth_tx_desc *)priv->p_tx_desc_area;
+	for (i = 0; i < tx_desc_num; i++) {
+		p_tx_desc[i].cmd_sts = 0;
+		p_tx_desc[i].next_desc_ptr = priv->tx_desc_dma +
+			((i + 1) % tx_desc_num) * sizeof (struct eth_tx_desc);
+	}
+
+	priv->tx_curr_desc = 0;
+	priv->tx_used_desc = 0;
+	priv->tx_desc_count = priv->tx_ring_size;
+}
+
+/*
+ * create the rx descriptor chain
+ */
+static void ether_init_rx_desc_ring(struct mv88fxx81_private *priv)
+{
+	struct eth_rx_desc *p_rx_desc;
+	int rx_desc_num, i;
+
+	rx_desc_num = priv->rx_ring_size;
+
+	/* initialize the next_desc_ptr links in the Rx descriptors ring */
+	p_rx_desc = (struct eth_rx_desc *)priv->p_rx_desc_area;
+	for (i = 0; i < rx_desc_num; i++) {
+		p_rx_desc[i].next_desc_ptr = priv->rx_desc_dma +
+			((i + 1) % rx_desc_num) * sizeof (struct eth_rx_desc);
+	}
+
+	priv->rx_curr_desc = 0;
+	priv->rx_used_desc = 0;
+	priv->rx_desc_count = 0;
+}
+
+/*
+ * reset all mac filter (unicast / mcast / special mcast) of device
+ */
+static void eth_port_init_mac_tables(struct mv88fxx81_private *priv)
+{
+	unsigned int port_num, offset;
+	int table_index;
+
+	port_num = priv->port_num;
+
+	/* Clear DA filter unicast table (Ex_dFUT) */
+	for (table_index = 0; table_index <= 0xC; table_index += 4) {
+		offset = MV_ETH_DA_FILTER_UNICAST_TABLE_BASE(port_num);
+		offset += table_index;
+		mv_eth_write(priv, offset, 0);
+	}
+
+	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
+		/* Clear DA filter special multicast table (Ex_dFSMT) */
+		offset = MV_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num);
+		offset += table_index;
+		mv_eth_write(priv, offset, 0);
+
+		/* Clear DA filter other multicast table (Ex_dFOMT) */
+		offset = MV_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num);
+		offset += table_index;
+		mv_eth_write(priv, offset, 0);
+	}
+}
+
+/*
+ * set accept bit at given entry of given filter mac table
+ */
+static void eth_port_set_filter_table_entry(struct mv88fxx81_private *priv,
+					    int table, unsigned char entry)
+{
+	unsigned int table_reg;
+	unsigned int tbl_offset;
+	unsigned int reg_offset;
+
+	/* Register offset of DA table entry */
+	tbl_offset = (entry / 4) * 4;
+	/* Entry offset within the register */
+	reg_offset = entry % 4;
+
+	/* Set "accepts frame bit" at specified table entry */
+	table_reg = mv_eth_read(priv, table + tbl_offset);
+	table_reg |= 0x01 << (8 * reg_offset);
+	mv_eth_write(priv, table + tbl_offset, table_reg);
+}
+
+/*
+ * program own mac in device  register and set unicast da filter table
+ * to accept it
+ */
+static void eth_port_uc_addr_set(struct mv88fxx81_private *priv,
+				 unsigned char *p_addr)
+{
+	unsigned int port_num;
+	unsigned int mac_h;
+	unsigned int mac_l;
+	int table;
+
+	port_num = priv->port_num;
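+	/* first four bytes of the address go in the "high" register,
+	 * the last two in the "low" one */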
+	mac_l = (p_addr[4] << 8) | (p_addr[5]);
+	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
+		(p_addr[3] << 0);
+
+	mv_eth_write(priv, MV_ETH_MAC_ADDR_LOW(port_num), mac_l);
+	mv_eth_write(priv, MV_ETH_MAC_ADDR_HIGH(port_num), mac_h);
+
+	/* Accept frames of this address */
+	table = MV_ETH_DA_FILTER_UNICAST_TABLE_BASE(port_num);
+	eth_port_set_filter_table_entry(priv, table, p_addr[5] & 0x0f);
+}
+
+/*
+ * Change the interface's mac address.
+ */
+static int mv88fxx81_eth_set_mac_address(struct net_device *dev, void *p)
+{
+	struct mv88fxx81_private *priv;
+	struct sockaddr *addr = p;
+
+	priv = netdev_priv(dev);
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+	eth_port_init_mac_tables(priv);
+	eth_port_uc_addr_set(priv, dev->dev_addr);
+
+	return 0;
+}
+
+/*
+ * set phy configuration to given settings
+ */
+static int mv88fxx81_eth_set_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mv88fxx81_private *priv;
+	int ret;
+
+	priv = netdev_priv(dev);
+	mutex_lock(&priv->mii_mutex);
+
+	if (!priv->ignore_phy) {
+		/* set  the  phy settings,  which  we  trigger a  link
+		 * change  */
+		ret = mii_ethtool_sset(&priv->mii, cmd);
+	} else {
+		ret = 0;
+
+		/* you shall not set autoneg */
+		if (cmd->autoneg == AUTONEG_DISABLE) {
+			/* update  mac settings now.   We may  need to
+			 * disable  tx, make sure  we don't  mess with
+			 * _xmit and tx_reclaim */
+			spin_lock(&priv->tx_lock);
+			mv88fxx81_eth_update_pscr(dev, cmd);
+			spin_unlock(&priv->tx_lock);
+		} else {
+			ret = -EINVAL;
+		}
+	}
+
+	mutex_unlock(&priv->mii_mutex);
+	return ret;
+}
+
+/*
+ * fetch phy configuration
+ */
+static int mv88fxx81_eth_get_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mv88fxx81_private *priv;
+	int ret;
+
+	priv = netdev_priv(dev);
+	mutex_lock(&priv->mii_mutex);
+
+	if (!priv->ignore_phy) {
+		/* read and return the phy settings */
+		ret = mii_ethtool_gset(&priv->mii, cmd);
+
+		/* The  PHY   may  support  1000baseT_Half,   but  the
+		 * mv88fxx81 does not */
+		cmd->supported &= ~SUPPORTED_1000baseT_Half;
+		cmd->advertising &= ~ADVERTISED_1000baseT_Half;
+	} else {
+		unsigned int val;
+
+		/* read pscr and return current mac settings */
+		val = mv_eth_read(priv,
+				  MV_ETH_PORT_SERIAL_CONTROL_REG(priv->port_num));
+
+		cmd->port = PORT_MII;
+		cmd->transceiver = XCVR_INTERNAL;
+		cmd->autoneg = AUTONEG_DISABLE;
+		cmd->supported = SUPPORTED_10baseT_Half |
+			SUPPORTED_10baseT_Full |
+			SUPPORTED_100baseT_Half |
+			SUPPORTED_100baseT_Full |
+			SUPPORTED_1000baseT_Full |
+			SUPPORTED_MII;
+		cmd->advertising = 0;
+
+		if (val & MV_ETH_SET_FULL_DUPLEX_MODE)
+			cmd->duplex = DUPLEX_FULL;
+		else
+			cmd->duplex = DUPLEX_HALF;
+
+		if (val & MV_ETH_SET_GMII_SPEED_TO_1000)
+			cmd->speed = SPEED_1000;
+		else if (val & MV_ETH_SET_MII_SPEED_TO_100)
+			cmd->speed = SPEED_100;
+		else
+			cmd->speed = SPEED_10;
+		ret = 0;
+	}
+	mutex_unlock(&priv->mii_mutex);
+
+	return ret;
+}
+
+/*
+ * set coalescing parameter
+ */
+static void mv88fxx81_eth_apply_coal(struct mv88fxx81_private *priv)
+{
+	int port_num;
+	unsigned int rx_coal, tx_coal;
+
+	port_num = priv->port_num;
+
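+	/* the hardware counts coalescing delays in units of 64 TCLK
+	 * cycles; convert the microsecond values and give up if they do
+	 * not fit in the 14 bit fields */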
+	rx_coal = ((tclk_get_rate() / 1000000) * priv->rx_coal_usec) / 64;
+	if (rx_coal > 0x3fff)
+		return;
+
+	tx_coal = ((tclk_get_rate() / 1000000) * priv->tx_coal_usec) / 64;
+	if (tx_coal > 0x3fff)
+		return;
+
+	/* set rx coal */
+	mv_eth_write(priv, MV_ETH_SDMA_CONFIG_REG(port_num),
+		     ((rx_coal & 0x3fff) << 8) |
+		     (mv_eth_read(priv, MV_ETH_SDMA_CONFIG_REG(port_num))
+		      & 0xffc000ff));
+
+	/* set tx coal */
+	mv_eth_write(priv, MV_ETH_TX_FIFO_URGENT_THRESHOLD_REG(port_num),
+		     tx_coal << 4);
+}
+
+/*
+ * start rx & tx operation on device
+ */
+static void mv88fxx81_eth_port_start(struct net_device *dev)
+{
+	struct mv88fxx81_private *priv;
+	unsigned int port_num;
+	struct ethtool_cmd ethtool_cmd;
+	u32 pscr;
+
+	priv = netdev_priv(dev);
+	port_num = priv->port_num;
+
+	/* Assignment of Tx CTRP of given queue */
+	mv_eth_write(priv, MV_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+		     (u32)priv->tx_desc_dma);
+
+	/* Assignment of Rx CRDP of given queue */
+	mv_eth_write(priv, MV_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+		     (u32)priv->rx_desc_dma);
+
+	/* Assign port configuration and command. */
+	mv_eth_write(priv, MV_ETH_PORT_CONFIG_REG(port_num),
+		     MV_ETH_UNICAST_NORMAL_MODE |
+		     MV_ETH_DEFAULT_RX_QUEUE_0 |
+		     MV_ETH_DEFAULT_RX_ARP_QUEUE_0 |
+		     MV_ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP |
+		     MV_ETH_RECEIVE_BC_IF_IP |
+		     MV_ETH_RECEIVE_BC_IF_ARP |
+		     MV_ETH_CAPTURE_TCP_FRAMES_DIS |
+		     MV_ETH_CAPTURE_UDP_FRAMES_DIS |
+		     MV_ETH_DEFAULT_RX_TCP_QUEUE_0 |
+		     MV_ETH_DEFAULT_RX_UDP_QUEUE_0 |
+		     MV_ETH_DEFAULT_RX_BPDU_QUEUE_0);
+
+	mv_eth_write(priv, MV_ETH_PORT_CONFIG_EXTEND_REG(port_num),
+		     MV_ETH_SPAN_BPDU_PACKETS_AS_NORMAL);
+
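+	/* reprogram the port serial control: disable the port and drop
+	 * the forced link first, then write the new configuration and
+	 * re-enable the port */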
+	pscr = mv_eth_read(priv, MV_ETH_PORT_SERIAL_CONTROL_REG(port_num));
+
+	pscr &= ~(MV_ETH_SERIAL_PORT_ENABLE | MV_ETH_FORCE_LINK_PASS);
+	mv_eth_write(priv, MV_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
+
+	/* disable mac autoneg feature, enable link reporting */
+	pscr |= MV_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+		MV_ETH_DISABLE_AUTO_NEG_SPEED_GMII |
+		MV_ETH_DISABLE_AUTO_NEG_FOR_DUPLX |
+		MV_ETH_DO_NOT_FORCE_LINK_FAIL |
+		MV_ETH_DO_NOT_FORCE_LINK_PASS |
+		MV_ETH_SERIAL_PORT_CONTROL_RESERVED;
+
+	if (priv->ignore_phy) {
+		/* no phy  status to  poll, so we  need to  force link
+		 * status */
+		pscr |= MV_ETH_FORCE_LINK_PASS;
+	}
+
+	mv_eth_write(priv, MV_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
+
+	/* enable port */
+	pscr |= MV_ETH_SERIAL_PORT_ENABLE;
+	mv_eth_write(priv, MV_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
+
+	/* Assign port SDMA configuration */
+	mv_eth_write(priv, MV_ETH_SDMA_CONFIG_REG(port_num),
+		     MV_ETH_RX_BURST_SIZE_16_64BIT |
+		     MV_ETH_BLM_RX_NO_SWAP |
+		     MV_ETH_BLM_TX_NO_SWAP |
+		     MV_ETH_DESCRIPTORS_NO_SWAP |
+		     MV_ETH_TX_BURST_SIZE_16_64BIT |
+		     MV_ETH_IPG_INT_RX(0));
+
+	/* Load coalescing value */
+	mv88fxx81_eth_apply_coal(priv);
+
+	/* Enable port Rx. */
+	mv_eth_write(priv, MV_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
+		     MV_ETH_RECEIVE_QUEUE_ENABLE_0);
+
+	/* Disable port bandwidth limits by clearing MTU register */
+	mv_eth_write(priv, MV_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0);
+
+	/* save phy settings across reset */
+	if (!priv->ignore_phy) {
+		mv88fxx81_eth_get_settings(dev, &ethtool_cmd);
+		ethernet_phy_reset(priv);
+		mv88fxx81_eth_set_settings(dev, &ethtool_cmd);
+		/* the phy/link change interrupt will wake us */
+	} else {
+		mv88fxx81_eth_handle_link_change(&priv->link_update_task);
+	}
+}
+
+
+/*
+ * Open network device, init hardware and start rx and tx
+ */
+static int mv88fxx81_eth_open(struct net_device *dev)
+{
+	struct mv88fxx81_private *priv;
+	unsigned int size, unaligned;
+	struct sockaddr addr;
+	int ret;
+	void *p;
+
+	priv = netdev_priv(dev);
+
+	/* mask all irq */
+	mv_eth_write(priv, MV_ETH_UNIT_INTERRUPT_MASK_REG, 0);
+	mv_eth_write(priv, MV_ETH_INTERRUPT_MASK_REG(priv->port_num), 0);
+	mv_eth_write(priv, MV_ETH_INTERRUPT_EXTEND_MASK_REG(priv->port_num), 0);
+	/* request irq */
+	ret = request_irq(dev->irq, mv88fxx81_eth_int_handler,
+			  SA_INTERRUPT | SA_SAMPLE_RANDOM, dev->name, dev);
+	if (ret) {
+		printk(KERN_ERR PFX "request irq failed\n");
+		return -EBUSY;
+	}
+
+	ret = request_irq(priv->err_irq, mv88fxx81_eth_unit_int_handler,
+			  SA_INTERRUPT, dev->name, dev);
+	if (ret) {
+		free_irq(dev->irq, dev);
+		printk(KERN_ERR PFX "request irq failed\n");
+		return -EBUSY;
+	}
+
+#ifdef CONFIG_SKB_RECYCLE
+	skb_queue_head_init(&priv->skb_recycle_queue);
+#endif
+
+	/* allocate rx/tx skb queue */
+	priv->rx_skb = kmalloc(sizeof (struct sk_buff *) * priv->rx_ring_size,
+			       GFP_KERNEL);
+	if (!priv->rx_skb) {
+		printk(KERN_ERR PFX "%s: Cannot allocate Rx skb ring\n",
+		       dev->name);
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	priv->tx_skb = kmalloc(sizeof (struct sk_buff *) * priv->tx_ring_size,
+			       GFP_KERNEL);
+	if (!priv->tx_skb) {
+		printk(KERN_ERR PFX "%s: Cannot allocate Tx skb ring\n",
+		       dev->name);
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	/*
+	 * Allocate TX dma ring (needs 16 byte alignment)
+	 */
+	size = priv->tx_ring_size * sizeof (struct eth_tx_desc) + TX_DMA_DESC_ALIGN;
+	p = dma_alloc_coherent(NULL, size, &priv->tx_desc_dma, GFP_KERNEL);
+
+	if (!p) {
+		printk(KERN_ERR PFX "%s: Cannot allocate Tx Ring (%d bytes)\n",
+		       dev->name, size);
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	/* keep track of allocated data */
+	priv->tx_desc_area_alloc_size = size;
+	priv->p_tx_desc_area = priv->p_tx_desc_area_alloc = p;
+
+	/* align if needed */
+	unaligned = (u32)p & (TX_DMA_DESC_ALIGN - 1);
+	if (unaligned) {
+		size = size - (TX_DMA_DESC_ALIGN - unaligned);
+		priv->p_tx_desc_area += TX_DMA_DESC_ALIGN - unaligned;
+		priv->tx_desc_dma += TX_DMA_DESC_ALIGN - unaligned;
+	}
+
+	memset((void *)priv->p_tx_desc_area, 0, size);
+	ether_init_tx_desc_ring(priv);
+
+	/*
+	 * Allocate RX ring (needs 16 byte alignment)
+	 */
+	size = priv->rx_ring_size * sizeof (struct eth_rx_desc) + RX_DMA_DESC_ALIGN;
+	p = dma_alloc_coherent(NULL, size, &priv->rx_desc_dma, GFP_KERNEL);
+
+	if (!p) {
+		printk(KERN_ERR PFX "%s: Cannot allocate Rx ring (%d bytes)\n",
+		       dev->name, size);
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	/* keep track of allocated data */
+	priv->rx_desc_area_alloc_size = size;
+	priv->p_rx_desc_area = priv->p_rx_desc_area_alloc = p;
+
+	/* align if needed */
+	unaligned = (u32)p & (RX_DMA_DESC_ALIGN - 1);
+	if (unaligned) {
+		size = size - (RX_DMA_DESC_ALIGN - unaligned);
+		priv->p_rx_desc_area += RX_DMA_DESC_ALIGN - unaligned;
+		priv->rx_desc_dma += RX_DMA_DESC_ALIGN - unaligned;
+	}
+
+	memset((void *)priv->p_rx_desc_area, 0, size);
+	ether_init_rx_desc_ring(priv);
+
+	/* fill the rx ring with skb */
+	mv88fxx81_eth_rx_refill_descs(dev);
+
+	/* set da filter & mac address register correctly */
+	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
+	mv88fxx81_eth_set_mac_address(dev, &addr);
+
+	/* Clear any pending ethernet unit interrupts */
+	mv_eth_write(priv, MV_ETH_UNIT_INTERRUPT_CAUSE_REG, 0);
+
+	/* Clear any pending ethernet port interrupts */
+	mv_eth_write(priv, MV_ETH_INTERRUPT_CAUSE_REG(priv->port_num), 0);
+	mv_eth_write(priv, MV_ETH_INTERRUPT_CAUSE_EXTEND_REG(priv->port_num), 0);
+
+	/* start rx */
+	mv88fxx81_eth_port_start(dev);
+
+	/* Unmask tx buffer and phy status interrupts */
+	mv_eth_write(priv,
+		     MV_ETH_INTERRUPT_EXTEND_MASK_REG(priv->port_num),
+		     MV_ETH_INT_MASK_EXT);
+
+	/* Unmask Rx buffer interrupt */
+	mv_eth_write(priv, MV_ETH_INTERRUPT_MASK_REG(priv->port_num),
+		     MV_ETH_INT_MASK);
+
+	/* Unmask ethernet unit interrupt */
+	mv_eth_write(priv, MV_ETH_UNIT_INTERRUPT_MASK_REG,
+		     MV_ETH_UNIT_INT_MASK);
+
+	return 0;
+
+out_err:
+	free_irq(dev->irq, dev);
+	free_irq(priv->err_irq, dev);
+	if (priv->rx_skb)
+		kfree(priv->rx_skb);
+	if (priv->tx_skb)
+		kfree(priv->tx_skb);
+	if (priv->p_tx_desc_area)
+		dma_free_coherent(NULL, priv->tx_desc_area_alloc_size,
+				  priv->p_tx_desc_area_alloc, priv->tx_desc_dma);
+	if (priv->p_rx_desc_area)
+		dma_free_coherent(NULL, priv->rx_desc_area_alloc_size,
+				  priv->p_rx_desc_area_alloc, priv->rx_desc_dma);
+
+	return ret;
+}
+
+#ifdef CONFIG_SKB_RECYCLE
+/*
+ * flush the recycle queue
+ */
+static void mv88fxx81_eth_release_recycled_skb(struct mv88fxx81_private *priv)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&priv->skb_recycle_queue))) {
+		skb->recycle = NULL;
+		dev_kfree_skb(skb);
+	}
+}
+#endif
+
+/*
+ * stop rx/tx and free all allocated memory
+ */
+static int mv88fxx81_eth_stop(struct net_device *dev)
+{
+	struct mv88fxx81_private *priv;
+	int i, port_num;
+
+	priv = netdev_priv(dev);
+	port_num = priv->port_num;
+
+	/* stop software tx queue */
+	netif_stop_queue(dev);
+
+	/* mask all interrupt */
+	mv_eth_write(priv, MV_ETH_UNIT_INTERRUPT_MASK_REG, 0);
+	mv_eth_write(priv, MV_ETH_INTERRUPT_MASK_REG(port_num), 0);
+	mv_eth_write(priv, MV_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
+
+	/* disable tx & force reclaim of all buffers */
+	mv88fxx81_eth_port_disable_tx(priv);
+	mv88fxx81_eth_tx_reclaim(dev, 1);
+
+	/* disable rx and wait for current transaction to finish */
+	mv_eth_write(priv, MV_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
+		     MV_ETH_RECEIVE_QUEUE_DISABLE_0);
+	for (i = 0; i < 100; i++) {
+		u32 val;
+
+		val = mv_eth_read(priv,
+				  MV_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num));
+		if (!(val & MV_ETH_RECEIVE_QUEUE_ENABLE_0))
+			break;
+		udelay(PHY_WAIT_MICRO_SECONDS);
+	}
+
+	if (i == 100)
+		printk(KERN_WARNING PFX "timeout waiting for rx to stop\n");
+
+	/* free the rx skb ring */
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		if (priv->rx_skb[i])
+			dev_kfree_skb(priv->rx_skb[i]);
+	}
+
+	/* free remaining allocated memory */
+	free_irq(dev->irq, dev);
+	free_irq(priv->err_irq, dev);
+	kfree(priv->rx_skb);
+	priv->rx_skb = NULL;
+	kfree(priv->tx_skb);
+	priv->tx_skb = NULL;
+	dma_free_coherent(NULL, priv->tx_desc_area_alloc_size,
+			  priv->p_tx_desc_area_alloc, priv->tx_desc_dma);
+	priv->p_tx_desc_area_alloc = NULL;
+	dma_free_coherent(NULL, priv->rx_desc_area_alloc_size,
+			  priv->p_rx_desc_area_alloc, priv->rx_desc_dma);
+	priv->p_rx_desc_area_alloc = NULL;
+
+#ifdef CONFIG_SKB_RECYCLE
+	mv88fxx81_eth_release_recycled_skb(priv);
+#endif
+	return 0;
+}
+
+/*
+ * Returns a pointer to the interface statistics.
+ */
+static struct net_device_stats *mv88fxx81_eth_get_stats(struct net_device *dev)
+{
+	struct mv88fxx81_private *priv;
+
+	priv = netdev_priv(dev);
+	return &priv->stats;
+}
+
+/*
+ * calculate CRC-8 of given mac address
+ */
+static u8 crc8(u8 *p_addr)
+{
+	unsigned int mac_h;
+	unsigned int mac_l;
+	int mac_array[48];
+	int crc[8];
+	int i;
+	u8 crc_result = 0;
+
+	mac_h = (p_addr[0] << 8) | (p_addr[1]);
+	mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
+			(p_addr[4] << 8) | (p_addr[5] << 0);
+
+	for (i = 0; i < 32; i++)
+		mac_array[i] = (mac_l >> i) & 0x1;
+	for (i = 32; i < 48; i++)
+		mac_array[i] = (mac_h >> (i - 32)) & 0x1;
+
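+	/* each crc[i] below is bit i of the CRC-8 of the 48 bit mac
+	 * address, written out as an explicit xor of address bits so no
+	 * lookup table is needed; the result indexes the "other
+	 * multicast" filter table */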
+	crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
+		 mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
+		 mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
+		 mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
+		 mac_array[8]  ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[0];
+
+	crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
+		 mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
+		 mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
+		 mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
+		 mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
+		 mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
+		 mac_array[9]  ^ mac_array[6]  ^ mac_array[1]  ^ mac_array[0];
+
+	crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
+		 mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
+		 mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
+		 mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
+		 mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^
+		 mac_array[6]  ^ mac_array[2]  ^ mac_array[1]  ^ mac_array[0];
+
+	crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
+		 mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
+		 mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
+		 mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
+		 mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[7]  ^
+		 mac_array[3]  ^ mac_array[2]  ^ mac_array[1];
+
+	crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
+		 mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
+		 mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
+		 mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
+		 mac_array[12] ^ mac_array[10] ^ mac_array[8]  ^ mac_array[4]  ^
+		 mac_array[3]  ^ mac_array[2];
+
+	crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
+		 mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
+		 mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
+		 mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
+		 mac_array[13] ^ mac_array[11] ^ mac_array[9]  ^ mac_array[5]  ^
+		 mac_array[4]  ^ mac_array[3];
+
+	crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
+		 mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
+		 mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
+		 mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
+		 mac_array[12] ^ mac_array[10] ^ mac_array[6]  ^ mac_array[5]  ^
+		 mac_array[4];
+
+	crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
+		 mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
+		 mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
+		 mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
+		 mac_array[11] ^ mac_array[7]  ^ mac_array[6]  ^ mac_array[5];
+
+	for (i = 0; i < 8; i++)
+		crc_result = crc_result | (crc[i] << i);
+	return crc_result;
+}
+
+/*
+ * Change rx mode (promiscuous or not) and update the multicast list
+ */
+static void mv88fxx81_eth_set_multicast_list(struct net_device *dev)
+{
+	struct mv88fxx81_private *priv;
+	struct dev_mc_list *mc_list;
+	unsigned int oreg, sreg;
+	int i, port;
+	u32 reg;
+	u32 otable[64], stable[64];
+
+	priv = netdev_priv(dev);
+
+	/* deal with unicast promisc mode */
+	reg = mv_eth_read(priv, MV_ETH_PORT_CONFIG_REG(priv->port_num));
+	if (dev->flags & IFF_PROMISC)
+		reg |= MV_ETH_UNICAST_PROMISCUOUS_MODE;
+	else
+		reg &= ~MV_ETH_UNICAST_PROMISCUOUS_MODE;
+	mv_eth_write(priv, MV_ETH_PORT_CONFIG_REG(priv->port_num), reg);
+
+
+	/* deal with multicast promisc/allmulti now */
+	port = priv->port_num;
+	sreg = MV_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port);
+	oreg = MV_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port);
+
+	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
+		int table_index;
+
+		for (table_index = 0; table_index <= 0xFC; table_index += 4) {
+			/* Set all entries in DA filter special multicast
+			 * table (Ex_dFSMT)
+			 * Set for ETH_Q0 for now
+			 * Bits
+			 * 0	  Accept=1, Drop=0
+			 * 3-1  Queue	 ETH_Q0=0
+			 * 7-4  Reserved = 0;
+			 */
+			mv_eth_write(priv, sreg + table_index, 0x01010101);
+
+			/* Set all entries in DA filter other multicast
+			 * table (Ex_dFOMT)
+			 * Set for ETH_Q0 for now
+			 * Bits
+			 * 0	  Accept=1, Drop=0
+			 * 3-1  Queue	 ETH_Q0=0
+			 * 7-4  Reserved = 0;
+			 */
+			mv_eth_write(priv, oreg + table_index, 0x01010101);
+		}
+		return;
+	}
+
+	/* program the multicast addresses to filter into hw; build the
+	 * tables in local arrays first, then update all hw regs at
+	 * once */
+	memset(stable, 0, sizeof (stable));
+	memset(otable, 0, sizeof (otable));
+
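+	/* each filter table holds 256 one byte entries packed four per
+	 * 32 bit register, hence the 64 word local copies; entry N goes
+	 * into word N / 4 at byte N & 3, and bit 0 of that byte means
+	 * accept */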
+	for (i = 0, mc_list = dev->mc_list; (i < 256) && (mc_list != NULL) &&
+		     (i < dev->mc_count); i++, mc_list = mc_list->next) {
+		u8 *dmi_addr, crc;
+		int toffset, roffset;
+
+		dmi_addr = mc_list->dmi_addr;
+
+		/* skip non-ethernet addresses */
+		if (mc_list->dmi_addrlen != 6)
+			continue;
+
+		/* handle special address */
+		if (!memcmp(dmi_addr, "\x01\x00\x5E\x00\x00", 5)) {
+			/*
+			 * Bits for each entry:
+			 * 0	Accept=1, Drop=0
+			 * 3-1	Queue  ETH_Q0=0
+			 * 7-4	Reserved = 0;
+			 */
+			toffset = (dmi_addr[5] / 4);
+			roffset = (dmi_addr[5] & 3);
+			stable[toffset] |= (0x1 << (roffset * 8));
+			continue;
+		}
+
+		/* handle other address */
+		crc = crc8(dmi_addr);
+		toffset = (crc / 4);
+		roffset = (crc & 3);
+		otable[toffset] |= (0x1 << (roffset * 8));
+	}
+
+	/* now update hw */
+	for (i = 0; i < 64; i++) {
+		mv_eth_write(priv, sreg + i * 4, stable[i]);
+		mv_eth_write(priv, oreg + i * 4, otable[i]);
+	}
+}
+
+/*
+ * Wrappers for MII support library.
+ */
+static int mv88fxx81_mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	struct mv88fxx81_private *priv;
+	int val;
+
+	priv = netdev_priv(dev);
+	eth_port_read_smi_reg(priv, phy_id, location, &val);
+	return val;
+}
+
+static void mv88fxx81_mdio_write(struct net_device *dev, int phy_id,
+			       int location, int val)
+{
+	struct mv88fxx81_private *priv;
+
+	priv = netdev_priv(dev);
+	eth_port_write_smi_reg(priv, phy_id, location, val);
+}
+
+/*
+ * read mib counters from device
+ */
+static inline u32 read_mib(struct mv88fxx81_private *priv, int offset)
+{
+	return mv_eth_read(priv,
+			   MV_ETH_MIB_COUNTERS_BASE(priv->port_num) + offset);
+}
+
+static void eth_update_mib_counters(struct mv88fxx81_private *priv)
+{
+	struct mv88fxx81_mib_counters *p;
+	int offset;
+
+	p = &priv->mib_counters;
+
+	/* extract 64 bits value */
+	p->good_octets_received +=
+		read_mib(priv, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW);
+	p->good_octets_received +=
+		(u64)read_mib(priv, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32;
+
+	for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
+			offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
+			offset += 4)
+		*(u32 *)((char *)p + offset) += read_mib(priv, offset);
+
+	/* extract 64 bits value */
+	p->good_octets_sent += read_mib(priv, ETH_MIB_GOOD_OCTETS_SENT_LOW);
+	p->good_octets_sent +=
+		(u64)read_mib(priv, ETH_MIB_GOOD_OCTETS_SENT_HIGH) << 32;
+
+	for (offset = ETH_MIB_GOOD_FRAMES_SENT;
+			offset <= ETH_MIB_LATE_COLLISION;
+			offset += 4)
+		*(u32 *)((char *)p + offset) += read_mib(priv, offset);
+}
+
+
+/*
+ * ethtool/ioctl support
+ */
+struct mv88fxx81_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
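+/* expands to the (sizeof, offset) pair stored in struct
+ * mv88fxx81_stats; get_ethtool_stats uses the size to decide whether
+ * to read the field as a 32 or 64 bit value */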
+#define GEN_STAT(m) sizeof (((struct mv88fxx81_private *)0)->m), \
+				offsetof(struct mv88fxx81_private, m)
+
+static const struct mv88fxx81_stats mv88fxx81_gstrings_stats[] = {
+	{ "rx_packets", GEN_STAT(stats.rx_packets) },
+	{ "tx_packets", GEN_STAT(stats.tx_packets) },
+	{ "rx_bytes", GEN_STAT(stats.rx_bytes) },
+	{ "tx_bytes", GEN_STAT(stats.tx_bytes) },
+	{ "rx_errors", GEN_STAT(stats.rx_errors) },
+	{ "tx_errors", GEN_STAT(stats.tx_errors) },
+	{ "rx_dropped", GEN_STAT(stats.rx_dropped) },
+	{ "tx_dropped", GEN_STAT(stats.tx_dropped) },
+	{ "good_octets_received", GEN_STAT(mib_counters.good_octets_received) },
+	{ "bad_octets_received", GEN_STAT(mib_counters.bad_octets_received) },
+	{ "internal_mac_transmit_err",
+	  GEN_STAT(mib_counters.internal_mac_transmit_err) },
+	{ "good_frames_received", GEN_STAT(mib_counters.good_frames_received) },
+	{ "bad_frames_received", GEN_STAT(mib_counters.bad_frames_received) },
+	{ "broadcast_frames_received",
+	  GEN_STAT(mib_counters.broadcast_frames_received) },
+	{ "multicast_frames_received",
+	  GEN_STAT(mib_counters.multicast_frames_received) },
+	{ "frames_64_octets", GEN_STAT(mib_counters.frames_64_octets) },
+	{ "frames_65_to_127_octets",
+	  GEN_STAT(mib_counters.frames_65_to_127_octets) },
+	{ "frames_128_to_255_octets",
+	  GEN_STAT(mib_counters.frames_128_to_255_octets) },
+	{ "frames_256_to_511_octets",
+	  GEN_STAT(mib_counters.frames_256_to_511_octets) },
+	{ "frames_512_to_1023_octets",
+	  GEN_STAT(mib_counters.frames_512_to_1023_octets) },
+	{ "frames_1024_to_max_octets",
+	  GEN_STAT(mib_counters.frames_1024_to_max_octets) },
+	{ "good_octets_sent", GEN_STAT(mib_counters.good_octets_sent) },
+	{ "good_frames_sent", GEN_STAT(mib_counters.good_frames_sent) },
+	{ "excessive_collision", GEN_STAT(mib_counters.excessive_collision) },
+	{ "multicast_frames_sent",
+	  GEN_STAT(mib_counters.multicast_frames_sent) },
+	{ "broadcast_frames_sent",
+	  GEN_STAT(mib_counters.broadcast_frames_sent) },
+	{ "unrec_mac_control_received",
+	  GEN_STAT(mib_counters.unrec_mac_control_received) },
+	{ "fc_sent", GEN_STAT(mib_counters.fc_sent) },
+	{ "good_fc_received", GEN_STAT(mib_counters.good_fc_received) },
+	{ "bad_fc_received", GEN_STAT(mib_counters.bad_fc_received) },
+	{ "undersize_received", GEN_STAT(mib_counters.undersize_received) },
+	{ "fragments_received", GEN_STAT(mib_counters.fragments_received) },
+	{ "oversize_received", GEN_STAT(mib_counters.oversize_received) },
+	{ "jabber_received", GEN_STAT(mib_counters.jabber_received) },
+	{ "mac_receive_error", GEN_STAT(mib_counters.mac_receive_error) },
+	{ "bad_crc_event", GEN_STAT(mib_counters.bad_crc_event) },
+	{ "collision", GEN_STAT(mib_counters.collision) },
+	{ "late_collision", GEN_STAT(mib_counters.late_collision) },
+};
+
+#define MV88FXX81_STATS_LEN	\
+	(sizeof(mv88fxx81_gstrings_stats) / sizeof(struct mv88fxx81_stats))
+
+static void mv88fxx81_eth_get_drvinfo(struct net_device *netdev,
+				      struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, mv88fxx81_driver_name, 32);
+	strncpy(drvinfo->version, mv88fxx81_driver_version, 32);
+	strncpy(drvinfo->fw_version, "N/A", 32);
+	strncpy(drvinfo->bus_info, "mv88fxx81", 32);
+	drvinfo->n_stats = MV88FXX81_STATS_LEN;
+}
+
+static int mv88fxx81_eth_get_stats_count(struct net_device *netdev)
+{
+	return MV88FXX81_STATS_LEN;
+}
+
+static void mv88fxx81_eth_get_strings(struct net_device *netdev,
+				      uint32_t stringset, uint8_t *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < MV88FXX81_STATS_LEN; i++) {
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       mv88fxx81_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		break;
+	}
+}
+
+static void mv88fxx81_eth_get_ethtool_stats(struct net_device *netdev,
+					    struct ethtool_stats *stats,
+					    uint64_t *data)
+{
+	struct mv88fxx81_private *priv;
+	int i;
+
+	priv = netdev_priv(netdev);
+	eth_update_mib_counters(priv);
+
+	for (i = 0; i < MV88FXX81_STATS_LEN; i++) {
+		char *p = (char *)priv + mv88fxx81_gstrings_stats[i].stat_offset;
+		data[i] = (mv88fxx81_gstrings_stats[i].sizeof_stat ==
+			   sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
+	}
+}
+
+static u32 mv88fxx81_eth_get_link(struct net_device *dev)
+{
+	struct mv88fxx81_private *priv;
+	int res;
+
+	priv = netdev_priv(dev);
+	if (priv->ignore_phy) {
+		/* no phy, no way to fetch link status */
+		return netif_running(dev) ? 1 : 0;
+	}
+
+	mutex_lock(&priv->mii_mutex);
+	res = mii_link_ok(&priv->mii);
+	mutex_unlock(&priv->mii_mutex);
+
+	return res;
+}
+
+static int mv88fxx81_eth_nway_restart(struct net_device *dev)
+{
+	struct mv88fxx81_private *priv;
+	int res;
+
+	priv = netdev_priv(dev);
+
+	if (priv->ignore_phy) {
+		/* makes no sense */
+		return -ENODEV;
+	}
+
+	mutex_lock(&priv->mii_mutex);
+	res = mii_nway_restart(&priv->mii);
+	mutex_unlock(&priv->mii_mutex);
+
+	return res;
+}
+
+static int mv88fxx81_eth_get_coalesce(struct net_device *dev,
+				      struct ethtool_coalesce *ec)
+{
+	struct mv88fxx81_private *priv;
+
+	priv = netdev_priv(dev);
+	ec->rx_coalesce_usecs = priv->rx_coal_usec;
+	ec->tx_coalesce_usecs = priv->tx_coal_usec;
+	ec->rx_max_coalesced_frames = 0;
+	ec->tx_max_coalesced_frames = 0;
+
+	return 0;
+}
+
+static int mv88fxx81_eth_set_coalesce(struct net_device *dev,
+				      struct ethtool_coalesce *ec)
+{
+	struct mv88fxx81_private *priv;
+	unsigned int rx_coal, tx_coal;
+
+	priv = netdev_priv(dev);
+
+	if ((ec->rx_max_coalesced_frames > 0) ||
+	    (ec->tx_max_coalesced_frames > 0) ||
+	    (ec->rx_coalesce_usecs_irq != 0) ||
+	    (ec->tx_coalesce_usecs_irq != 0) ||
+	    (ec->rx_max_coalesced_frames_irq != 0) ||
+	    (ec->tx_max_coalesced_frames_irq != 0))
+		return -EINVAL;
+
+	/* sanity check on value */
+	rx_coal = ((tclk_get_rate() / 1000000) * ec->rx_coalesce_usecs) / 64;
+	if (rx_coal > 0x3fff)
+		return -EINVAL;
+
+	tx_coal = ((tclk_get_rate() / 1000000) * ec->tx_coalesce_usecs) / 64;
+	if (tx_coal > 0x3fff)
+		return -EINVAL;
+
+	/* ok set */
+	priv->rx_coal_usec = ec->rx_coalesce_usecs;
+	priv->tx_coal_usec = ec->tx_coalesce_usecs;
+
+	mv88fxx81_eth_apply_coal(priv);
+
+	return 0;
+}
+
+static void mv88fxx81_eth_get_ringparam(struct net_device *dev,
+					struct ethtool_ringparam *ering)
+{
+	struct mv88fxx81_private *priv;
+
+	priv = netdev_priv(dev);
+
+	/* rx/tx ring is actually only limited by memory */
+	ering->rx_max_pending = 8192;
+	ering->tx_max_pending = 8192;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+
+	ering->rx_pending = priv->rx_ring_size;
+	ering->tx_pending = priv->tx_ring_size;
+}
+
+static int mv88fxx81_eth_set_ringparam(struct net_device *dev,
+				       struct ethtool_ringparam *ering)
+{
+	struct mv88fxx81_private *priv;
+
+	priv = netdev_priv(dev);
+	if (netif_running(dev))
+		mv88fxx81_eth_stop(dev);
+
+	priv->rx_ring_size = ering->rx_pending;
+	priv->tx_ring_size = ering->tx_pending;
+
+	if (netif_running(dev)) {
+		int err = mv88fxx81_eth_open(dev);
+		if (err)
+			dev_close(dev);
+		else
+			mv88fxx81_eth_set_multicast_list(dev);
+	}
+
+	return 0;
+}
+
+static int mv88fxx81_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+				  int cmd)
+{
+	struct mv88fxx81_private *priv;
+	int res;
+
+	priv = netdev_priv(dev);
+
+	mutex_lock(&priv->mii_mutex);
+	res = generic_mii_ioctl(&priv->mii, if_mii(ifr), cmd, NULL);
+	mutex_unlock(&priv->mii_mutex);
+
+	return res;
+}
+
+
+static struct ethtool_ops mv88fxx81_ethtool_ops = {
+	.get_settings           = mv88fxx81_eth_get_settings,
+	.set_settings           = mv88fxx81_eth_set_settings,
+	.get_drvinfo            = mv88fxx81_eth_get_drvinfo,
+	.get_link               = mv88fxx81_eth_get_link,
+	.get_strings            = mv88fxx81_eth_get_strings,
+	.get_stats_count        = mv88fxx81_eth_get_stats_count,
+	.get_ethtool_stats      = mv88fxx81_eth_get_ethtool_stats,
+	.get_coalesce		= mv88fxx81_eth_get_coalesce,
+	.set_coalesce		= mv88fxx81_eth_set_coalesce,
+	.get_ringparam		= mv88fxx81_eth_get_ringparam,
+	.set_ringparam		= mv88fxx81_eth_set_ringparam,
+	.nway_reset		= mv88fxx81_eth_nway_restart,
+};
+
+/*
+ * device probing callback
+ */
+static int mv88fxx81_eth_probe(struct platform_device *pdev)
+{
+	struct mv88fxx81_eth_platform_data *pd;
+	struct net_device *dev;
+	struct resource *res;
+	struct mv88fxx81_private *priv;
+	struct ethtool_cmd cmd;
+	int ret;
+
+	printk(KERN_NOTICE PFX "MV88fxx81 Ethernet Driver\n");
+	priv = NULL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL)
+		return -ENODEV;
+
+	dev = alloc_etherdev(sizeof (struct mv88fxx81_private));
+	if (!dev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, dev);
+	priv = netdev_priv(dev);
+
+	/* remap iomem */
+	priv->base = ioremap_nocache(res->start, res->end - res->start + 1);
+	if (priv->base == NULL) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/* allocate a small area of memory where the chip directs data
+	 * in case of an address decoding error */
+	priv->bad_addr_area = kmalloc(BAD_ADDR_AREA_SIZE, GFP_KERNEL);
+	if (!priv->bad_addr_area) {
+		ret = -ENOMEM;
+		goto error;
+	}
+	memset(priv->bad_addr_area, 0, BAD_ADDR_AREA_SIZE);
+	priv->bad_addr_area_hw =
+		dma_map_single(NULL, priv->bad_addr_area,
+			       BAD_ADDR_AREA_SIZE, DMA_FROM_DEVICE);
+
+
+	mv_eth_write(priv, MV_ETH_UNIT_EUDA_REG, priv->bad_addr_area_hw);
+	platform_set_drvdata(pdev, dev);
+
+	/* check we have an irq associated */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	BUG_ON(!res);
+	dev->irq = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+	BUG_ON(!res);
+	priv->err_irq = res->start;
+
+	/* fill netdevice callbacks */
+	dev->open = mv88fxx81_eth_open;
+	dev->stop = mv88fxx81_eth_stop;
+	dev->hard_start_xmit = mv88fxx81_eth_xmit;
+	dev->get_stats = mv88fxx81_eth_get_stats;
+	dev->set_mac_address = mv88fxx81_eth_set_mac_address;
+	dev->set_multicast_list = mv88fxx81_eth_set_multicast_list;
+	dev->do_ioctl = mv88fxx81_eth_do_ioctl;
+
+	dev->poll = mv88fxx81_eth_poll;
+	dev->weight = 64;
+
+	SET_ETHTOOL_OPS(dev, &mv88fxx81_ethtool_ops);
+
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	/* Hook up MII support for ethtool */
+	priv->mii.dev = dev;
+	priv->mii.mdio_read = mv88fxx81_mdio_read;
+	priv->mii.mdio_write = mv88fxx81_mdio_write;
+	priv->mii.phy_id = 0x1;
+	priv->mii.phy_id_mask = 0x3f;
+	priv->mii.reg_num_mask = 0x1f;
+
+	/* configure to use a phy with autoneg on */
+	memset(&cmd, 0, sizeof (cmd));
+	priv->ignore_phy = 0;
+	cmd.port = PORT_MII;
+	cmd.transceiver = XCVR_INTERNAL;
+	cmd.phy_address = priv->mii.phy_id;
+	cmd.autoneg = AUTONEG_ENABLE;
+	/* mii lib  checks sanity  of speed, but  doesn't use  it when
+	 * autoneg is set to  AUTONEG_ENABLE, so we need to initialize
+	 * it */
+	cmd.speed = SPEED_100;
+	cmd.advertising = ADVERTISED_10baseT_Half  |
+		ADVERTISED_10baseT_Full |
+		ADVERTISED_100baseT_Half |
+		ADVERTISED_100baseT_Full;
+
+	/* if platform data given, overwrite default settings */
+	pd = pdev->dev.platform_data;
+	if (pd) {
+		memcpy(dev->dev_addr, pd->mac_addr, 6);
+		priv->mii.phy_id = pd->default_param.phy_address;
+		priv->ignore_phy = pd->ignore_phy;
+		priv->disable_gmii = pd->disable_gmii;
+		memcpy(&cmd, &pd->default_param, sizeof (cmd));
+		if (pd->mii_init) {
+			/* call platform specific switch
+			   initialization function */
+			struct switch_ops ops;
+
+			ops.dev = dev;
+			ops.mdio_read = mv88fxx81_mdio_read;
+			ops.mdio_write = mv88fxx81_mdio_write;
+
+			pd->mii_init(&ops);
+		}
+	}
+
+	/* init mutex for all mii / pscr access */
+	mutex_init(&priv->mii_mutex);
+
+	if (!priv->ignore_phy) {
+		/* a phy is used, try to probe it  */
+		ret = ethernet_phy_detect(priv);
+		if (ret) {
+			printk(KERN_ERR PFX "no PHY detected at addr %d\n",
+			       priv->mii.phy_id);
+			goto error;
+		}
+
+		/* set mac phy id register to the right value */
+		mv88fxx81_eth_update_phy_addr(priv, priv->mii.phy_id);
+
+		/* reset the phy */
+		ethernet_phy_reset(priv);
+
+		if (!priv->disable_gmii) {
+			/* check if the phy supports 1000baseT operation */
+			priv->mii.supports_gmii =
+				mii_check_gmii_support(&priv->mii);
+			if (priv->mii.supports_gmii)
+				cmd.advertising |= ADVERTISED_1000baseT_Full;
+		}
+	}
+
+	/* update port configuration */
+	mv88fxx81_eth_update_pscr(dev, &cmd);
+	if (!priv->ignore_phy) {
+		/* set phy settings */
+		mv88fxx81_eth_set_settings(dev, &cmd);
+	}
+
+	/* init rx timeout */
+	memset(&priv->rx_timeout, 0, sizeof (struct timer_list));
+	priv->rx_timeout.function = mv88fxx81_eth_rx_refill_descs_timer_wrapper;
+	priv->rx_timeout.data = (unsigned long)dev;
+
+	/* init the link check work */
+	INIT_WORK(&priv->link_update_task, mv88fxx81_eth_handle_link_change);
+
+	spin_lock_init(&priv->tx_lock);
+
+	/* set default values */
+	priv->rx_coal_usec = DEF_RX_COAL_US;
+	priv->tx_coal_usec = DEF_TX_COAL_US;
+	priv->rx_ring_size = DEF_RX_RING_SIZE;
+	priv->tx_ring_size = DEF_TX_RING_SIZE;
+
+	/* try to register device */
+	ret = register_netdev(dev);
+	if (ret)
+		goto error;
+
+
+	return 0;
+
+error:
+	if (priv) {
+		if (priv->base)
+			iounmap(priv->base);
+
+		if (priv->bad_addr_area) {
+			dma_unmap_single(NULL, priv->bad_addr_area_hw,
+					 BAD_ADDR_AREA_SIZE, DMA_FROM_DEVICE);
+			kfree(priv->bad_addr_area);
+		}
+	}
+	if (dev) {
+		platform_set_drvdata(pdev, NULL);
+		free_netdev(dev);
+	}
+	return ret;
+}
+
+static int mv88fxx81_eth_remove(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct mv88fxx81_private *priv;
+
+	dev = platform_get_drvdata(pdev);
+	priv = netdev_priv(dev);
+
+	dma_unmap_single(NULL, priv->bad_addr_area_hw,
+			 BAD_ADDR_AREA_SIZE, DMA_FROM_DEVICE);
+	kfree(priv->bad_addr_area);
+
+	iounmap(priv->base);
+	free_netdev(dev);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+
+/*
+ * platform driver declaration/registration
+ */
+static struct platform_driver mv88fxx81_eth_driver = {
+	.probe = mv88fxx81_eth_probe,
+	.remove = mv88fxx81_eth_remove,
+	.driver = {
+		.name = "mv88fxx81_eth",
+	},
+};
+
+int __init mv88fxx81_eth_init(void)
+{
+	return platform_driver_register(&mv88fxx81_eth_driver);
+}
+
+void __exit mv88fxx81_eth_exit(void)
+{
+	platform_driver_unregister(&mv88fxx81_eth_driver);
+}
+
+module_init(mv88fxx81_eth_init);
+module_exit(mv88fxx81_eth_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_DESCRIPTION("Ethernet driver for Marvell mv88fxx81");
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/drivers/net/mv88fxx81_eth.h	2010-12-29 19:30:07.131445252 +0100
@@ -0,0 +1,353 @@
+
+#ifndef __MV88FXX81_H
+#define __MV88FXX81_H
+
+#include <linux/types.h>
+#include <linux/mii.h>
+#include <linux/mutex.h>
+
+#include <asm/arch/devices.h>
+
+/*
+ * various const you may adjust
+ */
+#define DEF_RX_RING_SIZE	400
+#define DEF_TX_RING_SIZE	512
+#define DEF_RX_COAL_US		800
+#define DEF_TX_COAL_US		800
+
+#define ETH_RX_SKB_SIZE		(1600)	/* FIXME */
+
+#define MV88FXX81_MAX_SKB_RECYCLE	256
+
+
+/*
+ * various const you may _not_ change
+ */
+#define RX_DMA_DESC_ALIGN	16
+#define TX_DMA_DESC_ALIGN	16
+#define ETH_DMA_ALIGN		8
+
+#define PHY_WAIT_ITERATIONS	1000
+#define PHY_WAIT_MICRO_SECONDS	10
+
+#define MV_ETH_INT_RXMASK	(MV_ETH_INT_RX_BUFFER_0 | \
+				MV_ETH_INT_RX_NOBUFFER_0)
+
+#define MV_ETH_INT_MASK		(MV_ETH_INT_RXMASK | \
+				MV_ETH_INT_EXTEND)
+
+
+#define MV_ETH_INT_TXMASK	(MV_ETH_INTEXT_TX_BUFFER)
+
+#define MV_ETH_INT_MASK_EXT	(MV_ETH_INT_TXMASK | \
+				 MV_ETH_INTEXT_PHYSTC)
+
+#define MV_ETH_UNIT_INT_MASK	(MV_ETH_UNIT_INT_ADDR_VIOLATION | \
+				 MV_ETH_UNIT_INT_ADDR_NOMATCH)
+
+#define BAD_ADDR_AREA_SIZE	4096
+
+/*
+ * Dma descriptor
+ */
+
+/* Tx & Rx descriptors status */
+#define ETH_ERROR_SUMMARY			0x00000001
+
+/* Tx & Rx descriptors command */
+#define ETH_BUFFER_OWNED_BY_DMA			0x80000000
+
+/* Tx descriptors status */
+#define ETH_LC_ERROR				0
+#define ETH_UR_ERROR				0x00000002
+#define ETH_RL_ERROR				0x00000004
+#define ETH_LLC_SNAP_FORMAT			0x00000200
+
+/* Rx descriptors status */
+#define ETH_OVERRUN_ERROR			0x00000002
+#define ETH_MAX_FRAME_LENGTH_ERROR		0x00000004
+#define ETH_RESOURCE_ERROR			0x00000006
+#define ETH_VLAN_TAGGED				0x00080000
+#define ETH_BPDU_FRAME				0x00100000
+#define ETH_UDP_FRAME_OVER_IP_V_4		0x00200000
+#define ETH_OTHER_FRAME_TYPE			0x00400000
+#define ETH_LAYER_2_IS_ETH_V_2			0x00800000
+#define ETH_FRAME_TYPE_IP_V_4			0x01000000
+#define ETH_FRAME_HEADER_OK			0x02000000
+#define ETH_RX_LAST_DESC			0x04000000
+#define ETH_RX_FIRST_DESC			0x08000000
+#define ETH_UNKNOWN_DESTINATION_ADDR		0x10000000
+#define ETH_RX_ENABLE_INTERRUPT			0x20000000
+#define ETH_LAYER_4_CHECKSUM_OK			0x40000000
+
+/* Rx descriptors byte count */
+#define ETH_FRAME_FRAGMENTED			0x00000004
+
+/* Tx descriptors command */
+#define ETH_LAYER_4_CHECKSUM_FIRST_DESC		0x00000400
+#define ETH_FRAME_SET_TO_VLAN			0x00008000
+#define ETH_UDP_FRAME				0x00010000
+#define ETH_GEN_TCP_UDP_CHECKSUM		0x00020000
+#define ETH_GEN_IP_V_4_CHECKSUM			0x00040000
+#define ETH_ZERO_PADDING			0x00080000
+#define ETH_TX_LAST_DESC			0x00100000
+#define ETH_TX_FIRST_DESC			0x00200000
+#define ETH_GEN_CRC				0x00400000
+#define ETH_TX_ENABLE_INTERRUPT			0x00800000
+#define ETH_AUTO_MODE				0x40000000
+
+#define ETH_TX_IHL_SHIFT			11
+
+
+/*
+ * Dma descriptors
+ */
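+/* the two struct layouts below describe the same hardware descriptor;
+ * the field order differs per CPU endianness, presumably so the SDMA
+ * can be programmed with MV_ETH_DESCRIPTORS_NO_SWAP in both cases */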
+#if defined(__BIG_ENDIAN)
+struct eth_rx_desc {
+	u16 byte_cnt;		/* Descriptor buffer byte count		*/
+	u16 buf_size;		/* Buffer size				*/
+	u32 cmd_sts;		/* Descriptor command status		*/
+	u32 next_desc_ptr;	/* Next descriptor pointer		*/
+	u32 buf_ptr;		/* Descriptor buffer pointer		*/
+};
+
+struct eth_tx_desc {
+	u16 byte_cnt;		/* buffer byte count			*/
+	u16 l4i_chk;		/* CPU provided TCP checksum		*/
+	u32 cmd_sts;		/* Command/status field			*/
+	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
+	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
+};
+
+#elif defined(__LITTLE_ENDIAN)
+struct eth_rx_desc {
+	u32 cmd_sts;		/* Descriptor command status		*/
+	u16 buf_size;		/* Buffer size				*/
+	u16 byte_cnt;		/* Descriptor buffer byte count		*/
+	u32 buf_ptr;		/* Descriptor buffer pointer		*/
+	u32 next_desc_ptr;	/* Next descriptor pointer		*/
+};
+
+struct eth_tx_desc {
+	u32 cmd_sts;		/* Command/status field			*/
+	u16 l4i_chk;		/* CPU provided TCP checksum		*/
+	u16 byte_cnt;		/* buffer byte count			*/
+	u32 buf_ptr;		/* pointer to buffer for this descriptor*/
+	u32 next_desc_ptr;	/* Pointer to next descriptor		*/
+};
+#else
+#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
+#endif
+
+
+/*
+ * Ethernet port specific information
+ */
+
+/* MIB Counters register definitions */
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW	0x0
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH	0x4
+#define ETH_MIB_BAD_OCTETS_RECEIVED		0x8
+#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR	0xc
+#define ETH_MIB_GOOD_FRAMES_RECEIVED		0x10
+#define ETH_MIB_BAD_FRAMES_RECEIVED		0x14
+#define ETH_MIB_BROADCAST_FRAMES_RECEIVED	0x18
+#define ETH_MIB_MULTICAST_FRAMES_RECEIVED	0x1c
+#define ETH_MIB_FRAMES_64_OCTETS		0x20
+#define ETH_MIB_FRAMES_65_TO_127_OCTETS		0x24
+#define ETH_MIB_FRAMES_128_TO_255_OCTETS	0x28
+#define ETH_MIB_FRAMES_256_TO_511_OCTETS	0x2c
+#define ETH_MIB_FRAMES_512_TO_1023_OCTETS	0x30
+#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS	0x34
+#define ETH_MIB_GOOD_OCTETS_SENT_LOW		0x38
+#define ETH_MIB_GOOD_OCTETS_SENT_HIGH		0x3c
+#define ETH_MIB_GOOD_FRAMES_SENT		0x40
+#define ETH_MIB_EXCESSIVE_COLLISION		0x44
+#define ETH_MIB_MULTICAST_FRAMES_SENT		0x48
+#define ETH_MIB_BROADCAST_FRAMES_SENT		0x4c
+#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED	0x50
+#define ETH_MIB_FC_SENT				0x54
+#define ETH_MIB_GOOD_FC_RECEIVED		0x58
+#define ETH_MIB_BAD_FC_RECEIVED			0x5c
+#define ETH_MIB_UNDERSIZE_RECEIVED		0x60
+#define ETH_MIB_FRAGMENTS_RECEIVED		0x64
+#define ETH_MIB_OVERSIZE_RECEIVED		0x68
+#define ETH_MIB_JABBER_RECEIVED			0x6c
+#define ETH_MIB_MAC_RECEIVE_ERROR		0x70
+#define ETH_MIB_BAD_CRC_EVENT			0x74
+#define ETH_MIB_COLLISION			0x78
+#define ETH_MIB_LATE_COLLISION			0x7c
+
+struct mv88fxx81_mib_counters {
+	u64 good_octets_received;
+	u32 bad_octets_received;
+	u32 internal_mac_transmit_err;
+	u32 good_frames_received;
+	u32 bad_frames_received;
+	u32 broadcast_frames_received;
+	u32 multicast_frames_received;
+	u32 frames_64_octets;
+	u32 frames_65_to_127_octets;
+	u32 frames_128_to_255_octets;
+	u32 frames_256_to_511_octets;
+	u32 frames_512_to_1023_octets;
+	u32 frames_1024_to_max_octets;
+	u64 good_octets_sent;
+	u32 good_frames_sent;
+	u32 excessive_collision;
+	u32 multicast_frames_sent;
+	u32 broadcast_frames_sent;
+	u32 unrec_mac_control_received;
+	u32 fc_sent;
+	u32 good_fc_received;
+	u32 bad_fc_received;
+	u32 undersize_received;
+	u32 fragments_received;
+	u32 oversize_received;
+	u32 jabber_received;
+	u32 mac_receive_error;
+	u32 bad_crc_event;
+	u32 collision;
+	u32 late_collision;
+};
+
+
+struct mv88fxx81_private {
+	/* port number of this interface */
+	unsigned int port_num;
+
+	/* base remapped address of device */
+	void __iomem *base;
+
+	/* in  case of address  decoding error,  the chip  will direct
+	 * data into this memory area */
+	u8 *bad_addr_area;
+
+	/* hw view of bad_address area */
+	dma_addr_t bad_addr_area_hw;
+
+	/* irq for ethernet unit error  */
+	unsigned int err_irq;
+
+	/*
+	 * RX related
+	 */
+
+	/* size of rx queue */
+	unsigned int rx_ring_size;
+
+	/* cpu view of dma descriptor (original & aligned) */
+	struct eth_rx_desc *p_rx_desc_area_alloc;
+	struct eth_rx_desc *p_rx_desc_area;
+
+	/* hw view of dma descriptor */
+	dma_addr_t rx_desc_dma;
+
+	/* allocated size for rx dma queue */
+	int rx_desc_area_alloc_size;
+
+	/* list of skb given to hw */
+	struct sk_buff **rx_skb;
+
+	/* current number of desc available */
+	int rx_desc_count;
+
+	/* next rx descriptor to fetch from hardware */
+	int rx_curr_desc;
+
+	/* next rx descriptor to refill */
+	int rx_used_desc;
+
+	/*
+	 * Used in case RX Ring is empty, which can be caused when
+	 * system does not have resources (skb's)
+	 */
+	struct timer_list rx_timeout;
+
+	/* rx interrupt delay */
+	unsigned int rx_coal_usec;
+
+#ifdef CONFIG_SKB_RECYCLE
+	/* keep recycled skb here */
+	struct sk_buff_head skb_recycle_queue;
+#endif
+
+	/*
+	 * TX related
+	 */
+
+	/* size of tx queue */
+	unsigned int tx_ring_size;
+
+	/* cpu view of dma descriptor (original & aligned) */
+	struct eth_tx_desc *p_tx_desc_area_alloc;
+	struct eth_tx_desc *p_tx_desc_area;
+
+	/* hw view of dma descriptor */
+	dma_addr_t tx_desc_dma;
+
+	/* allocated size for tx dma desc */
+	int tx_desc_area_alloc_size;
+
+	/* list of skb given to hw */
+	struct sk_buff **tx_skb;
+
+	/* serializes start_xmit, tx reclaim and mac reprogramming */
+	spinlock_t tx_lock;
+
+	/* number of tx descriptors available */
+	int tx_desc_count;
+
+	/* next tx descriptor available */
+	int tx_curr_desc;
+
+	/* next tx descriptor to reclaim */
+	int tx_used_desc;
+
+	/* tx interrupt delay */
+	unsigned int tx_coal_usec;
+
+
+	/*
+	 * MII related
+	 */
+	struct mii_if_info mii;
+
+	/* set if no  phy is connected, so that  we manually force the
+	 * link status */
+	int ignore_phy;
+
+	/* set to disable autodetection of gigabit phy */
+	int disable_gmii;
+
+	/* all SMI and PSCR access are serialized through this mutex  */
+	struct mutex mii_mutex;
+
+	/* after link interrupt, mac  registers update is done in this
+	 * task */
+	struct work_struct link_update_task;
+
+	/* stats stuff */
+	struct net_device_stats stats;
+	struct mv88fxx81_mib_counters mib_counters;
+
+
+};
+
+/*
+ * io helpers
+ */
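+/* callers pass absolute register addresses (including
+ * MV_ETH_REGS_BASE) while priv->base points at the remapped register
+ * block, hence the subtraction */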
+static inline u32 mv_eth_read(struct mv88fxx81_private *priv,
+			      unsigned int offset)
+{
+	return readl(priv->base + offset - MV_ETH_REGS_BASE);
+}
+
+static inline void mv_eth_write(struct mv88fxx81_private *priv,
+				unsigned int offset, u32 data)
+{
+	writel(data, priv->base + offset - MV_ETH_REGS_BASE);
+}
+
+#endif /* __MV88FXX81_H */
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/drivers/net/tango2_pcinet_d.c	2010-12-29 19:30:07.191437333 +0100
@@ -0,0 +1,915 @@
+/*
+ * Network device over PCI shared memory driver.  This driver runs on
+ * the remote side and accesses the memory exported by the SMP863x.
+ * The Linux interface is a network device (pci%d).
+ *
+ * This is a point to point network device.
+ *
+ * Copyright (C) 2007 Maxime Bizon <mbizon@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/fbxdmamux.h>
+
+#include <asm/io.h>
+
+#include "tango2_pcinet.h"
+#include "tango2_pcinet_d.h"
+
+#ifdef CONFIG_BOARD_FBXO1_A
+#include <asm/arch/io.h>
+#endif
+
+#define PFX	"tango2_pcinet_d: "
+
+/*
+ * SMP863x sets up a special subvendor/subdevice when memory export is
+ * set up and ready.
+ */
+static struct pci_device_id tango2_pcinet_id_table[] = {
+	{ TANGO2_PCINET_VENDOR, TANGO2_PCINET_DEVICE,
+	  TANGO2_PCINET_SUBVENDOR, TANGO2_PCINET_SUBDEVICE, 0, 0, 0UL },
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, tango2_pcinet_id_table);
+
+/*
+ * implement shared mutex over PCI using Peterson's algorithm
+ */
+static int pcinet_mutex_lock(struct tango2_pcinet_d_priv *priv,
+			     int sleep_ok, int timeout_us)
+{
+	struct tango2_pcinet_regs *regs;
+
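+	/* Peterson's algorithm for two contenders: raise our flag
+	 * (lock_agent), hand the turn over (lock_turn = 1), then spin
+	 * while the other side both holds its flag (lock_smp863x) and
+	 * owns the turn */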
+	/* try to acquire lock */
+	regs = priv->regs;
+	regs->lock_agent = 1;
+	wmb();
+	regs->lock_turn = 1;
+	mb();
+
+	while (regs->lock_smp863x) {
+		rmb();
+		if (regs->lock_turn == 0)
+			break;
+
+		if (sleep_ok)
+			msleep(1);
+		else {
+			/* not needed  for the lock  itself, it's just
+			 * to  avoid  using  gettimeofday  to  do  the
+			 * timeout */
+			udelay(1);
+		}
+
+		if (timeout_us <= 0) {
+			regs->lock_agent = 0;
+			printk(KERN_ERR PFX "mutex lock timeout, ignoring\n");
+			return 1;
+		}
+		timeout_us -= sleep_ok ? 1000 : 1;
+	}
+
+	return 0;
+}
+
+/*
+ * disable pci device and memory/io decoder also
+ */
+static void pcinet_disable_device(struct tango2_pcinet_d_priv *priv)
+{
+	u16 pci_command;
+
+	pci_disable_device(priv->pdev);
+
+	pci_read_config_word(priv->pdev, PCI_COMMAND, &pci_command);
+	if (pci_command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
+		pci_command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
+		pci_write_config_word(priv->pdev, PCI_COMMAND, pci_command);
+	}
+}
+
+static void pcinet_mutex_unlock(struct tango2_pcinet_d_priv *priv)
+{
+	priv->regs->lock_agent = 0;
+	wmb();
+}
+
+/*
+ * generate a remote interrupt
+ */
+static inline void pcinet_gen_interrupt(struct tango2_pcinet_d_priv *priv)
+{
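+	/* on fbxo1_a any write to the mapped chip select window raises
+	 * the interrupt line toward the SMP863x (see pcinet_open) */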
+#ifdef CONFIG_BOARD_FBXO1_A
+	*((volatile unsigned int *)priv->irq_mem) = 1;
+#endif
+}
+
+/*
+ * check that the given pci memory offset falls within the pci memory area
+ */
+static int pcinet_pciaddr_check(struct tango2_pcinet_d_priv *priv,
+				uint32_t addr)
+{
+	if (addr >= priv->pci_mem_len) {
+		printk(KERN_ERR PFX "invalid pci address: %08x\n", addr);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * convert pci address to cpu usable address
+ */
+static void *pcinet_pciaddr_to_cpu(struct tango2_pcinet_d_priv *priv,
+				   uint32_t addr)
+{
+	return (priv->pci_mem + addr);
+}
+
+#ifdef CONFIG_TANGO2_PCINET_D_DMAMUX
+/*
+ * convert pci address to cpu hw address
+ */
+static dma_addr_t pcinet_pciaddr_to_cpu_hw(struct tango2_pcinet_d_priv *priv,
+					   uint32_t addr)
+{
+	return (priv->pci_mem_hw + addr);
+}
+#endif
+
+/*
+ * apply link status change and log
+ */
+static inline void pcinet_link_change(struct net_device *dev, int status)
+{
+	if (status)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
+	printk(KERN_INFO "%s: link is %s\n", dev->name,
+	       status ? "UP" : "DOWN");
+}
+
+/*
+ * poll request called by network core, process rx ring, tx done
+ */
+static int pcinet_poll(struct net_device *dev, int *budget)
+{
+	struct tango2_pcinet_d_priv *priv;
+	struct tango2_pcinet_regs *regs;
+	int limit, received, i;
+
+	priv = netdev_priv(dev);
+	regs = priv->regs;
+
+again:
+	/* check for incoming mailbox message */
+	if (unlikely(regs->tx_msg_tail != regs->tx_msg_head)) {
+		uint32_t msg;
+
+		msg = le32_to_cpu(regs->tx_msg);
+		regs->tx_msg_tail = regs->tx_msg_head;
+
+		if (msg == MSG_STATE_CHANGE) {
+			uint32_t state;
+
+			/* lock mutex to access state data */
+			pcinet_mutex_lock(priv, 0, MUTEX_LOCK_TIMEOUT);
+			state = le32_to_cpu(regs->smp863x_state);
+			if (state == SMP863X_STATE_DEAD) {
+				/* oh my god */
+				pcinet_link_change(dev, 0);
+				priv->dead = 1;
+#ifdef CONFIG_TANGO2_PCINET_D_DMAMUX
+				fbxdmamux_flush_channel(priv->tx_chan_cookie);
+#endif
+				disable_irq(dev->irq);
+				netif_rx_complete(dev);
+				regs->agent_state = AGENT_STATE_DISCONNECTED;
+				wmb();
+				pcinet_mutex_unlock(priv);
+				pcinet_disable_device(priv);
+				return 0;
+			}
+			pcinet_mutex_unlock(priv);
+		}
+	}
+
+	/* check how many packets we can receive */
+	limit = *budget;
+	if (*budget > dev->quota)
+		limit = dev->quota;
+	received = 0;
+
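+	/* ring names follow the SMP863x point of view: its tx ring is
+	 * the one we receive from, and its rx ring (see pcinet_xmit) is
+	 * the one we transmit into */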
+	/* try to bring tx tail to tx head */
+	for (i = le32_to_cpu(regs->tx_ring_tail);
+	     i != le32_to_cpu(regs->tx_ring_head);
+	     i = (i + 1) % priv->tx_ring_len) {
+		struct tango2_pci_desc *rx_desc;
+		struct sk_buff *skb;
+		unsigned int len, addr;
+		void *cpu_addr;
+
+		if (limit <= 0)
+			break;
+		--limit;
+		received++;
+
+		/* make sure we read tx_ring_head again */
+		rmb();
+
+		rx_desc = &priv->tx_ring[i];
+
+		len = le32_to_cpu(rx_desc->data_len);
+		if (unlikely(!len || len > RX_BUF_SIZE)) {
+			printk(KERN_ERR PFX "invalid rx len: %u\n", len);
+			priv->stats.rx_errors++;
+			continue;
+		}
+
+		addr = le32_to_cpu(rx_desc->buf_addr);
+		if (unlikely(pcinet_pciaddr_check(priv, addr))) {
+			printk(KERN_ERR PFX "invalid rx addr: %08x\n", addr);
+			priv->stats.rx_errors++;
+			continue;
+		}
+
+		skb = dev_alloc_skb(RX_BUF_SIZE);
+		if (unlikely(!skb)) {
+			priv->stats.rx_errors++;
+			priv->stats.rx_missed_errors++;
+			continue;
+		}
+
+		skb->dev = dev;
+		skb->protocol = htons(ETH_P_IP);
+		skb_put(skb, len);
+		cpu_addr = pcinet_pciaddr_to_cpu(priv, addr);
+		dma_sync_single_for_cpu(NULL, virt_to_dma(NULL, cpu_addr),
+					skb->len, DMA_FROM_DEVICE);
+		memcpy(skb->data, cpu_addr, len);
+
+		netif_receive_skb(skb);
+		priv->stats.rx_packets++;
+		priv->stats.rx_bytes += len;
+		dev->last_rx = jiffies;
+	}
+
+	regs->tx_ring_tail = cpu_to_le32(i);
+
+	dev->quota -= received;
+	*budget -= received;
+
+	if (limit <= 0) {
+		/* stopped early, there is still work to do */
+		return 1;
+	}
+
+	/* remove ourselves from the poll queue and re-enable the interrupt */
+	regs->h2d_rx_irq_enabled = 1;
+	mb();
+
+	netif_rx_complete(dev);
+
+	/* signal remote that we removed packet from rx queue if
+	 * interrupt is enabled */
+	if (received && regs->d2h_tx_done_irq_enabled) {
+		/* beware of posted write */
+		(void)readl(&regs->tx_ring_tail);
+		pcinet_gen_interrupt(priv);
+	}
+
+	/* might have been a tx done interrupt, check for stopped
+	 * queue */
+	if (unlikely(netif_queue_stopped(dev) &&
+		     priv->queue_stopped_cause == 0)) {
+		unsigned int head;
+
+#ifdef CONFIG_TANGO2_PCINET_D_DMAMUX
+		head = priv->local_rx_ring_head;
+#else
+		head = le32_to_cpu(regs->rx_ring_head);
+#endif
+		head = (head + 1) % priv->rx_ring_len;
+		if (head != le32_to_cpu(regs->rx_ring_tail)) {
+			/* okay queue is not full anymore */
+			regs->h2d_tx_done_irq_enabled = 0;
+			netif_wake_queue(dev);
+		}
+	}
+
+	/* we have  a race  here, new packets  may have  been inserted
+	 * between the time we scanned the list and enabled interrupt,
+	 * so rescan */
+	if (regs->tx_ring_tail != regs->tx_ring_head ||
+	    regs->tx_msg_tail != regs->tx_msg_head) {
+		if (netif_rx_reschedule(dev, received)) {
+			regs->h2d_rx_irq_enabled = 0;
+			wmb();
+			goto again;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * interrupt request from agent, can be a new buffer in rx, or a tx
+ * completion.
+ */
+static irqreturn_t pcinet_isr(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct tango2_pcinet_d_priv *priv;
+
+	dev = (struct net_device *)dev_id;
+	priv = netdev_priv(dev);
+
+	/* add device to poll list to process all work */
+	if (netif_rx_schedule_prep(dev)) {
+		/* disable remote interrupt */
+		priv->regs->h2d_rx_irq_enabled = 0;
+		__netif_rx_schedule(dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_TANGO2_PCINET_D_DMAMUX
+/*
+ * tx dma completion callback
+ */
+static void pcinet_tx_dmadone(void *cb_data, int error)
+{
+	struct net_device *dev;
+	struct tango2_pcinet_d_priv *priv;
+	struct tango2_pcinet_regs *regs;
+	struct sk_buff *skb;
+	uint32_t head;
+
+	dev = (struct net_device *)cb_data;
+	priv = netdev_priv(dev);
+	regs = priv->regs;
+
+	skb = __skb_dequeue(&priv->tx_skb_in_progress);
+	if (unlikely(!skb)) {
+		printk(KERN_ERR PFX "bug: no skb in tx queue after dma\n");
+		return;
+	}
+
+	if (priv->dead) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	if (error) {
+		priv->stats.tx_dropped++;
+		priv->stats.tx_aborted_errors++;
+	} else {
+		priv->stats.tx_packets++;
+		priv->stats.tx_bytes += skb->len;
+	}
+
+	/* dma completion is strictly ordered, so we just read the
+	 * current value of head and increment it */
+	head = le32_to_cpu(regs->rx_ring_head);
+	head = (head + 1) % priv->rx_ring_len;
+	regs->rx_ring_head = cpu_to_le32(head);
+	mb();
+	(void)readl(&regs->rx_ring_head);
+
+	/* signal remote if interrupt is enabled */
+	if (regs->d2h_tx_done_irq_enabled)
+		pcinet_gen_interrupt(priv);
+
+	dev_kfree_skb(skb);
+
+	/* this will release a dma pool request, if queue was stopped
+	 * waiting for one then wake it */
+	if (unlikely(netif_queue_stopped(dev) &&
+		     priv->queue_stopped_cause == 1))
+		netif_wake_queue(dev);
+}
+#endif
+
+/*
+ * tx request callback
+ */
+static int pcinet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct tango2_pcinet_d_priv *priv;
+	struct tango2_pcinet_regs *regs;
+	struct tango2_pci_desc *tx_desc;
+	unsigned int head, new_head;
+	uint32_t addr;
+#ifdef CONFIG_TANGO2_PCINET_D_DMAMUX
+	struct fbxdmamux_req *req;
+#else
+	void *cpu_addr;
+#endif
+
+	priv = netdev_priv(dev);
+	regs = priv->regs;
+
+	/* drop if remote is dead */
+	if (priv->dead) {
+		dev_kfree_skb(skb);
+		priv->stats.tx_dropped++;
+		priv->stats.tx_carrier_errors++;
+		return 0;
+	}
+
+	/* check for tx queue full */
+#ifdef CONFIG_TANGO2_PCINET_D_DMAMUX
+	head = priv->local_rx_ring_head;
+#else
+	head = le32_to_cpu(regs->rx_ring_head);
+#endif
+	new_head = (head + 1) % priv->rx_ring_len;
+	if (unlikely(new_head == le32_to_cpu(regs->rx_ring_tail))) {
+		/* should not happen, since we check for a full queue
+		 * before returning from each xmit */
+		printk(KERN_WARNING PFX "unexpected full tx queue\n");
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+
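+	/* the SMP863x pre-fills buf_addr in its rx descriptors with the
+	 * PCI address of a receive buffer; we only copy the payload
+	 * there and set data_len */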
+	/* fill tx descriptor */
+	tx_desc = &priv->rx_ring[head];
+	tx_desc->data_len = cpu_to_le32(skb->len);
+	addr = le32_to_cpu(tx_desc->buf_addr);
+
+#ifdef CONFIG_TANGO2_PCINET_D_DMAMUX
+	req = fbxdmamux_req_from_pool();
+	if (!req) {
+		/* something  we could  not foresee,  ask  for further
+		 * retransmit, and  mark queue stopped  with a special
+		 * flag  to remember  that lack  of dmamux  req caused
+		 * it  */
+		priv->queue_stopped_cause = 1;
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+	req->chan_cookie = priv->tx_chan_cookie;
+	req->priority = 0;
+	req->virt_src = skb->data;
+	req->hw_dst = pcinet_pciaddr_to_cpu_hw(priv, addr);
+	req->len = skb->len;
+	req->flags = FBXDMAMUX_FLAG_DST_HW;
+	req->callback = pcinet_tx_dmadone;
+	req->cb_data = dev;
+	if (fbxdmamux_submit(req)) {
+		printk(KERN_ERR PFX "fbxdmamux_submit failed, abort tx\n");
+		dev_kfree_skb(skb);
+		priv->stats.tx_dropped++;
+		priv->stats.tx_aborted_errors++;
+		return 0;
+	}
+	priv->local_rx_ring_head = new_head;
+	__skb_queue_tail(&priv->tx_skb_in_progress, skb);
+#else
+	/* memcpy skb data and update rx head */
+	if (!pcinet_pciaddr_check(priv, addr)) {
+		cpu_addr = pcinet_pciaddr_to_cpu(priv, addr);
+		memcpy(cpu_addr, skb->data, skb->len);
+		dma_sync_single_for_cpu(NULL, virt_to_dma(NULL, cpu_addr),
+					skb->len, DMA_TO_DEVICE);
+	}
+	wmb();
+	regs->rx_ring_head = cpu_to_le32(new_head);
+	mb();
+	/* beware of posted write */
+	(void)readl(&regs->rx_ring_head);
+
+	priv->stats.tx_packets++;
+	priv->stats.tx_bytes += skb->len;
+
+	/* signal remote if tx done interrupt is enabled */
+	if (regs->d2h_tx_done_irq_enabled)
+		pcinet_gen_interrupt(priv);
+
+	dev_kfree_skb(skb);
+#endif
+
+	/* stop queue if it's now full */
+	new_head = (new_head + 1) % priv->rx_ring_len;
+	if (new_head == le32_to_cpu(regs->rx_ring_tail)) {
+		regs->h2d_tx_done_irq_enabled = 1;
+		mb();
+		/* beware of posted write */
+		(void)readl(&regs->h2d_tx_done_irq_enabled);
+
+		/* race here, recheck for tx full in case host removed
+		 * buffers while we enabled irq */
+		if (new_head == le32_to_cpu(regs->rx_ring_tail)) {
+			priv->queue_stopped_cause = 0;
+			netif_stop_queue(dev);
+		} else {
+			regs->h2d_tx_done_irq_enabled = 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * get_stats callback
+ */
+static struct net_device_stats *pcinet_get_stats(struct net_device *dev)
+{
+	struct tango2_pcinet_d_priv *priv;
+	priv = netdev_priv(dev);
+	return &priv->stats;
+}
+
+/*
+ * device open callback
+ */
+static int pcinet_open(struct net_device *dev)
+{
+	struct tango2_pcinet_d_priv *priv;
+	struct tango2_pcinet_regs *regs;
+	unsigned int region_size, rx_ring_size;
+	int ret;
+
+	priv = netdev_priv(dev);
+	regs = priv->regs;
+
+	/* refuse to load if we are marked dead */
+	if (priv->dead) {
+		printk(KERN_ERR PFX "device is dead, PCI rescan needed\n");
+		return -EIO;
+	}
+
+#ifdef CONFIG_BOARD_FBXO1_A
+	/* fbxo1_a uses a chip select output to generate remote
+	 * interrupt, map the area that enables this CS */
+	priv->irq_mem = ioremap_nocache(DEVCS0_BASE, 4);
+	if (!priv->irq_mem) {
+		printk(KERN_ERR PFX "ioremap(2) failed\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+#endif
+
+	/* ioremap remaining memory */
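+	/* the exported BAR is treated as eight equal slices; the last
+	 * six hold the shared packet memory mapped here, while the
+	 * control registers at the start of the BAR are mapped
+	 * separately */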
+	region_size = (pci_resource_len(priv->pdev, 0) / 8);
+	priv->pci_mem_hw = pci_resource_start(priv->pdev, 0) + region_size * 2;
+	priv->pci_mem_len = region_size * 6;
+	priv->pci_mem = ioremap_cached(priv->pci_mem_hw, priv->pci_mem_len);
+
+	if (!priv->pci_mem) {
+		printk(KERN_ERR PFX "ioremap(3) failed\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/* recheck magic, remote may have disappeared  */
+	if (le32_to_cpu(regs->magic) != PCINET_MAGIC) {
+		printk(KERN_ERR PFX "bad pcinet magic: %08x - marking dead\n",
+		       le32_to_cpu(regs->magic));
+		ret = -ENODEV;
+		goto dead;
+	}
+
+	/* lock state mutex and mark us as connecting */
+	ret = pcinet_mutex_lock(priv, 1, MUTEX_LOCK_TIMEOUT);
+	if (ret) {
+		printk(KERN_ERR PFX "mutex lock failed, marking dead\n");
+		ret = -ENODEV;
+		goto dead;
+	}
+
+	if (le32_to_cpu(regs->smp863x_state) == SMP863X_STATE_DEAD) {
+		printk(KERN_ERR PFX "remote marked dead, marking dead\n");
+		pcinet_mutex_unlock(priv);
+		ret = -ENODEV;
+		goto dead;
+	}
+	regs->agent_state = cpu_to_le32(AGENT_STATE_CONNECTING);
+	wmb();
+	pcinet_mutex_unlock(priv);
+
+	/* fetch rx/tx queue address and len */
+	priv->rx_ring = ((void *)regs + sizeof (*regs));
+	priv->rx_ring_len = le32_to_cpu(regs->rx_ring_len);
+	rx_ring_size = priv->rx_ring_len * sizeof (struct tango2_pci_desc);
+
+	priv->tx_ring = ((void *)regs + sizeof (*regs) + rx_ring_size);
+	priv->tx_ring_len = le32_to_cpu(regs->tx_ring_len);
+
+#ifdef CONFIG_TANGO2_PCINET_D_DMAMUX
+	priv->local_rx_ring_head = le32_to_cpu(regs->rx_ring_head);
+
+	/* allocate chan cookie for fbxdmamux to guarantee in order
+	 * completion of request */
+	priv->tx_chan_cookie = fbxdmamux_alloc_channel_cookie();
+
+	skb_queue_head_init(&priv->tx_skb_in_progress);
+#endif
+	priv->queue_stopped_cause = 0;
+
+	/* enable remote rx interrupt */
+	regs->h2d_rx_irq_enabled = 1;
+
+	/* register rx/tx done interrupt */
+	ret = request_irq(dev->irq, pcinet_isr, IRQF_SHARED,
+			  "tango2_pcinet_d", dev);
+	if (ret)
+		goto error;
+
+	/* mark us connected */
+	ret = pcinet_mutex_lock(priv, 1, MUTEX_LOCK_TIMEOUT);
+	if (ret) {
+		printk(KERN_ERR PFX "mutex lock failed, marking dead\n");
+		ret = -ENODEV;
+		goto dead_free_irq;
+	}
+
+	if (le32_to_cpu(regs->smp863x_state) == SMP863X_STATE_DEAD) {
+		printk(KERN_ERR PFX "remote marked dead, marking dead\n");
+		pcinet_mutex_unlock(priv);
+		ret = -ENODEV;
+		goto dead_free_irq;
+	}
+
+	/* signal the state change */
+	regs->agent_state = cpu_to_le32(AGENT_STATE_CONNECTED);
+	regs->rx_msg = cpu_to_le32(MSG_STATE_CHANGE);
+	wmb();
+	regs->rx_msg_head++;
+	wmb();
+	pcinet_mutex_unlock(priv);
+	/* beware of posted write */
+	(void)readl(&regs->rx_msg_head);
+	pcinet_gen_interrupt(priv);
+
+	netif_start_queue(dev);
+	pcinet_link_change(dev, 1);
+
+	return 0;
+
+dead_free_irq:
+	free_irq(dev->irq, dev);
+dead:
+	priv->dead = 1;
+	pcinet_disable_device(priv);
+error:
+#ifdef CONFIG_BOARD_FBXO1_A
+	if (priv->irq_mem)
+		iounmap(priv->irq_mem);
+#endif
+	if (priv->pci_mem)
+		iounmap(priv->pci_mem);
+	return ret;
+}
+
+/*
+ * device stop callback
+ */
+static int pcinet_stop(struct net_device *dev)
+{
+	struct tango2_pcinet_d_priv *priv;
+	struct tango2_pcinet_regs *regs;
+	int ret;
+
+	priv = netdev_priv(dev);
+	regs = priv->regs;
+
+	priv->queue_stopped_cause = 2;
+	netif_stop_queue(dev);
+
+	if (priv->dead)
+		goto out_free;
+
+#ifdef CONFIG_TANGO2_PCINET_D_DMAMUX
+	fbxdmamux_flush_channel(priv->tx_chan_cookie);
+	if (skb_queue_len(&priv->tx_skb_in_progress))
+		printk(KERN_ERR PFX "oops, skb in queue after flush\n");
+#endif
+	pcinet_link_change(dev, 0);
+
+	/* from this point, neither pcinet_poll nor pcinet_xmit can be
+	 * called */
+
+	/* mark us disconnected */
+	ret = pcinet_mutex_lock(priv, 1, MUTEX_LOCK_TIMEOUT);
+	if (ret) {
+		printk(KERN_ERR PFX "mutex lock failed, marking dead\n");
+		priv->dead = 1;
+		pcinet_disable_device(priv);
+		goto out_free;
+	}
+
+	if (le32_to_cpu(regs->smp863x_state) == SMP863X_STATE_DEAD) {
+		printk(KERN_ERR PFX "remote marked dead, marking dead\n");
+		pcinet_mutex_unlock(priv);
+		priv->dead = 1;
+		pcinet_disable_device(priv);
+		goto out_free;
+	}
+
+	/* signal the state change */
+	regs->agent_state = cpu_to_le32(AGENT_STATE_DISCONNECTED);
+	regs->rx_msg = cpu_to_le32(MSG_STATE_CHANGE);
+	wmb();
+	regs->rx_msg_head++;
+	wmb();
+	pcinet_mutex_unlock(priv);
+	/* beware of posted write */
+	(void)readl(&regs->rx_msg_head);
+	pcinet_gen_interrupt(priv);
+
+out_free:
+	free_irq(dev->irq, dev);
+#ifdef CONFIG_BOARD_FBXO1_A
+	iounmap(priv->irq_mem);
+#endif
+	iounmap(priv->pci_mem);
+	return 0;
+}
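+
+/*
+ * Summary of the state handshake implemented by pcinet_open() and
+ * pcinet_stop() above (descriptive comment only):
+ *
+ *	open:	map the shared memory, check the magic, take the shared
+ *		mutex, agent_state = CONNECTING, release; set up the
+ *		rings, enable h2d_rx_irq, request the irq; take the
+ *		mutex again, agent_state = CONNECTED, post
+ *		MSG_STATE_CHANGE through rx_msg/rx_msg_head, release,
+ *		then raise the doorbell interrupt.
+ *	stop:	take the mutex, agent_state = DISCONNECTED, post
+ *		MSG_STATE_CHANGE, release, raise the interrupt.
+ *
+ * whenever smp863x_state reads SMP863X_STATE_DEAD while the mutex is
+ * held, the agent marks itself dead; a PCI rescan is then needed to
+ * bring the interface back (see pcinet_open()).
+ */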
+
+/*
+ * netdev device setup callback
+ */
+static void pcinet_netdev_setup(struct net_device *dev)
+{
+	dev->hard_header_len = 0;
+	dev->mtu = RX_BUF_SIZE;
+	dev->addr_len = 0;
+	dev->tx_queue_len = 64;
+	dev->type = ARPHRD_PPP;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+}
+
+/*
+ * pci device probe callback
+ */
+static int __devinit tango2_pcinet_probe(struct pci_dev *pdev,
+					 const struct pci_device_id *ent)
+{
+	struct tango2_pcinet_d_priv *priv = NULL;
+	struct tango2_pcinet_regs *regs = NULL;
+	struct net_device *dev = NULL;
+	unsigned int region_size;
+	int ret;
+
+	if (pci_enable_device(pdev)) {
+		printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
+		return -ENODEV;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		printk(KERN_ERR PFX "Cannot find proper PCI device "
+		       "base address, aborting.\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	if (pci_request_regions(pdev, "tango2_pcinet_d")) {
+		printk(KERN_ERR PFX "Cannot request region\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	/* guess SMP863x internal region size, and ioremap region1 */
+	region_size = (pci_resource_len(pdev, 0) / 8);
+	regs = ioremap_nocache(pci_resource_start(pdev, 0) + region_size,
+			       sizeof (*regs));
+
+	if (!regs) {
+		printk(KERN_ERR PFX "ioremap failed\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/* check magic */
+	if (le32_to_cpu(regs->magic) != PCINET_MAGIC) {
+		printk(KERN_ERR PFX "bad pcinet magic: %08x\n",
+		       le32_to_cpu(regs->magic));
+		ret = -ENODEV;
+		goto error;
+	}
+
+
+	/* allocate  netdevice structure  with enough  length  for our
+	 * context data */
+	dev = alloc_netdev(sizeof (*priv), "pci%d", pcinet_netdev_setup);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	priv = netdev_priv(dev);
+	memset(priv, 0, sizeof (*priv));
+	priv->regs = regs;
+	priv->pdev = pdev;
+
+	/* install driver callbacks and register netdevice */
+	dev->open = pcinet_open;
+	dev->stop = pcinet_stop;
+	dev->hard_start_xmit = pcinet_xmit;
+	dev->get_stats = pcinet_get_stats;
+	dev->poll = pcinet_poll;
+	dev->weight = 16;
+	dev->hard_header_len = 0;
+	dev->irq = pdev->irq;
+	SET_MODULE_OWNER(dev);
+
+	if ((ret = register_netdev(dev))) {
+		printk(KERN_ERR PFX "unable to register netdevice\n");
+		goto error;
+	}
+
+	pci_set_drvdata(pdev, dev);
+	netif_carrier_off(dev);
+
+	return 0;
+
+error:
+	if (regs)
+		iounmap(regs);
+	if (dev)
+		free_netdev(dev);
+	pci_release_regions(pdev);
+	pcinet_disable_device(priv);
+	return ret;
+}
+
+/*
+ * pci device remove callback (hotplug/rmmod)
+ */
+static void __devexit tango2_pcinet_remove(struct pci_dev *pdev)
+{
+	struct tango2_pcinet_d_priv *priv;
+	struct net_device *dev;
+
+	dev = pci_get_drvdata(pdev);
+	priv = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	iounmap(priv->regs);
+
+	pci_release_regions(pdev);
+	if (!priv->dead)
+		pcinet_disable_device(priv);
+
+	/* priv lives inside dev, so free_netdev() must come last */
+	free_netdev(dev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver tango2_pcinet_driver = {
+	.name = "tango2_pcinet_d",
+	.id_table = tango2_pcinet_id_table,
+	.probe = tango2_pcinet_probe,
+	.remove = __devexit_p(tango2_pcinet_remove),
+};
+
+/*
+ * module init, just register network device
+ */
+int __init tango2_pcinet_d_init(void)
+{
+	return pci_module_init(&tango2_pcinet_driver);
+}
+
+/*
+ * module exit
+ */
+void __exit tango2_pcinet_d_exit(void)
+{
+	pci_unregister_driver(&tango2_pcinet_driver);
+}
+
+module_init(tango2_pcinet_d_init);
+module_exit(tango2_pcinet_d_exit);
+
+MODULE_DESCRIPTION("SMP863x PCI shared memory network device (agent)");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_LICENSE("GPL");
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/drivers/net/tango2_pcinet_d.h	2010-12-29 19:30:07.191437333 +0100
@@ -0,0 +1,51 @@
+
+#ifndef TANGO2_PCINET_D_H_
+#define TANGO2_PCINET_D_H_
+
+#define MUTEX_LOCK_TIMEOUT	1000
+
+struct tango2_pcinet_d_priv
+{
+	unsigned int dead;
+
+	/* pci device we are attached to */
+	struct pci_dev *pdev;
+
+	/* cpu view of pci regs */
+	struct tango2_pcinet_regs *regs;
+
+	/* rx queue (from smp863x point of view) */
+	struct tango2_pci_desc *rx_ring;
+	unsigned int rx_ring_len;
+
+	/* tx queue (from smp863x point of view) */
+	struct tango2_pci_desc *tx_ring;
+	unsigned int tx_ring_len;
+
+#ifdef CONFIG_TANGO2_PCINET_D_DMAMUX
+	/* we need a local value for rx head, since we do async tx */
+	unsigned int local_rx_ring_head;
+
+	/* tx chan cookie */
+	unsigned int tx_chan_cookie;
+
+	/* tx in progress skb queue */
+	struct sk_buff_head tx_skb_in_progress;
+#endif
+
+	unsigned int queue_stopped_cause;
+
+	struct net_device_stats stats;
+
+#ifdef CONFIG_BOARD_FBXO1_A
+	/* used to generate remote interrupt */
+	unsigned int *irq_mem;
+#endif
+
+	/* cpu view of pci memory */
+	unsigned char *pci_mem;
+	unsigned int pci_mem_len;
+	dma_addr_t pci_mem_hw;
+};
+
+#endif /* TANGO2_PCINET_D_H_ */
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/drivers/net/tango2_pcinet.h	2010-12-29 19:30:07.191437333 +0100
@@ -0,0 +1,117 @@
+
+#ifndef TANGO2_PCINET_H_
+#define TANGO2_PCINET_H_
+
+/*
+ * vendor id / device id
+ */
+#define TANGO2_PCINET_VENDOR		0x1105
+#define TANGO2_PCINET_DEVICE		PCI_ANY_ID
+#define TANGO2_PCINET_SUBVENDOR		0x4242
+#define TANGO2_PCINET_SUBDEVICE		0x02
+
+#define	PCINET_MAGIC			0x617D18B4
+
+/*
+ * buffer allocation size on rx side
+ */
+#define RX_BUF_SIZE			1500
+
+/*
+ * rx/tx ring point to these descriptors
+ */
+struct tango2_pci_desc
+{
+	/* relative bus address of data if any (address is relative to
+	 * BAR address) */
+	uint32_t	buf_addr;
+
+	/* data len */
+	uint32_t	data_len;
+
+	/* private data used by SMP863x only */
+	void		*priv;
+};
+
+/*
+ * list of message type that can be exchanged with mailbox
+ */
+#define MSG_STATE_CHANGE	1
+
+/*
+ * possible state for agent and smp863x
+ */
+#define SMP863X_STATE_DEAD		0
+#define SMP863X_STATE_RUNNING		1
+
+#define AGENT_STATE_DISCONNECTED	0
+#define AGENT_STATE_CONNECTING		1
+#define AGENT_STATE_CONNECTED		2
+
+/*
+ * this structure is shared between host and agent on PCI bus, all
+ * fields are little endian.
+ */
+struct tango2_pcinet_regs
+{
+	/* so that the agent is sure it is reading valid register
+	 * data */
+	uint32_t	magic;
+
+	/* shared mutex over pci bus */
+	uint32_t	lock_smp863x;
+	uint32_t	lock_agent;
+	uint32_t	lock_turn;
+
+	/*
+	 * fields below are protected by shared mutex
+	 */
+	uint32_t	rx_ring_len;
+	uint32_t	tx_ring_len;
+
+	uint32_t	smp863x_state;
+	uint32_t	agent_state;
+
+	/*
+	 * fields below are lockless, but they must not be accessed by
+	 * the agent when its state is DISCONNECTED.
+	 */
+
+	/* rx (SMP863x side) ring, empty buffers allocated on SMP863x
+	 * side, the remote agent fills the descriptors and moves
+	 * rx_ring_head */
+
+	/* rx head is moved by agent */
+	uint32_t	rx_ring_head;
+
+	/* rx tail is moved by SMP863x */
+	uint32_t	rx_ring_tail;
+
+
+	/* tx (SMP863x side) ring, SMP863x fills descriptors and waits
+	 * for the remote agent to clear them */
+
+	/* tx head is moved by SMP863x */
+	uint32_t	tx_ring_head;
+
+	/* tx tail is moved by agent */
+	uint32_t	tx_ring_tail;
+
+	/* smp/agent irq enabled */
+	uint32_t	h2d_rx_irq_enabled;
+	uint32_t	h2d_tx_done_irq_enabled;
+	uint32_t	d2h_rx_irq_enabled;
+	uint32_t	d2h_tx_done_irq_enabled;
+
+	/* mailbox SMP8634 -> remote agent */
+	uint32_t	tx_msg;
+	uint32_t	tx_msg_head;
+	uint32_t	tx_msg_tail;
+
+	/* mailbox remote -> SMP8634 */
+	uint32_t	rx_msg;
+	uint32_t	rx_msg_head;
+	uint32_t	rx_msg_tail;
+};
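+
+/*
+ * Mailbox usage sketch, agent -> SMP8634 direction, as done by the
+ * agent driver in this patch (descriptive only):
+ *
+ *	regs->rx_msg = cpu_to_le32(MSG_STATE_CHANGE);
+ *	wmb();
+ *	regs->rx_msg_head++;
+ *	wmb();
+ *	(void)readl(&regs->rx_msg_head);	flush the posted PCI write
+ *	... raise the doorbell interrupt ...
+ *
+ * the rx_msg_tail field is presumably advanced by the SMP8634 once the
+ * message has been handled; the tx_msg/tx_msg_head/tx_msg_tail triple
+ * mirrors this for the opposite direction.
+ */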
+
+#endif /* TANGO2_PCINET_H_ */
diff -Nruw linux-2.6.20.14-fbx/drivers/tango2./Makefile linux-2.6.20.14-fbx/drivers/tango2/Makefile
--- linux-2.6.20.14-fbx/drivers/tango2./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/drivers/tango2/Makefile	2010-12-29 19:30:07.471444848 +0100
@@ -0,0 +1,6 @@
+# Makefile for the TANGO2 device drivers
+
+obj-$(CONFIG_TANGO2_FIP) += fip.o
+obj-$(CONFIG_TANGO2_GPIO) += gpio.o
+obj-$(CONFIG_TANGO2_IR) += ir.o
+obj-$(CONFIG_TANGO2_FB) += fb.o
diff -Nruw linux-2.6.20.14-fbx/extdrivers./include/freebox/fbxgpio.h linux-2.6.20.14-fbx/extdrivers/include/freebox/fbxgpio.h
--- linux-2.6.20.14-fbx/extdrivers./include/freebox/fbxgpio.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/extdrivers/include/freebox/fbxgpio.h	2011-08-30 20:20:50.487745256 +0200
@@ -0,0 +1,103 @@
+
+#ifndef GPIO_H_
+# define GPIO_H_
+
+/*
+ * Gpio mapping for the Freebox v4
+ */
+#ifdef CONFIG_BOARD_FBX4
+# define GPIO_SLAC_RESET	0
+# define GPIO_TEST_MODE		1
+# define GPIO_DSP_RESET		2
+# define GPIO_PIC_RESET		3
+# define GPIO_OMG_RESET		4
+# define GPIO_FPGA_RESET	5
+# define GPIO_FPGA_DONE		6
+# define GPIO_I2C_SDA		28
+# define GPIO_I2C_SCL		29
+# define GPIO_BOARD_ID_LOW	30
+# define GPIO_BOARD_ID_HIGH	31
+# define GPIO_FPGA_IRQ		33
+#endif
+
+/*
+ * Gpio mapping for the Freebox v5a
+ */
+#ifdef CONFIG_BOARD_FBX5A
+# define GPIO_TEST_MODE		1
+# define GPIO_DSP_RESET		2
+# define GPIO_RANDOM		6
+# define GPIO_I2C_SDA		28
+# define GPIO_I2C_SCL		29
+# define GPIO_SLAC_IRQ		33
+# define GPIO_SWITCH_IRQ	34
+# define GPIO_BOARD_RESET	36
+#endif
+
+/*
+ * Gpio mapping for the Freebox v5b
+ */
+#ifdef CONFIG_ARCH_FBX5_B
+
+/* gpio above 32 are uart0 gpio */
+/* gpio above 64 are uart1 gpio */
+
+# define GPIO_DEMOD_I2C_CLK	0
+# define GPIO_DEMOD_I2C_DATA	1
+# define GPIO_FIP_DIN		2
+# define GPIO_FIP_DOUT		3
+# define GPIO_FIP_STB		4
+# define GPIO_FIP_CLK		5 /* fbx5bx */
+# define GPIO_MARVELL_INT	5 /* fbxo1_b */
+# define GPIO_IDE_RESET		6
+# define GPIO_IDE_T7		7
+# define GPIO_PCI_INTA		8
+# define GPIO_PCI_INTB		9
+# define GPIO_USB_PWR_FLT0	10
+# define GPIO_USB_PWR_FLT1	11
+# define GPIO_IR		12
+# define GPIO_DEMOD_IRQ		13 /* fbx5b1 */
+# define GPIO_BOARD_RESET_2	13 /* fbx5b2 and fbxo1_b */
+# define GPIO_DEMOD_RESET	14
+# define GPIO_DEMOD_SLEEP	15
+# define GPIO_DEMOD_STATUS	16
+# define GPIO_USB_ENABLE	17
+# define GPIO_SEL_HD		18
+# define GPIO_RESET_FIP		19
+# define GPIO_MUTE		20
+# define GPIO_PCI_ACT		21
+# define GPIO_BOARD_RESET_3	21 /* fbxo1_b */
+# define GPIO_PCI_RESET		22
+# define GPIO_ETH_PHY_RESET	23
+# define GPIO_COMMUTATION_LENTE_0	24
+# define GPIO_COMMUTATION_LENTE_1	25
+# define GPIO_COMMUTATION_RAPIDE	26
+# define GPIO_IDCARD0		27
+# define GPIO_IDCARD1		28
+# define GPIO_IDCARD2		29
+# define GPIO_IDCARD3		30
+# define GPIO_BOOT_ETHERNET	31
+
+# define GPIO_PCI_CLK_RUN	(32 + 2)
+# define GPIO_DEMOD_BKERR	(32 + 3)
+# define GPIO_IDE_CSEL		(32 + 5)
+# define GPIO_BOARD_RESET	(32 + 6)
+
+#endif
+
+/*
+ * Gpio manipulation for the smp83xx
+ */
+#ifdef CONFIG_TANGO2
+
+#include <asm/tango2/tango2_gpio.h>
+
+#define SET_GPIO(N)		em86xx_gpio_write((N), 1)
+#define CLEAR_GPIO(N)		em86xx_gpio_write((N), 0)
+#define SetGPIODir_in(N)	em86xx_gpio_setdirection((N), 0)
+#define SetGPIODir_out(N)	em86xx_gpio_setdirection((N), 1)
+#define GET_GPIO(N)		em86xx_gpio_read((N))
+
+#endif
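+
+/*
+ * Usage sketch, illustration only: the pin names and polarities below
+ * are picked arbitrarily from the fbx5b mapping above, real code must
+ * use whatever the board actually wires up.
+ *
+ *	SetGPIODir_out(GPIO_DEMOD_RESET);
+ *	CLEAR_GPIO(GPIO_DEMOD_RESET);		drive the pin low
+ *	SET_GPIO(GPIO_DEMOD_RESET);		drive the pin high
+ *
+ *	SetGPIODir_in(GPIO_DEMOD_STATUS);
+ *	if (GET_GPIO(GPIO_DEMOD_STATUS))
+ *		...
+ */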
+
+#endif /* GPIO_H_ */
diff -Nruw linux-2.6.20.14-fbx/extdrivers./include/freebox/fbximagetag.h linux-2.6.20.14-fbx/extdrivers/include/freebox/fbximagetag.h
--- linux-2.6.20.14-fbx/extdrivers./include/freebox/fbximagetag.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/extdrivers/include/freebox/fbximagetag.h	2011-08-30 20:20:50.487745256 +0200
@@ -0,0 +1,28 @@
+
+#ifndef FBXIMAGETAG_H_
+# define FBXIMAGETAG_H_
+
+#define FBX_IMAGETAG_MAGIC		0x3658382b
+#define FBX_IMAGETAG_VERSION		1
+
+#define FBX_IMAGETAG_FLAGS_HAS_KERNEL		(1 << 0)
+#define FBX_IMAGETAG_FLAGS_HAS_FS		(1 << 1)
+#define FBX_IMAGETAG_FLAGS_SKRYPTED_KERNEL	(1 << 2)
+#define FBX_IMAGETAG_FLAGS_COPY_FS		(1 << 3)
+
+struct fbx_imagetag {
+	uint32_t        crc32;		/* for whole image minus this field */
+	uint32_t        magic;
+	uint32_t        version;
+	uint32_t        total_size;	/* in bytes */
+	char		name[128];	/* zero terminated */
+	uint32_t	build_date;	/* seconds since epoch */
+	char		builder[32];	/* builder name */
+	uint32_t        flags;
+	uint32_t        kernel_offset;	/* in bytes from origin */
+	uint32_t	kernel_size;	/* in bytes */
+	uint32_t        fs_offset;	/* in bytes from origin */
+	uint32_t        fs_size;	/* in bytes */
+} __attribute__((packed));
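+
+/*
+ * Minimal consumer sketch (assumptions: the tag sits at the start of
+ * the image, "origin" means the start of the tag, and the fields use
+ * the byte order of the tools that build the image; the exact crc32
+ * parameters are defined by those tools and not repeated here):
+ *
+ *	const struct fbx_imagetag *tag = (const void *)image;
+ *	const char *kernel;
+ *
+ *	if (tag->magic != FBX_IMAGETAG_MAGIC ||
+ *	    tag->version != FBX_IMAGETAG_VERSION)
+ *		return -EINVAL;
+ *	if (tag->flags & FBX_IMAGETAG_FLAGS_HAS_KERNEL)
+ *		kernel = (const char *)image + tag->kernel_offset;
+ */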
+
+#endif /* FBXIMAGETAG_H_ */
diff -Nruw linux-2.6.20.14-fbx/fs/ctmpfs./ctmpfs.h linux-2.6.20.14-fbx/fs/ctmpfs/ctmpfs.h
--- linux-2.6.20.14-fbx/fs/ctmpfs./ctmpfs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/fs/ctmpfs/ctmpfs.h	2010-12-29 19:30:08.241449663 +0100
@@ -0,0 +1,117 @@
+#ifndef CTMPFS_H_
+#define CTMPFS_H_
+
+#include <linux/rwsem.h>
+
+#define CTMPFS_MAGIC	0x2e0e258e
+
+/*
+ * CTMPFS inode
+ */
+#define CTMPFS_I_F_IS_DIR		(1 << 0)
+#define CTMPFS_I_F_HAS_DATA(idx)	(1 << (30 + idx))
+
+#define NAME_IDX	0
+#define DATA_IDX	1
+#define LAST_IDX	2
+
+
+struct ctmpfs_inode {
+	unsigned long number;
+	unsigned int flags;
+
+	/* filename/dirname and file data / directory data, depending
+	 * on inode type */
+	unsigned int idx_start[LAST_IDX];
+	unsigned int idx_end[LAST_IDX];
+
+	struct list_head next;
+};
+
+/*
+ * retrieve ctmpfs inode data from generic vfs inode
+ */
+static inline struct ctmpfs_inode *CTMPFS_I(struct inode *inode)
+{
+	return (struct ctmpfs_inode *)inode->i_private;
+}
+
+/*
+ * handy macro to access inode data
+ */
+#define CTMPFS_I_DSTART(x)	((x)->idx_start[DATA_IDX])
+#define CTMPFS_I_DEND(x)	((x)->idx_end[DATA_IDX])
+#define CTMPFS_I_DSIZE(x)	(CTMPFS_I_DEND((x)) - CTMPFS_I_DSTART((x)))
+
+#define CTMPFS_I_NSTART(x)	((x)->idx_start[NAME_IDX])
+#define CTMPFS_I_NEND(x)	((x)->idx_end[NAME_IDX])
+#define CTMPFS_I_NSIZE(x)	(CTMPFS_I_NEND((x)) - CTMPFS_I_NSTART((x)))
+
+
+/*
+ * Allocation granularity used to grow/shrink each buffer, expressed as
+ * a power-of-two shift (must be >= 2)
+ */
+#define NAME_BUF_ALLOC_SIZE	PAGE_SHIFT
+#define FDATA_BUF_ALLOC_SIZE	PAGE_SHIFT
+#define DDATA_BUF_ALLOC_SIZE	PAGE_SHIFT
+
+/*
+ * temporary buffer size (in bytes), used for memory slide operation
+ */
+#define MEMSLIDE_BUF_SIZE	PAGE_SIZE
+
+/*
+ * index of buffer used as backend
+ */
+#define NAME_BUF		0
+#define FDATA_BUF		1
+#define DDATA_BUF		2
+#define LAST_BUF		3
+
+struct ctmpfs_sb {
+	/*
+	 * all buffer operations are protected by this r/w mutex
+	 */
+	struct rw_semaphore sem;
+
+	/*
+	 * this is the backend storage for name data, file data, and
+	 * directory content data.
+	 */
+	unsigned char *buf[LAST_BUF];
+	unsigned int buf_size[LAST_BUF];
+	unsigned int buf_alloc_size[LAST_BUF];
+	unsigned int buf_alloc_thresh[LAST_BUF];
+
+	/*
+	 * used to speedup memslide operation
+	 */
+	unsigned char *tmp_buf;
+
+	/*
+	 * list of current inode
+	 */
+	struct list_head inode_list;
+};
+
+/*
+ * return private superblock information from super_block
+ */
+static inline struct ctmpfs_sb *CTMPFS_SB(struct super_block *sb)
+{
+	return (struct ctmpfs_sb *)sb->s_fs_info;
+}
+
+/*
+ * handy macro to browse a directory
+ */
+#define ctmpfs_foreach_direntry(sb, dir, i, pchild, skip)		\
+	for (i = dir->idx_start[DATA_IDX] + (sizeof (*pchild) * skip);\
+		(pchild = (struct ctmpfs_inode **)(sb->buf[DDATA_BUF] + i)), \
+		(dir->flags & CTMPFS_I_F_HAS_DATA(DATA_IDX)) &&		\
+		i < dir->idx_end[DATA_IDX];				\
+		i += sizeof (struct ctmpfs_inode *))
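+
+/*
+ * Typical use of the iterator above (see ctmpfs_inode_lookup() and
+ * ctmpfs_file_readdir() in super.c):
+ *
+ *	struct ctmpfs_inode **pchild;
+ *	unsigned int i;
+ *
+ *	down_read(&sb_c->sem);
+ *	ctmpfs_foreach_direntry(sb_c, dir_ci, i, pchild, 0) {
+ *		... (*pchild) is one child inode of dir_ci ...
+ *	}
+ *	up_read(&sb_c->sem);
+ *
+ * sb_c->sem must be held while iterating, since the backing buffers
+ * may be reallocated and entries relocated by concurrent directory
+ * modifications.
+ */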
+
+
+#endif /* !CTMPFS_H_ */
diff -Nruw linux-2.6.20.14-fbx/fs/ctmpfs./Makefile linux-2.6.20.14-fbx/fs/ctmpfs/Makefile
--- linux-2.6.20.14-fbx/fs/ctmpfs./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/fs/ctmpfs/Makefile	2010-12-29 19:30:08.241449663 +0100
@@ -0,0 +1,7 @@
+#
+# Makefile for the ctmpfs virtual filesystem
+#
+
+obj-$(CONFIG_CTMP_FS)	+= ctmpfs.o
+
+ctmpfs-objs := super.o
diff -Nruw linux-2.6.20.14-fbx/fs/ctmpfs./super.c linux-2.6.20.14-fbx/fs/ctmpfs/super.c
--- linux-2.6.20.14-fbx/fs/ctmpfs./super.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/fs/ctmpfs/super.c	2010-12-29 19:30:08.241449663 +0100
@@ -0,0 +1,1011 @@
+/*
+ *  super.c - ctmpfs, "compact" virtual memory fs
+ *
+ *  Copyright (C) 2007 Maxime Bizon <mbizon@freebox.fr>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License version
+ *	2 as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/vmalloc.h>
+
+#include "ctmpfs.h"
+
+
+static struct inode_operations ctmpfs_dir_inode_ops;
+static struct inode_operations ctmpfs_reg_inode_ops;
+static struct file_operations ctmpfs_dir_file_ops;
+static struct file_operations ctmpfs_reg_file_ops;
+
+
+/*
+ * move memory at "mem" on len "bytes" forward/backward "noff" bytes,
+ * existing data found while moving are moved behind buffer
+ *
+ * char *p => "[01234]ABCDEFGHIJKLM";
+ * -> memslide(p, 7, 2)
+ * char *p => "AB[01234]CDEFGHIJKLM";
+ *
+ * char *p => "ABCDEFGHIJ0KLM";
+ * -> memslide(p + 10, 1, -10)
+ * char *p => "0ABCDEFGHIJKLM";
+ */
+static void
+memslide(void *mem, int len, int noff, void *tmp, int tmplen)
+{
+	int i;
+
+	if (abs(noff) > len) {
+		/* we're faster if len > noff, so adjust args if
+		 * that's the case */
+		if (noff > 0)
+			memslide(mem + len, noff, -len, tmp, tmplen);
+		else
+			memslide(mem + noff, -noff, len, tmp, tmplen);
+		return;
+	}
+
+	if (tmplen > abs(noff))
+		tmplen = abs(noff);
+
+	if (noff >= 0) {
+		for (i = 0; i < noff; i += tmplen) {
+			if (tmplen + i > noff)
+				tmplen = noff - i;
+			memcpy(tmp, mem + i + len, tmplen);
+			memmove(mem + i + tmplen, mem + i, len);
+			memcpy(mem + i, tmp, tmplen);
+		}
+	} else {
+		for (i = 0; i > noff; i -= tmplen) {
+			if (i - tmplen < noff)
+				tmplen = abs(noff - i);
+			memcpy(tmp, mem + i - tmplen, tmplen);
+			memmove(mem + i - tmplen, mem + i, len);
+			memcpy(mem + i + len - tmplen, tmp, tmplen);
+		}
+	}
+}
+
+/*
+ * grow/shrink given buffer "idx" by "adjust" bytes
+ */
+static int ctmpfs_resize_buffer(struct ctmpfs_sb *sb_c, int idx,
+				int adjust)
+{
+	unsigned char *new_buf;
+	unsigned int new_alloc_size;
+
+	/* no  vrealloc,  should not  be  too  difficult to  implement
+	 * though */
+	new_alloc_size = sb_c->buf_alloc_size[idx] + adjust;
+	new_buf = vmalloc(new_alloc_size);
+
+	if (!new_buf)
+		return 1;
+
+	memcpy(new_buf, sb_c->buf[idx], sb_c->buf_size[idx]);
+	if (sb_c->buf[idx])
+		vfree(sb_c->buf[idx]);
+
+	sb_c->buf_alloc_size[idx] += adjust;
+	sb_c->buf[idx] = new_buf;
+
+	return 0;
+}
+
+/*
+ * make sure we have enough room in buffer "idx" to add "len" bytes,
+ * grow buffer if needed
+ */
+static int ctmpfs_check_buffer(struct ctmpfs_sb *sb_c, int idx,
+			       unsigned int len)
+{
+	unsigned int needed, thresh;
+
+	/* check room */
+	if (sb_c->buf_size[idx] + len <= sb_c->buf_alloc_size[idx])
+		return 0;
+
+	/* need to grow buffer */
+	thresh = sb_c->buf_alloc_thresh[idx];
+	needed = ((len >> thresh) + 1) << thresh;
+
+	return ctmpfs_resize_buffer(sb_c, idx, needed);
+}
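+
+/*
+ * Worked example of the granularity above: the thresholds are set to
+ * PAGE_SHIFT (see ctmpfs_fill_super()), so with a 4K page a request
+ * for 100 extra bytes that does not fit grows the buffer by
+ * ((100 >> 12) + 1) << 12 = 4096 bytes, and a request for 5000 bytes
+ * grows it by 8192.  An exact multiple still gets one extra chunk
+ * (4096 => 8192), which matches the computation above.
+ */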
+
+/*
+ * try to shrink given buffer if possible
+ */
+static void ctmpfs_try_to_shrink_buffer(struct ctmpfs_sb *sb_c, int idx)
+{
+	unsigned char *new_buf;
+	unsigned int alloc, new_alloc_size;
+
+	/* check room */
+	alloc = (1 << sb_c->buf_alloc_thresh[idx]);
+
+	if (sb_c->buf_alloc_size[idx] > 0 &&
+	    sb_c->buf_alloc_size[idx] - sb_c->buf_size[idx] < alloc)
+		return;
+
+	new_alloc_size = sb_c->buf_alloc_size[idx] - alloc;
+	if (new_alloc_size == 0) {
+		vfree(sb_c->buf[idx]);
+		sb_c->buf[idx] = NULL;
+		sb_c->buf_alloc_size[idx] = 0;
+		return;
+	}
+
+	new_buf = vmalloc(new_alloc_size);
+	if (!new_buf)
+		return;
+
+	memcpy(new_buf, sb_c->buf[idx], sb_c->buf_size[idx]);
+	vfree(sb_c->buf[idx]);
+
+	sb_c->buf_alloc_size[idx] = new_alloc_size;
+	sb_c->buf[idx] = new_buf;
+}
+
+/*
+ * backend operation, remove inode data  in given buffer idx
+ */
+static void ctmpfs_inode_remove_data(struct ctmpfs_sb *sb_c,
+				     struct ctmpfs_inode *ci,
+				     int buf_idx, int ino_idx)
+{
+	char *buf;
+	unsigned int buf_size, start, end, size;
+	struct ctmpfs_inode *tmp_ci;
+
+	if (!(ci->flags & CTMPFS_I_F_HAS_DATA(ino_idx)))
+		return;
+
+	buf_size = sb_c->buf_size[buf_idx];
+	start = ci->idx_start[ino_idx];
+	end = ci->idx_end[ino_idx];
+
+	if (end == buf_size) {
+		/* easy one */
+		sb_c->buf_size[buf_idx] = ci->idx_start[ino_idx];
+		ci->flags &= ~(CTMPFS_I_F_HAS_DATA(ino_idx));
+		ctmpfs_try_to_shrink_buffer(sb_c, buf_idx);
+		return;
+	}
+
+	/* memmove needed */
+	buf = sb_c->buf[buf_idx];
+	size = end - start;
+	memmove(buf + start, buf + end, buf_size - end);
+
+	/* adjust all data offsets for all other inodes whose data
+	 * were on the modified area */
+	list_for_each_entry(tmp_ci, &sb_c->inode_list, next) {
+
+		if (tmp_ci == ci ||
+		    !(tmp_ci->flags & CTMPFS_I_F_HAS_DATA(ino_idx)) ||
+		    tmp_ci->idx_end[ino_idx] <= start)
+			continue;
+
+		if (ino_idx == DATA_IDX &&
+		    (ci->flags & CTMPFS_I_F_IS_DIR) !=
+		    (tmp_ci->flags & CTMPFS_I_F_IS_DIR))
+			continue;
+
+		tmp_ci->idx_start[ino_idx] -= size;
+		tmp_ci->idx_end[ino_idx] -= size;
+	}
+
+	ci->flags &= ~(CTMPFS_I_F_HAS_DATA(ino_idx));
+	sb_c->buf_size[buf_idx] -= size;
+	ctmpfs_try_to_shrink_buffer(sb_c, buf_idx);
+}
+
+/*
+ * backend operation, change inode data  in given buffer idx, at given
+ * offset, with optional truncate
+ */
+static int ctmpfs_inode_set_data(struct ctmpfs_sb *sb_c,
+				 struct ctmpfs_inode *ci,
+				 int buf_idx, int ino_idx, unsigned int offset,
+				 const void *data, int datalen, int truncate)
+{
+	unsigned char *buf;
+	unsigned int bufsize, end, start;
+	int size, newsize;
+	int size_change;
+
+	/* a truncate with len 0 at offset 0 is actually a delete */
+	if (!datalen && !offset && truncate) {
+		ctmpfs_inode_remove_data(sb_c, ci, buf_idx, ino_idx);
+		ctmpfs_try_to_shrink_buffer(sb_c, buf_idx);
+		return 0;
+	}
+
+	buf = sb_c->buf[buf_idx];
+	bufsize = sb_c->buf_size[buf_idx];
+
+	if (!(ci->flags & CTMPFS_I_F_HAS_DATA(ino_idx))) {
+		/* just append at end of buffer and adjust
+		 * pointers  */
+		if (ctmpfs_check_buffer(sb_c, buf_idx, datalen))
+			return -ENOMEM;
+
+		/* check_buffer might have changed buf pointer */
+		buf = sb_c->buf[buf_idx];
+
+		if (data)
+			memcpy(buf + bufsize, data, datalen);
+		else
+			memset(buf + bufsize, 0, datalen);
+		ci->idx_start[ino_idx] = bufsize;
+		ci->idx_end[ino_idx] = ci->idx_start[ino_idx] + datalen;
+		sb_c->buf_size[buf_idx] += datalen;
+		ci->flags |= CTMPFS_I_F_HAS_DATA(ino_idx);
+		return 0;
+	}
+
+	/* precompute useful values */
+	start = ci->idx_start[ino_idx];
+	end = ci->idx_end[ino_idx];
+	size = end - start;
+	newsize = offset + datalen;
+	size_change = 0;
+	if (newsize > size || truncate)
+		size_change = newsize - size;
+
+	if (size_change == 0) {
+		/* no size change, cool, just update */
+		if (data)
+			memcpy(buf + start + offset, data, datalen);
+		else
+			memset(buf + start + offset, 0, datalen);
+		return 0;
+	}
+
+	/* make sure the buffer is big enough if we add more data  */
+	if (size_change > 0 &&
+	    ctmpfs_check_buffer(sb_c, buf_idx, size_change))
+		return -ENOMEM;
+
+	/* check_buffer might have changed buf pointer */
+	buf = sb_c->buf[buf_idx];
+	start = ci->idx_start[ino_idx];
+	end = ci->idx_end[ino_idx];
+
+	if (end != bufsize) {
+		struct ctmpfs_inode *tmp_ci;
+
+		/* size changed,  and inode data are in  the middle of
+		   buffer,  we  need  to  relocate  them  at  the  end
+		   first */
+		memslide(buf + start, size, bufsize - end,
+			 sb_c->tmp_buf, MEMSLIDE_BUF_SIZE);
+
+		/* adjust all data offsets for all other inodes whose
+		 * data were on the modified area */
+		list_for_each_entry(tmp_ci, &sb_c->inode_list, next) {
+
+			if (tmp_ci == ci ||
+			    !(tmp_ci->flags & CTMPFS_I_F_HAS_DATA(ino_idx)) ||
+			    tmp_ci->idx_end[ino_idx] <= start)
+				continue;
+
+			if (ino_idx == DATA_IDX &&
+			    (ci->flags & CTMPFS_I_F_IS_DIR) !=
+			    (tmp_ci->flags & CTMPFS_I_F_IS_DIR))
+				continue;
+
+			tmp_ci->idx_start[ino_idx] -= size;
+			tmp_ci->idx_end[ino_idx] -= size;
+		}
+
+		/* adjust inode pointer */
+		ci->idx_start[ino_idx] += bufsize - end;
+		ci->idx_end[ino_idx] = bufsize;
+
+		start = ci->idx_start[ino_idx];
+		end = ci->idx_end[ino_idx];
+	}
+
+	/* update data at right offset */
+	if (data)
+		memcpy(buf + start + offset, data, datalen);
+	else
+		memset(buf + start + offset, 0, datalen);
+
+	/* apply inode data size change */
+	ci->idx_end[ino_idx] = (int)end + size_change;
+	sb_c->buf_size[buf_idx] = (int)bufsize + size_change;
+
+	if (size_change < 0)
+		ctmpfs_try_to_shrink_buffer(sb_c, buf_idx);
+	return 0;
+}
+
+/*
+ * allocate a new inode on backend storage
+ */
+static inline struct ctmpfs_inode *ctmpfs_alloc_inode(struct ctmpfs_sb *sb_c)
+{
+	return kzalloc(sizeof (struct ctmpfs_inode), GFP_KERNEL);
+}
+
+/*
+ * fill VFS inode fields from given ctmpfs inode
+ */
+static void ctmpfs_init_vfs_inode(struct ctmpfs_sb *sb_c, struct inode *inode,
+				  const struct ctmpfs_inode *inode_ci)
+{
+	unsigned int size;
+
+	inode->i_ino = inode_ci->number;
+
+	if ((inode_ci->flags & CTMPFS_I_F_IS_DIR)) {
+		struct ctmpfs_inode **pchild_ci;
+		int i;
+
+		inode->i_mode = 0755 | S_IFDIR;
+		inode->i_op = &ctmpfs_dir_inode_ops;
+
+		inc_nlink(inode);
+
+		ctmpfs_foreach_direntry(sb_c, inode_ci, i, pchild_ci, 0) {
+			if ((*pchild_ci)->flags & CTMPFS_I_F_IS_DIR)
+				inc_nlink(inode);
+		}
+		inode->i_fop = &ctmpfs_dir_file_ops;
+
+	} else {
+		inode->i_mode = 0644 | S_IFREG;
+		inode->i_blocks = 0;
+		inode->i_op = &ctmpfs_reg_inode_ops;
+		inode->i_fop = &ctmpfs_reg_file_ops;
+	}
+
+	inode->i_uid = 0;
+	inode->i_gid = 0;
+	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	inode->i_private = (void *)inode_ci;
+
+	size = 0;
+	if ((inode_ci->flags & CTMPFS_I_F_HAS_DATA(DATA_IDX)))
+		size = CTMPFS_I_DSIZE(inode_ci);
+	inode->i_size = size;
+	inode_set_bytes(inode, size);
+}
+
+/*
+ * create new VFS inode and associated ctmpfs inode
+ */
+static struct inode *ctmpfs_get_inode(struct super_block *sb,
+				      int directory, unsigned long number)
+{
+	struct ctmpfs_sb *sb_c;
+	struct ctmpfs_inode *inode_ci;
+	struct inode *inode;
+
+	inode = new_inode(sb);
+	if (!inode)
+		return NULL;
+
+	sb_c = CTMPFS_SB(sb);
+	inode_ci = ctmpfs_alloc_inode(sb_c);
+	if (!inode_ci) {
+		iput(inode);
+		return NULL;
+	}
+
+	if (number == 0)
+		inode_ci->number = iunique(sb, 1);
+	else
+		inode_ci->number = number;
+
+	if (directory)
+		inode_ci->flags |= CTMPFS_I_F_IS_DIR;
+
+	ctmpfs_init_vfs_inode(sb_c, inode, inode_ci);
+	unlock_new_inode(inode);
+	list_add_tail(&inode_ci->next, &sb_c->inode_list);
+	return inode;
+}
+
+/*
+ * inode operation, lookup
+ */
+static struct dentry *ctmpfs_inode_lookup(struct inode *dir_i,
+					  struct dentry *dentry,
+					  struct nameidata *nameidata)
+{
+	struct ctmpfs_sb *sb_c;
+	struct ctmpfs_inode *dir_ci, **pchild_ci;
+	struct inode *child_i;
+	unsigned int i;
+
+	dir_ci = CTMPFS_I(dir_i);
+
+	/* make sure we have a directory */
+	if (!(dir_ci->flags & CTMPFS_I_F_IS_DIR))
+		return ERR_PTR(-ENOTDIR);
+
+	/* start walking directory content, looking for requested
+	 * name */
+	sb_c = CTMPFS_SB(dir_i->i_sb);
+	child_i = NULL;
+
+	down_read(&sb_c->sem);
+
+	ctmpfs_foreach_direntry(sb_c, dir_ci, i, pchild_ci, 0) {
+		char *name;
+		unsigned int len;
+
+		/* check name */
+		len = CTMPFS_I_NSIZE(*pchild_ci);
+		name = sb_c->buf[NAME_BUF] + CTMPFS_I_NSTART(*pchild_ci);
+
+		if (len != dentry->d_name.len ||
+		    strncmp(dentry->d_name.name, name, len))
+			continue;
+
+		/* found, check if we have the inode in cache, if not,
+		 * allocate a new one */
+		child_i = iget_locked(dir_i->i_sb, (*pchild_ci)->number);
+		if (!child_i) {
+			up_read(&sb_c->sem);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		/* inode maybe already in cache */
+		if (!(child_i->i_state & I_NEW))
+			break;
+
+		/* fill the new inode data */
+		ctmpfs_init_vfs_inode(sb_c, child_i, *pchild_ci);
+		unlock_new_inode(child_i);
+		break;
+	}
+
+	up_read(&sb_c->sem);
+	d_add(dentry, child_i);
+	return NULL;
+}
+
+/*
+ * inode operation, mknod
+ */
+static int ctmpfs_inode_mknod(struct inode *dir_i, struct dentry *dentry,
+			      int mode, dev_t dev)
+{
+	struct ctmpfs_sb *sb_c;
+	struct ctmpfs_inode *dir_ci, *new_ci;
+	struct inode *new_i;
+
+	dir_ci = CTMPFS_I(dir_i);
+	sb_c = CTMPFS_SB(dir_i->i_sb);
+
+	/* make sure we have a directory */
+	if (!(dir_ci->flags & CTMPFS_I_F_IS_DIR))
+		return -ENOTDIR;
+
+	down_write(&sb_c->sem);
+
+	/* check for room in name and directory data buffer, so we can
+	 * fail early */
+	if (ctmpfs_check_buffer(sb_c, DDATA_BUF, sizeof (*new_ci)) ||
+	    ctmpfs_check_buffer(sb_c, NAME_BUF, dentry->d_name.len)) {
+		up_write(&sb_c->sem);
+		return -ENOMEM;
+	}
+
+	/* allocate new inode, with unique number */
+	new_i = ctmpfs_get_inode(dir_i->i_sb, (mode & S_IFDIR) ? 1 : 0, 0);
+	if (!new_i) {
+		up_write(&sb_c->sem);
+		return -ENOMEM;
+	}
+	new_ci = CTMPFS_I(new_i);
+
+	/* append name  data, can't fail since we  checked buffer size
+	 * before */
+	ctmpfs_inode_set_data(sb_c, new_ci, NAME_BUF, NAME_IDX, 0,
+			      dentry->d_name.name, dentry->d_name.len, 0);
+
+	/* append directory data: store inode address */
+	ctmpfs_inode_set_data(sb_c, dir_ci, DDATA_BUF, DATA_IDX,
+			      CTMPFS_I_DSIZE(dir_ci), &new_ci,
+			      sizeof (new_ci), 0);
+
+	dir_i->i_size += sizeof (new_ci);
+	inode_add_bytes(dir_i, sizeof (new_ci));
+
+	d_instantiate(dentry, new_i);
+	up_write(&sb_c->sem);
+
+	return 0;
+}
+
+/*
+ * inode operation, mkdir
+ */
+static int ctmpfs_inode_mkdir(struct inode *dir_i, struct dentry *dentry,
+			      int mode)
+{
+	int retval;
+
+	retval = ctmpfs_inode_mknod(dir_i, dentry, mode | S_IFDIR, 0);
+	if (!retval)
+		inc_nlink(dir_i);
+	return retval;
+}
+
+/*
+ * inode operation, create
+ */
+static int ctmpfs_inode_create(struct inode *dir_i, struct dentry *dentry,
+			      int mode, struct nameidata *nd)
+{
+	return ctmpfs_inode_mknod(dir_i, dentry, mode | S_IFREG, 0);
+}
+
+/*
+ * inode operations, unlink
+ */
+static int ctmpfs_inode_unlink(struct inode *dir_i, struct dentry *dentry)
+{
+	struct ctmpfs_sb *sb_c;
+	struct ctmpfs_inode *dir_ci, *victim_ci, **pchild_ci;
+	struct inode *victim_i;
+	unsigned int off_victim, off_last;
+	int i, found;
+
+	dir_ci = CTMPFS_I(dir_i);
+	sb_c = CTMPFS_SB(dir_i->i_sb);
+
+	/* make sure we have a directory */
+	if (!(dir_ci->flags & CTMPFS_I_F_IS_DIR))
+		return -ENOTDIR;
+
+	down_write(&sb_c->sem);
+
+	/* find victim in directory */
+	victim_i = dentry->d_inode;
+	found = 0;
+	ctmpfs_foreach_direntry(sb_c, dir_ci, i, pchild_ci, 0) {
+		if ((*pchild_ci)->number == victim_i->i_ino) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found) {
+		up_write(&sb_c->sem);
+		return -ENOENT;
+	}
+	victim_ci = *pchild_ci;
+
+	/* if victim is a directory, make sure it is empty */
+	if ((victim_ci->flags & CTMPFS_I_F_IS_DIR) &&
+	    (victim_ci->flags & CTMPFS_I_F_HAS_DATA(DATA_IDX))) {
+		up_write(&sb_c->sem);
+		return -ENOTEMPTY;
+	}
+
+	/* exchange  victim  position  with  last one  in  inode  data
+	 * buffer */
+	off_victim = (unsigned char *)pchild_ci - sb_c->buf[DDATA_BUF];
+	off_last = CTMPFS_I_DEND(dir_ci) - sizeof (victim_ci);
+
+	if (off_victim != off_last) {
+		struct ctmpfs_inode **plast_ci, *tmp;
+		plast_ci = (struct ctmpfs_inode **)(sb_c->buf[DDATA_BUF] +
+						    off_last);
+
+		tmp = *plast_ci;
+		*plast_ci = *pchild_ci;
+		*pchild_ci = tmp;
+	}
+
+	/* remove directory data */
+	ctmpfs_inode_set_data(sb_c, dir_ci, DDATA_BUF, DATA_IDX,
+			      off_last - CTMPFS_I_DSTART(dir_ci),
+			      "", 0, 1);
+	dir_i->i_size -= sizeof (victim_ci);
+	inode_sub_bytes(dir_i, sizeof (victim_ci));
+
+	/* remove name data */
+	ctmpfs_inode_remove_data(sb_c, victim_ci, NAME_BUF, NAME_IDX);
+
+	drop_nlink(victim_i);
+	if ((victim_ci->flags & CTMPFS_I_F_IS_DIR))
+		drop_nlink(victim_i);
+
+	up_write(&sb_c->sem);
+
+	return 0;
+}
+
+/*
+ * inode operations, rmdir
+ */
+static int ctmpfs_inode_rmdir(struct inode *dir_i, struct dentry *dentry)
+{
+	struct ctmpfs_sb *sb_c;
+	struct ctmpfs_inode *dir_ci;
+	int err;
+
+	dir_ci = CTMPFS_I(dir_i);
+	sb_c = CTMPFS_SB(dir_i->i_sb);
+
+	err = ctmpfs_inode_unlink(dir_i, dentry);
+	if (!err)
+		drop_nlink(dir_i);
+
+	return err;
+}
+
+
+static struct inode_operations ctmpfs_dir_inode_ops = {
+	create: ctmpfs_inode_create,
+	lookup: ctmpfs_inode_lookup,
+	mkdir: ctmpfs_inode_mkdir,
+	rmdir: ctmpfs_inode_rmdir,
+	unlink: ctmpfs_inode_unlink,
+};
+
+/*
+ * inode operations, truncate
+ */
+static void ctmpfs_inode_truncate(struct inode *reg_i)
+{
+	struct ctmpfs_sb *sb_c;
+	struct ctmpfs_inode *reg_ci;
+
+	reg_ci = CTMPFS_I(reg_i);
+	sb_c = CTMPFS_SB(reg_i->i_sb);
+
+	if (!(reg_ci->flags & CTMPFS_I_F_HAS_DATA(DATA_IDX)))
+		return;
+
+	down_write(&sb_c->sem);
+	ctmpfs_inode_remove_data(sb_c, reg_ci, FDATA_BUF, DATA_IDX);
+	reg_i->i_size = 0;
+	inode_set_bytes(reg_i, 0);
+	up_write(&sb_c->sem);
+}
+
+static struct inode_operations ctmpfs_reg_inode_ops = {
+	truncate: ctmpfs_inode_truncate,
+};
+
+/*
+ * file operations, readdir
+ */
+static int ctmpfs_file_readdir(struct file *dir_f, void *dirent,
+			       filldir_t filldir)
+{
+	struct ctmpfs_sb *sb_c;
+	struct dentry *dir_d;
+	struct inode *dir_i;
+	struct ctmpfs_inode *dir_ci;
+	unsigned int i, skip;
+	struct ctmpfs_inode **pchild_ci;
+
+	dir_d = dir_f->f_dentry;
+	dir_i = dir_d->d_inode;
+	dir_ci = CTMPFS_I(dir_i);
+
+	/* add "." and ".." if required */
+	if (dir_f->f_pos == 0) {
+		if (filldir(dirent, ".", 1, dir_f->f_pos++, dir_i->i_ino,
+			    DT_DIR))
+			return 0;
+	}
+
+	if (dir_f->f_pos < 2) {
+		if (filldir(dirent, "..", 2, dir_f->f_pos++,
+			    dir_d->d_parent->d_inode->i_ino, DT_DIR))
+			return 0;
+	}
+
+	/* check how many item we must skip */
+	skip = 0;
+	if (dir_f->f_pos > 2)
+		skip = dir_f->f_pos - 2;
+
+	/* browse directory from right offset */
+	sb_c = CTMPFS_SB(dir_i->i_sb);
+
+	down_read(&sb_c->sem);
+
+	ctmpfs_foreach_direntry(sb_c, dir_ci, i, pchild_ci, skip) {
+		char *name;
+		unsigned int len, is_dir;
+
+		len = CTMPFS_I_NSIZE(*pchild_ci);
+		name = sb_c->buf[NAME_BUF] + CTMPFS_I_NSTART(*pchild_ci);
+
+		is_dir = 0;
+		if (((*pchild_ci)->flags & CTMPFS_I_F_IS_DIR))
+			is_dir = 1;
+
+		/* fill dirent with it */
+		if (filldir(dirent, name, len, dir_f->f_pos++,
+			    (*pchild_ci)->number, is_dir ? DT_DIR : DT_REG)) {
+			up_read(&sb_c->sem);
+			return 0;
+		}
+	}
+	up_read(&sb_c->sem);
+	return 1;
+}
+
+static struct file_operations ctmpfs_dir_file_ops = {
+	readdir: &ctmpfs_file_readdir,
+};
+
+/*
+ * file operations, read
+ */
+static ssize_t ctmpfs_file_read(struct file *file, char *buffer, size_t count,
+				loff_t *ppos)
+{
+	struct ctmpfs_sb *sb_c;
+	struct inode *reg_i;
+	struct ctmpfs_inode *reg_ci;
+	unsigned int size;
+	unsigned char *p;
+
+	reg_i = file->f_dentry->d_inode;
+	reg_ci = CTMPFS_I(reg_i);
+	if (!(reg_ci->flags & CTMPFS_I_F_HAS_DATA(DATA_IDX)))
+		return 0;
+
+	sb_c = CTMPFS_SB(reg_i->i_sb);
+
+	down_read(&sb_c->sem);
+	size = CTMPFS_I_DSIZE(reg_ci);
+
+	if (*ppos >= size) {
+		up_read(&sb_c->sem);
+		return 0;
+	}
+
+	if (count > (size - *ppos))
+		count = size - *ppos;
+
+	if (count > PAGE_SIZE)
+		count = PAGE_SIZE;
+
+	p = sb_c->buf[FDATA_BUF] + CTMPFS_I_DSTART(reg_ci) + *ppos;
+	if (copy_to_user(buffer, p, count)) {
+		up_read(&sb_c->sem);
+		return -EFAULT;
+	}
+
+	up_read(&sb_c->sem);
+	*ppos += count;
+
+	return count;
+}
+
+static ssize_t ctmpfs_file_write(struct file *file, const char *buffer,
+				 size_t count, loff_t *ppos)
+{
+	struct ctmpfs_sb *sb_c;
+	struct inode *reg_i;
+	struct ctmpfs_inode *reg_ci;
+	unsigned int zerofill, grow, size;
+
+	reg_i = file->f_dentry->d_inode;
+	reg_ci = CTMPFS_I(reg_i);
+	sb_c = CTMPFS_SB(reg_i->i_sb);
+
+	down_write(&sb_c->sem);
+
+	size = CTMPFS_I_DSIZE(reg_ci);
+
+	if (count > PAGE_SIZE)
+		count = PAGE_SIZE;
+
+	/* if ppos is bigger than current size, we must zero-fill */
+	zerofill = 0;
+	if (!(reg_ci->flags & CTMPFS_I_F_HAS_DATA(DATA_IDX))) {
+		zerofill = *ppos;
+	} else {
+		if (file->f_flags & O_APPEND)
+			*ppos = size;
+		else {
+			if (*ppos > size)
+				zerofill = *ppos - size;
+		}
+	}
+
+	/* compute new size */
+	grow = 0;
+	if (!(reg_ci->flags & CTMPFS_I_F_HAS_DATA(DATA_IDX))) {
+		grow = *ppos + count;
+	} else {
+		unsigned int newsize;
+		newsize = *ppos + count;
+		if (newsize > size)
+			grow = newsize - size;
+	}
+
+	if (grow > 0 && ctmpfs_check_buffer(sb_c, FDATA_BUF, grow)) {
+		up_write(&sb_c->sem);
+		return -ENOMEM;
+	}
+
+	/* zerofill at end of buffer if needed */
+	if (zerofill)
+		ctmpfs_inode_set_data(sb_c, reg_ci, FDATA_BUF, DATA_IDX,
+				      size, NULL, zerofill, 0);
+
+	/* append file data */
+	ctmpfs_inode_set_data(sb_c, reg_ci, FDATA_BUF, DATA_IDX, *ppos,
+			      buffer, count, 0);
+
+	/* update inode size */
+	if (grow) {
+		reg_i->i_size += grow;
+		inode_add_bytes(reg_i, grow);
+	}
+
+	up_write(&sb_c->sem);
+
+	*ppos += count;
+
+	return count;
+}
+
+/*
+ * file operations, write
+ */
+static struct file_operations ctmpfs_reg_file_ops = {
+	llseek: generic_file_llseek,
+	read: ctmpfs_file_read,
+	write: ctmpfs_file_write,
+};
+
+/*
+ * superblock operation, delete_inode
+ */
+static void ctmpfs_delete_inode(struct inode *inode)
+{
+	struct ctmpfs_sb *sb_c;
+	struct ctmpfs_inode *inode_ci;
+
+	inode_ci = CTMPFS_I(inode);
+	sb_c = CTMPFS_SB(inode->i_sb);
+
+	truncate_inode_pages(&inode->i_data, 0);
+
+	/* remove file data for regular file */
+	if (!(inode_ci->flags & CTMPFS_I_F_IS_DIR))
+		ctmpfs_inode_truncate(inode);
+
+	/* remove from inode list */
+	list_del(&inode_ci->next);
+	kfree(inode_ci);
+	clear_inode (inode);
+}
+
+/*
+ * superblock operation, put_super
+ */
+static void ctmpfs_put_super(struct super_block *sb)
+{
+	struct ctmpfs_sb *sb_c;
+	struct ctmpfs_inode *tmp_ci, *tmp2_ci;
+	int i;
+
+	sb_c = CTMPFS_SB(sb);
+	for (i = 0; i < LAST_BUF; i++)
+		vfree(sb_c->buf[i]);
+	vfree(sb_c->tmp_buf);
+	list_for_each_entry_safe(tmp_ci, tmp2_ci, &sb_c->inode_list, next)
+		kfree(tmp_ci);
+	kfree(sb_c);
+	sb->s_fs_info = NULL;
+}
+
+struct super_operations ctmpfs_super_ops = {
+	delete_inode: ctmpfs_delete_inode,
+	drop_inode: generic_drop_inode,
+	put_super: ctmpfs_put_super,
+	statfs: simple_statfs,
+};
+
+/*
+ * fill super block information
+ */
+static int ctmpfs_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct inode *rinode;
+	struct ctmpfs_sb *sb_c;
+	int i;
+
+	/* allocate/init our private ctmpfs_sb data */
+	rinode = NULL;
+	sb->s_fs_info = kzalloc(sizeof (struct ctmpfs_sb), GFP_KERNEL);
+	if (!sb->s_fs_info)
+		return -ENOMEM;
+	sb_c = CTMPFS_SB(sb);
+
+	INIT_LIST_HEAD(&sb_c->inode_list);
+	init_rwsem(&sb_c->sem);
+
+	if (!(sb_c->tmp_buf = vmalloc(MEMSLIDE_BUF_SIZE)))
+		goto out_mem;
+
+	/* fill default allocation threshold */
+	sb_c->buf_alloc_thresh[NAME_BUF] = NAME_BUF_ALLOC_SIZE;
+	sb_c->buf_alloc_thresh[FDATA_BUF] = FDATA_BUF_ALLOC_SIZE;
+	sb_c->buf_alloc_thresh[DDATA_BUF] = DDATA_BUF_ALLOC_SIZE;
+
+	/* fill superblock common fields */
+	sb->s_blocksize = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic = CTMPFS_MAGIC;
+	sb->s_op = &ctmpfs_super_ops;
+	sb->s_time_gran = 1;
+
+	/* allocate root inode / dentry */
+	rinode = ctmpfs_get_inode(sb, 1, 1);
+	if (!rinode)
+		goto out_mem;
+
+	sb->s_root = d_alloc_root(rinode);
+	if (!sb->s_root)
+		goto out_mem;
+
+	return 0;
+
+out_mem:
+	if (sb_c) {
+		if (sb_c->tmp_buf)
+			vfree(sb_c->tmp_buf);
+		for (i = 0; i < LAST_BUF; i++) {
+			if (sb_c->buf[i])
+				vfree(sb_c->buf[i]);
+		}
+		if (rinode)
+			iput(rinode);
+
+		kfree(sb_c);
+	}
+	return -ENOMEM;
+}
+
+static int ctmpfs_get_sb(struct file_system_type *fs_type, int flags,
+			 const char *dev_name, void *data,
+			 struct vfsmount *mnt)
+{
+	return get_sb_nodev(fs_type, flags, data, ctmpfs_fill_super, mnt);
+}
+
+static struct file_system_type ctmpfs_fs_type = {
+	.name		= "ctmpfs",
+	.get_sb		= ctmpfs_get_sb,
+	.kill_sb	= kill_anon_super,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * module init/exit
+ */
+static int __init ctmpfs_init(void)
+{
+	return register_filesystem(&ctmpfs_fs_type);
+}
+
+static void __exit ctmpfs_exit(void)
+{
+	unregister_filesystem(&ctmpfs_fs_type);
+}
+
+module_init(ctmpfs_init)
+module_exit(ctmpfs_exit)
+MODULE_LICENSE("GPL");
diff -Nruw linux-2.6.20.14-fbx/fs/squashfs./inode.c linux-2.6.20.14-fbx/fs/squashfs/inode.c
--- linux-2.6.20.14-fbx/fs/squashfs./inode.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/fs/squashfs/inode.c	2010-12-29 19:30:08.361441361 +0100
@@ -0,0 +1,2428 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
+ * Phillip Lougher <phillip@lougher.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * inode.c
+ */
+
+#include <linux/squashfs_fs.h>
+#include <linux/module.h>
+#include <linux/zlib.h>
+#include <linux/fs.h>
+#include <linux/squashfs_fs_sb.h>
+#include <linux/squashfs_fs_i.h>
+#include <linux/buffer_head.h>
+#include <linux/vfs.h>
+#include <linux/vmalloc.h>
+#include <linux/smp_lock.h>
+
+#include "squashfs.h"
+#include "linux/sqlzma.h"
+#include "linux/sqmagic.h"
+
+#define KeepPreemptive
+#undef KeepPreemptive
+struct sqlzma {
+#ifdef KeepPreemptive
+	struct mutex mtx;
+#endif
+	unsigned char read_data[SQUASHFS_FILE_MAX_SIZE];
+	struct sqlzma_un un;
+};
+static DEFINE_PER_CPU(struct sqlzma *, sqlzma);
+
+#define dpri(fmt, args...) /* printk("%s:%d: " fmt, __func__, __LINE__, ##args) */
+#define dpri_un(un)	dpri("un{%d, {%d %p}, {%d %p}, {%d %p}}\n", \
+			     (un)->un_lzma, (un)->un_a[0].sz, (un)->un_a[0].buf, \
+			     (un)->un_a[1].sz, (un)->un_a[1].buf, \
+			     (un)->un_a[2].sz, (un)->un_a[2].buf)
+
+static void vfs_read_inode(struct inode *i);
+static struct dentry *squashfs_get_parent(struct dentry *child);
+static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode);
+static int squashfs_statfs(struct dentry *, struct kstatfs *);
+static int squashfs_symlink_readpage(struct file *file, struct page *page);
+static long long read_blocklist(struct inode *inode, int index,
+				int readahead_blks, char *block_list,
+				unsigned short **block_p, unsigned int *bsize);
+static int squashfs_readpage(struct file *file, struct page *page);
+static int squashfs_readpage4K(struct file *file, struct page *page);
+static int squashfs_readdir(struct file *, void *, filldir_t);
+static struct dentry *squashfs_lookup(struct inode *, struct dentry *,
+				struct nameidata *);
+static int squashfs_remount(struct super_block *s, int *flags, char *data);
+static void squashfs_put_super(struct super_block *);
+static int squashfs_get_sb(struct file_system_type *,int, const char *, void *,
+				struct vfsmount *);
+static struct inode *squashfs_alloc_inode(struct super_block *sb);
+static void squashfs_destroy_inode(struct inode *inode);
+static int init_inodecache(void);
+static void destroy_inodecache(void);
+
+static struct file_system_type squashfs_fs_type = {
+	.owner = THIS_MODULE,
+	.name = "squashfs",
+	.get_sb = squashfs_get_sb,
+	.kill_sb = kill_block_super,
+	.fs_flags = FS_REQUIRES_DEV
+};
+
+static const unsigned char squashfs_filetype_table[] = {
+	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
+};
+
+static struct super_operations squashfs_super_ops = {
+	.alloc_inode = squashfs_alloc_inode,
+	.destroy_inode = squashfs_destroy_inode,
+	.statfs = squashfs_statfs,
+	.put_super = squashfs_put_super,
+	.remount_fs = squashfs_remount
+};
+
+static struct super_operations squashfs_export_super_ops = {
+	.alloc_inode = squashfs_alloc_inode,
+	.destroy_inode = squashfs_destroy_inode,
+	.statfs = squashfs_statfs,
+	.put_super = squashfs_put_super,
+	.read_inode = vfs_read_inode
+};
+
+static struct export_operations squashfs_export_ops = {
+	.get_parent = squashfs_get_parent
+};
+
+SQSH_EXTERN const struct address_space_operations squashfs_symlink_aops = {
+	.readpage = squashfs_symlink_readpage
+};
+
+SQSH_EXTERN const struct address_space_operations squashfs_aops = {
+	.readpage = squashfs_readpage
+};
+
+SQSH_EXTERN const struct address_space_operations squashfs_aops_4K = {
+	.readpage = squashfs_readpage4K
+};
+
+static const struct file_operations squashfs_dir_ops = {
+	.read = generic_read_dir,
+	.readdir = squashfs_readdir
+};
+
+SQSH_EXTERN struct inode_operations squashfs_dir_inode_ops = {
+	.lookup = squashfs_lookup
+};
+
+
+static struct buffer_head *get_block_length(struct super_block *s,
+				int *cur_index, int *offset, int *c_byte)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	unsigned short temp;
+	struct buffer_head *bh;
+
+	if (!(bh = sb_bread(s, *cur_index)))
+		goto out;
+
+	if (msblk->devblksize - *offset == 1) {
+		if (msblk->swap)
+			((unsigned char *) &temp)[1] = *((unsigned char *)
+				(bh->b_data + *offset));
+		else
+			((unsigned char *) &temp)[0] = *((unsigned char *)
+				(bh->b_data + *offset));
+		brelse(bh);
+		if (!(bh = sb_bread(s, ++(*cur_index))))
+			goto out;
+		if (msblk->swap)
+			((unsigned char *) &temp)[0] = *((unsigned char *)
+				bh->b_data); 
+		else
+			((unsigned char *) &temp)[1] = *((unsigned char *)
+				bh->b_data); 
+		*c_byte = temp;
+		*offset = 1;
+	} else {
+		if (msblk->swap) {
+			((unsigned char *) &temp)[1] = *((unsigned char *)
+				(bh->b_data + *offset));
+			((unsigned char *) &temp)[0] = *((unsigned char *)
+				(bh->b_data + *offset + 1)); 
+		} else {
+			((unsigned char *) &temp)[0] = *((unsigned char *)
+				(bh->b_data + *offset));
+			((unsigned char *) &temp)[1] = *((unsigned char *)
+				(bh->b_data + *offset + 1)); 
+		}
+		*c_byte = temp;
+		*offset += 2;
+	}
+
+	if (SQUASHFS_CHECK_DATA(msblk->sblk.flags)) {
+		if (*offset == msblk->devblksize) {
+			brelse(bh);
+			if (!(bh = sb_bread(s, ++(*cur_index))))
+				goto out;
+			*offset = 0;
+		}
+		if (*((unsigned char *) (bh->b_data + *offset)) !=
+						SQUASHFS_MARKER_BYTE) {
+			ERROR("Metadata block marker corrupt @ %x\n",
+						*cur_index);
+			brelse(bh);
+			goto out;
+		}
+		(*offset)++;
+	}
+	return bh;
+
+out:
+	return NULL;
+}
+
+
+SQSH_EXTERN unsigned int squashfs_read_data(struct super_block *s, char *buffer,
+			long long index, unsigned int length,
+			long long *next_index, int srclength)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	struct buffer_head *bh[((SQUASHFS_FILE_MAX_SIZE - 1) >>
+			msblk->devblksize_log2) + 2];
+	unsigned int offset = index & ((1 << msblk->devblksize_log2) - 1);
+	unsigned int cur_index = index >> msblk->devblksize_log2;
+	int bytes, avail_bytes, b = 0, k = 0;
+	unsigned int compressed;
+	unsigned int c_byte = length;
+
+	if (c_byte) {
+		bytes = msblk->devblksize - offset;
+		compressed = SQUASHFS_COMPRESSED_BLOCK(c_byte);
+		c_byte = SQUASHFS_COMPRESSED_SIZE_BLOCK(c_byte);
+
+		TRACE("Block @ 0x%llx, %scompressed size %d, src size %d\n", index, compressed
+					? "" : "un", (unsigned int) c_byte, srclength);
+
+		if (c_byte > srclength || index < 0 || (index + c_byte) > sblk->bytes_used)
+			goto read_failure;
+
+		if (!(bh[0] = sb_getblk(s, cur_index)))
+			goto block_release;
+
+		for (b = 1; bytes < c_byte; b++) {
+			if (!(bh[b] = sb_getblk(s, ++cur_index)))
+				goto block_release;
+			bytes += msblk->devblksize;
+		}
+		ll_rw_block(READ, b, bh);
+	} else {
+		if (index < 0 || (index + 2) > sblk->bytes_used)
+			goto read_failure;
+
+		if (!(bh[0] = get_block_length(s, &cur_index, &offset,
+								&c_byte)))
+			goto read_failure;
+
+		bytes = msblk->devblksize - offset;
+		compressed = SQUASHFS_COMPRESSED(c_byte);
+		c_byte = SQUASHFS_COMPRESSED_SIZE(c_byte);
+
+		TRACE("Block @ 0x%llx, %scompressed size %d\n", index, compressed
+					? "" : "un", (unsigned int) c_byte);
+
+		if (c_byte > srclength || (index + c_byte) > sblk->bytes_used)
+			goto read_failure;
+
+		for (b = 1; bytes < c_byte; b++) {
+			if (!(bh[b] = sb_getblk(s, ++cur_index)))
+				goto block_release;
+			bytes += msblk->devblksize;
+		}
+		ll_rw_block(READ, b - 1, bh + 1);
+	}
+
+	if (compressed) {
+		int zlib_err = Z_STREAM_END;
+		int rest, start;
+		enum {Src, Dst};
+		struct sized_buf sbuf[2];
+		struct sqlzma *percpu;
+
+		/*
+		 * uncompress block
+		 */
+		for (k = 0; k < b; k++) {
+			wait_on_buffer(bh[k]);
+			if (!buffer_uptodate(bh[k]))
+				goto block_release;
+		}
+
+		avail_bytes = 0;
+		for (k = 0; !avail_bytes && k < b; k++) {
+			avail_bytes = msblk->devblksize - offset;
+			if (c_byte < avail_bytes)
+				avail_bytes = c_byte;
+			if (avail_bytes)
+				break;
+			offset = 0;
+			brelse(bh[k]);
+		}
+		bytes = 0;
+		if (!avail_bytes)
+			goto block_release; /* nothing to process */
+
+		start = k;
+		/* it disables preemption */
+		percpu = get_cpu_var(sqlzma);
+#ifdef KeepPreemptive
+		put_cpu_var(sqlzma);
+		mutex_lock(&percpu->mtx);
+#endif
+
+		for (; k < b; k++) {
+			memcpy(percpu->read_data + bytes, bh[k]->b_data + offset,
+			       avail_bytes);
+			bytes += avail_bytes;
+			offset = 0;
+			brelse(bh[k]);
+			avail_bytes = msblk->devblksize - offset;
+			rest = c_byte - bytes;
+			if (rest < avail_bytes)
+				avail_bytes = rest;
+		}
+
+		sbuf[Src].buf = percpu->read_data;
+		sbuf[Src].sz = bytes;
+		sbuf[Dst].buf = buffer;
+		sbuf[Dst].sz = srclength;
+		dpri_un(&percpu->un);
+		dpri("src %d %p, dst %d %p\n", sbuf[Src].sz, sbuf[Src].buf,
+		     sbuf[Dst].sz, sbuf[Dst].buf);
+		zlib_err = sqlzma_un(&percpu->un, sbuf + Src, sbuf + Dst);
+		bytes = percpu->un.un_reslen;
+
+#ifdef KeepPreemptive
+		mutex_unlock(&percpu->mtx);
+#else
+		put_cpu_var(sqlzma);
+#endif
+		if (unlikely(zlib_err)) {
+			dpri("zlib_err %d\n", zlib_err);
+			goto release_mutex;
+		}
+	} else {
+		int i;
+
+		for(i = 0; i < b; i++) {
+			wait_on_buffer(bh[i]);
+			if(!buffer_uptodate(bh[i]))
+				goto block_release;
+		}
+
+		for (bytes = 0; k < b; k++) {
+			avail_bytes = (c_byte - bytes) > (msblk->devblksize - offset) ?
+					msblk->devblksize - offset :
+					c_byte - bytes;
+			memcpy(buffer + bytes, bh[k]->b_data + offset, avail_bytes);
+			bytes += avail_bytes;
+			offset = 0;
+			brelse(bh[k]);
+		}
+	}
+
+	if (next_index)
+		*next_index = index + c_byte + (length ? 0 :
+				(SQUASHFS_CHECK_DATA(msblk->sblk.flags)
+				 ? 3 : 2));
+	return bytes;
+
+release_mutex:
+	//mutex_unlock(&msblk->read_data_mutex);
+
+block_release:
+	for (; k < b; k++)
+		brelse(bh[k]);
+
+read_failure:
+	ERROR("sb_bread failed reading block 0x%x\n", cur_index);
+	return 0;
+}
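+
+/*
+ * Note on the compressed path above: the buffer_head contents are
+ * gathered into a per-cpu bounce buffer (get_cpu_var(sqlzma)) and then
+ * decompressed in one go by sqlzma_un().  By default preemption stays
+ * disabled for the whole decompression; building with KeepPreemptive
+ * defined re-enables preemption and serializes on the per-buffer mutex
+ * instead.
+ */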
+
+
+SQSH_EXTERN int squashfs_get_cached_block(struct super_block *s, char *buffer,
+				long long block, unsigned int offset,
+				int length, long long *next_block,
+				unsigned int *next_offset)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	int n, i, bytes, return_length = length;
+	long long next_index;
+
+	TRACE("Entered squashfs_get_cached_block [%llx:%x]\n", block, offset);
+
+	while ( 1 ) {
+		for (i = 0; i < SQUASHFS_CACHED_BLKS; i++) 
+			if (msblk->block_cache[i].block == block)
+				break; 
+		
+		mutex_lock(&msblk->block_cache_mutex);
+
+		if (i == SQUASHFS_CACHED_BLKS) {
+			/* read inode header block */
+			for (i = msblk->next_cache, n = SQUASHFS_CACHED_BLKS;
+					n ; n --, i = (i + 1) %
+					SQUASHFS_CACHED_BLKS)
+				if (msblk->block_cache[i].block !=
+							SQUASHFS_USED_BLK)
+					break;
+
+			if (n == 0) {
+				wait_queue_t wait;
+
+				init_waitqueue_entry(&wait, current);
+				add_wait_queue(&msblk->waitq, &wait);
+				set_current_state(TASK_UNINTERRUPTIBLE);
+ 				mutex_unlock(&msblk->block_cache_mutex);
+				schedule();
+				set_current_state(TASK_RUNNING);
+				remove_wait_queue(&msblk->waitq, &wait);
+				continue;
+			}
+			msblk->next_cache = (i + 1) % SQUASHFS_CACHED_BLKS;
+
+			if (msblk->block_cache[i].block ==
+							SQUASHFS_INVALID_BLK) {
+				if (!(msblk->block_cache[i].data =
+						kmalloc(SQUASHFS_METADATA_SIZE,
+						GFP_KERNEL))) {
+					ERROR("Failed to allocate cache "
+							"block\n");
+					mutex_unlock(&msblk->block_cache_mutex);
+					goto out;
+				}
+			}
+	
+			msblk->block_cache[i].block = SQUASHFS_USED_BLK;
+			mutex_unlock(&msblk->block_cache_mutex);
+
+			msblk->block_cache[i].length = squashfs_read_data(s,
+				msblk->block_cache[i].data, block, 0, &next_index, SQUASHFS_METADATA_SIZE);
+			if (msblk->block_cache[i].length == 0) {
+				ERROR("Unable to read cache block [%llx:%x]\n",
+						block, offset);
+				mutex_lock(&msblk->block_cache_mutex);
+				msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
+				kfree(msblk->block_cache[i].data);
+				wake_up(&msblk->waitq);
+				mutex_unlock(&msblk->block_cache_mutex);
+				goto out;
+			}
+
+			mutex_lock(&msblk->block_cache_mutex);
+			wake_up(&msblk->waitq);
+			msblk->block_cache[i].block = block;
+			msblk->block_cache[i].next_index = next_index;
+			TRACE("Read cache block [%llx:%x]\n", block, offset);
+		}
+
+		if (msblk->block_cache[i].block != block) {
+			mutex_unlock(&msblk->block_cache_mutex);
+			continue;
+		}
+
+		bytes = msblk->block_cache[i].length - offset;
+
+		if (bytes < 1) {
+			mutex_unlock(&msblk->block_cache_mutex);
+			goto out;
+		} else if (bytes >= length) {
+			if (buffer)
+				memcpy(buffer, msblk->block_cache[i].data +
+						offset, length);
+			if (msblk->block_cache[i].length - offset == length) {
+				*next_block = msblk->block_cache[i].next_index;
+				*next_offset = 0;
+			} else {
+				*next_block = block;
+				*next_offset = offset + length;
+			}
+			mutex_unlock(&msblk->block_cache_mutex);
+			goto finish;
+		} else {
+			if (buffer) {
+				memcpy(buffer, msblk->block_cache[i].data +
+						offset, bytes);
+				buffer += bytes;
+			}
+			block = msblk->block_cache[i].next_index;
+			mutex_unlock(&msblk->block_cache_mutex);
+			length -= bytes;
+			offset = 0;
+		}
+	}
+
+finish:
+	return return_length;
+out:
+	return 0;
+}
+
+
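+/*
+ * Look up fragment "fragment" in the fragment index table and return its
+ * on-disk start block and (compressed) size.
+ */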
+static int get_fragment_location(struct super_block *s, unsigned int fragment,
+				long long *fragment_start_block,
+				unsigned int *fragment_size)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	long long start_block =
+		msblk->fragment_index[SQUASHFS_FRAGMENT_INDEX(fragment)];
+	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
+	struct squashfs_fragment_entry fragment_entry;
+
+	if (msblk->swap) {
+		struct squashfs_fragment_entry sfragment_entry;
+
+		if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
+					start_block, offset,
+					sizeof(sfragment_entry), &start_block,
+					&offset))
+			goto out;
+		SQUASHFS_SWAP_FRAGMENT_ENTRY(&fragment_entry, &sfragment_entry);
+	} else
+		if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
+					start_block, offset,
+					sizeof(fragment_entry), &start_block,
+					&offset))
+			goto out;
+
+	*fragment_start_block = fragment_entry.start_block;
+	*fragment_size = fragment_entry.size;
+
+	return 1;
+
+out:
+	return 0;
+}
+
+
+SQSH_EXTERN void release_cached_fragment(struct squashfs_sb_info *msblk, struct
+					squashfs_fragment_cache *fragment)
+{
+	mutex_lock(&msblk->fragment_mutex);
+	fragment->locked --;
+	wake_up(&msblk->fragment_wait_queue);
+	mutex_unlock(&msblk->fragment_mutex);
+}
+
+
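+/*
+ * Return a locked entry from the fragment cache for the fragment starting at
+ * "start_block", reading and decompressing it first if it is not already
+ * cached.  The caller drops its reference with release_cached_fragment().
+ */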
+SQSH_EXTERN struct squashfs_fragment_cache *get_cached_fragment(struct super_block
+					*s, long long start_block,
+					int length)
+{
+	int i, n;
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+
+	while ( 1 ) {
+		mutex_lock(&msblk->fragment_mutex);
+
+		for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS &&
+				msblk->fragment[i].block != start_block; i++);
+
+		if (i == SQUASHFS_CACHED_FRAGMENTS) {
+			for (i = msblk->next_fragment, n =
+				SQUASHFS_CACHED_FRAGMENTS; n &&
+				msblk->fragment[i].locked; n--, i = (i + 1) %
+				SQUASHFS_CACHED_FRAGMENTS);
+
+			if (n == 0) {
+				wait_queue_t wait;
+
+				init_waitqueue_entry(&wait, current);
+				add_wait_queue(&msblk->fragment_wait_queue,
+									&wait);
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				mutex_unlock(&msblk->fragment_mutex);
+				schedule();
+				set_current_state(TASK_RUNNING);
+				remove_wait_queue(&msblk->fragment_wait_queue,
+									&wait);
+				continue;
+			}
+			msblk->next_fragment = (msblk->next_fragment + 1) %
+				SQUASHFS_CACHED_FRAGMENTS;
+			
+			if (msblk->fragment[i].data == NULL)
+				if (!(msblk->fragment[i].data = SQUASHFS_ALLOC
+						(SQUASHFS_FILE_MAX_SIZE))) {
+					ERROR("Failed to allocate fragment "
+							"cache block\n");
+					mutex_unlock(&msblk->fragment_mutex);
+					goto out;
+				}
+
+			msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
+			msblk->fragment[i].locked = 1;
+			mutex_unlock(&msblk->fragment_mutex);
+
+			if (!(msblk->fragment[i].length = squashfs_read_data(s,
+						msblk->fragment[i].data,
+						start_block, length, NULL, sblk->block_size))) {
+				ERROR("Unable to read fragment cache block "
+							"[%llx]\n", start_block);
+				msblk->fragment[i].locked = 0;
+				smp_mb();
+				goto out;
+			}
+
+			mutex_lock(&msblk->fragment_mutex);
+			msblk->fragment[i].block = start_block;
+			TRACE("New fragment %d, start block %lld, locked %d\n",
+						i, msblk->fragment[i].block,
+						msblk->fragment[i].locked);
+			mutex_unlock(&msblk->fragment_mutex);
+			break;
+		}
+
+		msblk->fragment[i].locked++;
+		mutex_unlock(&msblk->fragment_mutex);
+		TRACE("Got fragment %d, start block %lld, locked %d\n", i,
+						msblk->fragment[i].block,
+						msblk->fragment[i].locked);
+		break;
+	}
+
+	return &msblk->fragment[i];
+
+out:
+	return NULL;
+}
+
+
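+/*
+ * Initialise the inode fields common to all inode types (inode number,
+ * timestamps, uid/gid and mode) from the base inode header.
+ */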
+static void squashfs_new_inode(struct squashfs_sb_info *msblk, struct inode *i,
+		struct squashfs_base_inode_header *inodeb)
+{
+	i->i_ino = inodeb->inode_number;
+	i->i_mtime.tv_sec = inodeb->mtime;
+	i->i_atime.tv_sec = inodeb->mtime;
+	i->i_ctime.tv_sec = inodeb->mtime;
+	i->i_uid = msblk->uid[inodeb->uid];
+	i->i_mode = inodeb->mode;
+	i->i_size = 0;
+	if (inodeb->guid == SQUASHFS_GUIDS)
+		i->i_gid = i->i_uid;
+	else
+		i->i_gid = msblk->guid[inodeb->guid];
+}
+
+
+static squashfs_inode_t squashfs_inode_lookup(struct super_block *s, int ino)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	long long start = msblk->inode_lookup_table[SQUASHFS_LOOKUP_BLOCK(ino - 1)];
+	int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino - 1);
+	squashfs_inode_t inode;
+
+	TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino);
+
+	if (msblk->swap) {
+		squashfs_inode_t sinode;
+
+		if (!squashfs_get_cached_block(s, (char *) &sinode, start, offset,
+					sizeof(sinode), &start, &offset))
+			goto out;
+		SQUASHFS_SWAP_INODE_T((&inode), &sinode);
+	} else if (!squashfs_get_cached_block(s, (char *) &inode, start, offset,
+					sizeof(inode), &start, &offset))
+			goto out;
+
+	TRACE("squashfs_inode_lookup, inode = 0x%llx\n", inode);
+
+	return inode;
+
+out:
+	return SQUASHFS_INVALID_BLK;
+}
+	
+
+static void vfs_read_inode(struct inode *i)
+{
+	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
+	squashfs_inode_t inode = squashfs_inode_lookup(i->i_sb, i->i_ino);
+
+	TRACE("Entered vfs_read_inode\n");
+
+	if(inode != SQUASHFS_INVALID_BLK)
+		(msblk->read_inode)(i, inode);
+}
+
+
+static struct dentry *squashfs_get_parent(struct dentry *child)
+{
+	struct inode *i = child->d_inode;
+	struct inode *parent = iget(i->i_sb, SQUASHFS_I(i)->u.s2.parent_inode);
+	struct dentry *rv;
+
+	TRACE("Entered squashfs_get_parent\n");
+
+	if(parent == NULL) {
+		rv = ERR_PTR(-EACCES);
+		goto out;
+	}
+
+	rv = d_alloc_anon(parent);
+	if(rv == NULL)
+		rv = ERR_PTR(-ENOMEM);
+
+out:
+	return rv;
+}
+
+	
+SQSH_EXTERN struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode, unsigned int inode_number)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct inode *i = iget_locked(s, inode_number);
+
+	TRACE("Entered squashfs_iget\n");
+
+	if(i && (i->i_state & I_NEW)) {
+		(msblk->read_inode)(i, inode);
+		unlock_new_inode(i);
+	}
+
+	return i;
+}
+
+
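+/*
+ * Read the on-disk inode referenced by "inode" (metadata block + offset) and
+ * initialise the VFS inode according to its type.  Returns 1 on success and
+ * 0 on failure, in which case the inode is marked bad.
+ */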
+static int squashfs_read_inode(struct inode *i, squashfs_inode_t inode)
+{
+	struct super_block *s = i->i_sb;
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	long long block = SQUASHFS_INODE_BLK(inode) +
+		sblk->inode_table_start;
+	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
+	long long next_block;
+	unsigned int next_offset;
+	union squashfs_inode_header id, sid;
+	struct squashfs_base_inode_header *inodeb = &id.base,
+					  *sinodeb = &sid.base;
+
+	TRACE("Entered squashfs_read_inode\n");
+
+	if (msblk->swap) {
+		if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
+					offset, sizeof(*sinodeb), &next_block,
+					&next_offset))
+			goto failed_read;
+		SQUASHFS_SWAP_BASE_INODE_HEADER(inodeb, sinodeb,
+					sizeof(*sinodeb));
+	} else
+		if (!squashfs_get_cached_block(s, (char *) inodeb, block,
+					offset, sizeof(*inodeb), &next_block,
+					&next_offset))
+			goto failed_read;
+
+	squashfs_new_inode(msblk, i, inodeb);
+
+	switch(inodeb->inode_type) {
+		case SQUASHFS_FILE_TYPE: {
+			unsigned int frag_size;
+			long long frag_blk;
+			struct squashfs_reg_inode_header *inodep = &id.reg;
+			struct squashfs_reg_inode_header *sinodep = &sid.reg;
+				
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_REG_INODE_HEADER(inodep, sinodep);
+			} else
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			frag_blk = SQUASHFS_INVALID_BLK;
+			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
+					!get_fragment_location(s,
+					inodep->fragment, &frag_blk, &frag_size))
+				goto failed_read;
+				
+			i->i_nlink = 1;
+			i->i_size = inodep->file_size;
+			i->i_fop = &generic_ro_fops;
+			i->i_mode |= S_IFREG;
+			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
+			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
+			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
+			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
+			SQUASHFS_I(i)->start_block = inodep->start_block;
+			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
+			SQUASHFS_I(i)->offset = next_offset;
+			if (sblk->block_size > 4096)
+				i->i_data.a_ops = &squashfs_aops;
+			else
+				i->i_data.a_ops = &squashfs_aops_4K;
+
+			TRACE("File inode %x:%x, start_block %llx, "
+					"block_list_start %llx, offset %x\n",
+					SQUASHFS_INODE_BLK(inode), offset,
+					inodep->start_block, next_block,
+					next_offset);
+			break;
+		}
+		case SQUASHFS_LREG_TYPE: {
+			unsigned int frag_size;
+			long long frag_blk;
+			struct squashfs_lreg_inode_header *inodep = &id.lreg;
+			struct squashfs_lreg_inode_header *sinodep = &sid.lreg;
+				
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_LREG_INODE_HEADER(inodep, sinodep);
+			} else
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			frag_blk = SQUASHFS_INVALID_BLK;
+			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
+					!get_fragment_location(s,
+					inodep->fragment, &frag_blk, &frag_size))
+				goto failed_read;
+				
+			i->i_nlink = inodep->nlink;
+			i->i_size = inodep->file_size;
+			i->i_fop = &generic_ro_fops;
+			i->i_mode |= S_IFREG;
+			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
+			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
+			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
+			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
+			SQUASHFS_I(i)->start_block = inodep->start_block;
+			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
+			SQUASHFS_I(i)->offset = next_offset;
+			if (sblk->block_size > 4096)
+				i->i_data.a_ops = &squashfs_aops;
+			else
+				i->i_data.a_ops = &squashfs_aops_4K;
+
+			TRACE("File inode %x:%x, start_block %llx, "
+					"block_list_start %llx, offset %x\n",
+					SQUASHFS_INODE_BLK(inode), offset,
+					inodep->start_block, next_block,
+					next_offset);
+			break;
+		}
+		case SQUASHFS_DIR_TYPE: {
+			struct squashfs_dir_inode_header *inodep = &id.dir;
+			struct squashfs_dir_inode_header *sinodep = &sid.dir;
+
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_DIR_INODE_HEADER(inodep, sinodep);
+			} else
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			i->i_nlink = inodep->nlink;
+			i->i_size = inodep->file_size;
+			i->i_op = &squashfs_dir_inode_ops;
+			i->i_fop = &squashfs_dir_ops;
+			i->i_mode |= S_IFDIR;
+			SQUASHFS_I(i)->start_block = inodep->start_block;
+			SQUASHFS_I(i)->offset = inodep->offset;
+			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
+			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
+
+			TRACE("Directory inode %x:%x, start_block %x, offset "
+					"%x\n", SQUASHFS_INODE_BLK(inode),
+					offset, inodep->start_block,
+					inodep->offset);
+			break;
+		}
+		case SQUASHFS_LDIR_TYPE: {
+			struct squashfs_ldir_inode_header *inodep = &id.ldir;
+			struct squashfs_ldir_inode_header *sinodep = &sid.ldir;
+
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_LDIR_INODE_HEADER(inodep,
+						sinodep);
+			} else
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			i->i_nlink = inodep->nlink;
+			i->i_size = inodep->file_size;
+			i->i_op = &squashfs_dir_inode_ops;
+			i->i_fop = &squashfs_dir_ops;
+			i->i_mode |= S_IFDIR;
+			SQUASHFS_I(i)->start_block = inodep->start_block;
+			SQUASHFS_I(i)->offset = inodep->offset;
+			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
+			SQUASHFS_I(i)->u.s2.directory_index_offset =
+								next_offset;
+			SQUASHFS_I(i)->u.s2.directory_index_count =
+								inodep->i_count;
+			SQUASHFS_I(i)->u.s2.parent_inode = inodep->parent_inode;
+
+			TRACE("Long directory inode %x:%x, start_block %x, "
+					"offset %x\n",
+					SQUASHFS_INODE_BLK(inode), offset,
+					inodep->start_block, inodep->offset);
+			break;
+		}
+		case SQUASHFS_SYMLINK_TYPE: {
+			struct squashfs_symlink_inode_header *inodep =
+								&id.symlink;
+			struct squashfs_symlink_inode_header *sinodep =
+								&sid.symlink;
+	
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_SYMLINK_INODE_HEADER(inodep,
+								sinodep);
+			} else
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			i->i_nlink = inodep->nlink;
+			i->i_size = inodep->symlink_size;
+			i->i_op = &page_symlink_inode_operations;
+			i->i_data.a_ops = &squashfs_symlink_aops;
+			i->i_mode |= S_IFLNK;
+			SQUASHFS_I(i)->start_block = next_block;
+			SQUASHFS_I(i)->offset = next_offset;
+
+			TRACE("Symbolic link inode %x:%x, start_block %llx, "
+					"offset %x\n",
+					SQUASHFS_INODE_BLK(inode), offset,
+					next_block, next_offset);
+			break;
+		 }
+		 case SQUASHFS_BLKDEV_TYPE:
+		 case SQUASHFS_CHRDEV_TYPE: {
+			struct squashfs_dev_inode_header *inodep = &id.dev;
+			struct squashfs_dev_inode_header *sinodep = &sid.dev;
+
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_DEV_INODE_HEADER(inodep, sinodep);
+			} else	
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			i->i_nlink = inodep->nlink;
+			i->i_mode |= (inodeb->inode_type ==
+					SQUASHFS_CHRDEV_TYPE) ?  S_IFCHR :
+					S_IFBLK;
+			init_special_inode(i, i->i_mode,
+					old_decode_dev(inodep->rdev));
+
+			TRACE("Device inode %x:%x, rdev %x\n",
+					SQUASHFS_INODE_BLK(inode), offset,
+					inodep->rdev);
+			break;
+		 }
+		 case SQUASHFS_FIFO_TYPE:
+		 case SQUASHFS_SOCKET_TYPE: {
+			struct squashfs_ipc_inode_header *inodep = &id.ipc;
+			struct squashfs_ipc_inode_header *sinodep = &sid.ipc;
+
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_IPC_INODE_HEADER(inodep, sinodep);
+			} else	
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			i->i_nlink = inodep->nlink;
+			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
+							? S_IFIFO : S_IFSOCK;
+			init_special_inode(i, i->i_mode, 0);
+			break;
+		 }
+		 default:
+			ERROR("Unknown inode type %d in squashfs_iget!\n",
+					inodeb->inode_type);
+			goto failed_read1;
+	}
+	
+	return 1;
+
+failed_read:
+	ERROR("Unable to read inode [%llx:%x]\n", block, offset);
+
+failed_read1:
+	make_bad_inode(i);
+	return 0;
+}
+
+
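+/*
+ * Read the inode lookup table, which maps inode numbers to on-disk inode
+ * locations and is used by the export (NFS) code.
+ */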
+static int read_inode_lookup_table(struct super_block *s)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(sblk->inodes);
+
+	TRACE("In read_inode_lookup_table, length %d\n", length);
+
+	/* Allocate inode lookup table */
+	if (!(msblk->inode_lookup_table = kmalloc(length, GFP_KERNEL))) {
+		ERROR("Failed to allocate inode lookup table\n");
+		return 0;
+	}
+   
+	if (!squashfs_read_data(s, (char *) msblk->inode_lookup_table,
+			sblk->lookup_table_start, length |
+			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
+		ERROR("unable to read inode lookup table\n");
+		return 0;
+	}
+
+	if (msblk->swap) {
+		int i;
+		long long block;
+
+		for (i = 0; i < SQUASHFS_LOOKUP_BLOCKS(sblk->inodes); i++) {
+			SQUASHFS_SWAP_LOOKUP_BLOCKS((&block),
+						&msblk->inode_lookup_table[i], 1);
+			msblk->inode_lookup_table[i] = block;
+		}
+	}
+
+	return 1;
+}
+
+
+static int read_fragment_index_table(struct super_block *s)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(sblk->fragments);
+
+	if(length == 0)
+		return 1;
+
+	/* Allocate fragment index table */
+	if (!(msblk->fragment_index = kmalloc(length, GFP_KERNEL))) {
+		ERROR("Failed to allocate fragment index table\n");
+		return 0;
+	}
+   
+	if (!squashfs_read_data(s, (char *) msblk->fragment_index,
+			sblk->fragment_table_start, length |
+			SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length)) {
+		ERROR("unable to read fragment index table\n");
+		return 0;
+	}
+
+	if (msblk->swap) {
+		int i;
+		long long fragment;
+
+		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES(sblk->fragments); i++) {
+			SQUASHFS_SWAP_FRAGMENT_INDEXES((&fragment),
+						&msblk->fragment_index[i], 1);
+			msblk->fragment_index[i] = fragment;
+		}
+	}
+
+	return 1;
+}
+
+
+static int supported_squashfs_filesystem(struct squashfs_sb_info *msblk, int silent)
+{
+	struct squashfs_super_block *sblk = &msblk->sblk;
+
+	msblk->read_inode = squashfs_read_inode;
+	msblk->read_blocklist = read_blocklist;
+	msblk->read_fragment_index_table = read_fragment_index_table;
+
+	if (sblk->s_major == 1) {
+		if (!squashfs_1_0_supported(msblk)) {
+			SERROR("Major/Minor mismatch, Squashfs 1.0 filesystems "
+				"are unsupported\n");
+			SERROR("Please recompile with "
+				"Squashfs 1.0 support enabled\n");
+			return 0;
+		}
+	} else if (sblk->s_major == 2) {
+		if (!squashfs_2_0_supported(msblk)) {
+			SERROR("Major/Minor mismatch, Squashfs 2.0 filesystems "
+				"are unsupported\n");
+			SERROR("Please recompile with "
+				"Squashfs 2.0 support enabled\n");
+			return 0;
+		}
+	} else if(sblk->s_major != SQUASHFS_MAJOR || sblk->s_minor >
+			SQUASHFS_MINOR) {
+		SERROR("Major/Minor mismatch, trying to mount newer %d.%d "
+				"filesystem\n", sblk->s_major, sblk->s_minor);
+		SERROR("Please update your kernel\n");
+		return 0;
+	}
+
+	return 1;
+}
+
+
+static int squashfs_fill_super(struct super_block *s, void *data, int silent)
+{
+	struct squashfs_sb_info *msblk;
+	struct squashfs_super_block *sblk;
+	int i, err;
+	char b[BDEVNAME_SIZE];
+	struct inode *root;
+	void *label;
+
+	TRACE("Entered squashfs_read_superblock\n");
+
+	err = -ENOMEM;
+	if (!(s->s_fs_info = kmalloc(sizeof(struct squashfs_sb_info),
+						GFP_KERNEL))) {
+		ERROR("Failed to allocate superblock\n");
+		goto failure;
+	}
+	label = &&out_fsinfo;
+	memset(s->s_fs_info, 0, sizeof(struct squashfs_sb_info));
+	msblk = s->s_fs_info;
+	sblk = &msblk->sblk;
+	
+	msblk->devblksize = sb_min_blocksize(s, BLOCK_SIZE);
+	msblk->devblksize_log2 = ffz(~msblk->devblksize);
+
+	//mutex_init(&msblk->read_data_mutex);
+	mutex_init(&msblk->read_page_mutex);
+	mutex_init(&msblk->block_cache_mutex);
+	mutex_init(&msblk->fragment_mutex);
+	mutex_init(&msblk->meta_index_mutex);
+	
+	init_waitqueue_head(&msblk->waitq);
+	init_waitqueue_head(&msblk->fragment_wait_queue);
+
+	err = -EINVAL;
+	sblk->bytes_used = sizeof(struct squashfs_super_block);
+	if (!squashfs_read_data(s, (char *) sblk, SQUASHFS_START,
+					sizeof(struct squashfs_super_block) |
+					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, sizeof(struct squashfs_super_block))) {
+		SERROR("unable to read superblock\n");
+		goto *label;
+	}
+
+	/* Check it is a SQUASHFS superblock */
+	s->s_magic = sblk->s_magic;
+	msblk->swap = 0;
+	dpri("magic 0x%x\n", sblk->s_magic);
+	switch (sblk->s_magic) {
+		struct squashfs_super_block ssblk;
+
+	case SQUASHFS_MAGIC_SWAP:
+		/*FALLTHROUGH*/
+	case SQUASHFS_MAGIC_LZMA_SWAP:
+		WARNING("Mounting a different endian SQUASHFS "
+			"filesystem on %s\n", bdevname(s->s_bdev, b));
+
+		SQUASHFS_SWAP_SUPER_BLOCK(&ssblk, sblk);
+		memcpy(sblk, &ssblk, sizeof(struct squashfs_super_block));
+		msblk->swap = 1;
+		/*FALLTHROUGH*/
+	case SQUASHFS_MAGIC:
+	case SQUASHFS_MAGIC_LZMA:
+		break;
+	default:
+		SERROR("Can't find a SQUASHFS superblock on %s\n",
+		       bdevname(s->s_bdev, b));
+		goto *label;
+	}
+
+	{
+		struct sqlzma *p;
+		dpri("block_size %d\n", sblk->block_size);
+		BUG_ON(sblk->block_size > sizeof(p->read_data));
+	}
+
+	/* Check the MAJOR & MINOR versions */
+	err = -EINVAL;
+	if(!supported_squashfs_filesystem(msblk, silent))
+		goto *label;
+
+	/* Check the filesystem does not extend beyond the end of the
+	   block device */
+	if(sblk->bytes_used < 0 || sblk->bytes_used > i_size_read(s->s_bdev->bd_inode))
+		goto *label;
+
+	/* Check the root inode for sanity */
+	if (SQUASHFS_INODE_OFFSET(sblk->root_inode) > SQUASHFS_METADATA_SIZE)
+		goto *label;
+
+	TRACE("Found valid superblock on %s\n", bdevname(s->s_bdev, b));
+	TRACE("Inodes are %scompressed\n",
+					SQUASHFS_UNCOMPRESSED_INODES
+					(sblk->flags) ? "un" : "");
+	TRACE("Data is %scompressed\n",
+					SQUASHFS_UNCOMPRESSED_DATA(sblk->flags)
+					? "un" : "");
+	TRACE("Check data is %s present in the filesystem\n",
+					SQUASHFS_CHECK_DATA(sblk->flags) ?
+					"" : "not");
+	TRACE("Filesystem size %lld bytes\n", sblk->bytes_used);
+	TRACE("Block size %d\n", sblk->block_size);
+	TRACE("Number of inodes %d\n", sblk->inodes);
+	if (sblk->s_major > 1)
+		TRACE("Number of fragments %d\n", sblk->fragments);
+	TRACE("Number of uids %d\n", sblk->no_uids);
+	TRACE("Number of gids %d\n", sblk->no_guids);
+	TRACE("sblk->inode_table_start %llx\n", sblk->inode_table_start);
+	TRACE("sblk->directory_table_start %llx\n", sblk->directory_table_start);
+	if (sblk->s_major > 1)
+		TRACE("sblk->fragment_table_start %llx\n",
+					sblk->fragment_table_start);
+	TRACE("sblk->uid_start %llx\n", sblk->uid_start);
+
+	s->s_flags |= MS_RDONLY;
+	s->s_op = &squashfs_super_ops;
+
+	/* Init inode_table block pointer array */
+	err = -ENOMEM;
+	if (!(msblk->block_cache = kmalloc(sizeof(struct squashfs_cache) *
+					SQUASHFS_CACHED_BLKS, GFP_KERNEL))) {
+		ERROR("Failed to allocate block cache\n");
+		goto *label;
+	}
+	label = &&out_block_cache;
+
+	for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
+		msblk->block_cache[i].block = SQUASHFS_INVALID_BLK;
+
+	msblk->next_cache = 0;
+
+	/* Allocate read_page block */
+	if (!(msblk->read_page = kmalloc(sblk->block_size, GFP_KERNEL))) {
+		ERROR("Failed to allocate read_page block\n");
+		goto *label;
+	}
+	label = &&out_read_page;
+
+	/* Allocate uid and gid tables */
+	if (!(msblk->uid = kmalloc((sblk->no_uids + sblk->no_guids) *
+					sizeof(unsigned int), GFP_KERNEL))) {
+		ERROR("Failed to allocate uid/gid table\n");
+		goto *label;
+	}
+	label = &&out_uid;
+	msblk->guid = msblk->uid + sblk->no_uids;
+   
+	dpri("swap %d\n", msblk->swap);
+	err = -EINVAL;
+	if (msblk->swap) {
+		unsigned int suid[sblk->no_uids + sblk->no_guids];
+
+		if (!squashfs_read_data(s, (char *) &suid, sblk->uid_start,
+					((sblk->no_uids + sblk->no_guids) *
+					 sizeof(unsigned int)) |
+					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) {
+			ERROR("unable to read uid/gid table\n");
+			goto *label;
+		}
+
+		SQUASHFS_SWAP_DATA(msblk->uid, suid, (sblk->no_uids +
+			sblk->no_guids), (sizeof(unsigned int) * 8));
+	} else
+		if (!squashfs_read_data(s, (char *) msblk->uid, sblk->uid_start,
+					((sblk->no_uids + sblk->no_guids) *
+					 sizeof(unsigned int)) |
+					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, (sblk->no_uids + sblk->no_guids) * sizeof(unsigned int))) {
+			ERROR("unable to read uid/gid table\n");
+			goto *label;
+		}
+
+
+	if (sblk->s_major == 1 && squashfs_1_0_supported(msblk))
+		goto allocate_root;
+
+	err = -ENOMEM;
+	if (!(msblk->fragment = kmalloc(sizeof(struct squashfs_fragment_cache) *
+				SQUASHFS_CACHED_FRAGMENTS, GFP_KERNEL))) {
+		ERROR("Failed to allocate fragment block cache\n");
+		goto *label;
+	}
+	label = &&out_fragment;
+
+	for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) {
+		msblk->fragment[i].locked = 0;
+		msblk->fragment[i].block = SQUASHFS_INVALID_BLK;
+		msblk->fragment[i].data = NULL;
+	}
+
+	msblk->next_fragment = 0;
+
+	/* Allocate and read fragment index table */
+	if (msblk->read_fragment_index_table(s) == 0)
+		goto *label;
+
+	if(sblk->s_major < 3 || sblk->lookup_table_start == SQUASHFS_INVALID_BLK)
+		goto allocate_root;
+
+	/* Allocate and read inode lookup table */
+	if (read_inode_lookup_table(s) == 0)
+		goto failed_mount;
+
+	s->s_op = &squashfs_export_super_ops;
+	s->s_export_op = &squashfs_export_ops;
+
+allocate_root:
+	dpri("allocate_root\n");
+	root = new_inode(s);
+	if ((msblk->read_inode)(root, sblk->root_inode) == 0) {
+		iput(root);
+		goto failed_mount;
+	}
+	insert_inode_hash(root);
+
+	if ((s->s_root = d_alloc_root(root)) == NULL) {
+		ERROR("Root inode create failed\n");
+		iput(root);
+		goto failed_mount;
+	}
+
+	TRACE("Leaving squashfs_read_super\n");
+	return 0;
+
+failed_mount:
+	kfree(msblk->inode_lookup_table);
+	kfree(msblk->fragment_index);
+	kfree(msblk->fragment_index_2);
+ out_fragment:
+	kfree(msblk->fragment);
+ out_uid:
+	kfree(msblk->uid);
+ out_read_page:
+	kfree(msblk->read_page);
+ out_block_cache:
+	kfree(msblk->block_cache);
+ out_fsinfo:
+	kfree(s->s_fs_info);
+	s->s_fs_info = NULL;
+ failure:
+	return err;
+}
+
+
+static int squashfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	struct squashfs_sb_info *msblk = dentry->d_sb->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+
+	TRACE("Entered squashfs_statfs\n");
+
+	buf->f_type = sblk->s_magic;
+	buf->f_bsize = sblk->block_size;
+	buf->f_blocks = ((sblk->bytes_used - 1) >> sblk->block_log) + 1;
+	buf->f_bfree = buf->f_bavail = 0;
+	buf->f_files = sblk->inodes;
+	buf->f_ffree = 0;
+	buf->f_namelen = SQUASHFS_NAME_LEN;
+
+	return 0;
+}
+
+
+static int squashfs_symlink_readpage(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	int index = page->index << PAGE_CACHE_SHIFT, length, bytes;
+	long long block = SQUASHFS_I(inode)->start_block;
+	int offset = SQUASHFS_I(inode)->offset;
+	void *pageaddr = kmap(page);
+
+	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
+				"%llx, offset %x\n", page->index,
+				SQUASHFS_I(inode)->start_block,
+				SQUASHFS_I(inode)->offset);
+
+	for (length = 0; length < index; length += bytes) {
+		if (!(bytes = squashfs_get_cached_block(inode->i_sb, NULL,
+				block, offset, PAGE_CACHE_SIZE, &block,
+				&offset))) {
+			ERROR("Unable to read symbolic link [%llx:%x]\n", block,
+					offset);
+			goto skip_read;
+		}
+	}
+
+	if (length != index) {
+		ERROR("(squashfs_symlink_readpage) length != index\n");
+		bytes = 0;
+		goto skip_read;
+	}
+
+	bytes = (i_size_read(inode) - length) > PAGE_CACHE_SIZE ? PAGE_CACHE_SIZE :
+					i_size_read(inode) - length;
+
+	if (!(bytes = squashfs_get_cached_block(inode->i_sb, pageaddr, block,
+					offset, bytes, &block, &offset)))
+		ERROR("Unable to read symbolic link [%llx:%x]\n", block, offset);
+
+skip_read:
+	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
+	kunmap(page);
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	unlock_page(page);
+
+	return 0;
+}
+
+
+struct meta_index *locate_meta_index(struct inode *inode, int index, int offset)
+{
+	struct meta_index *meta = NULL;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	int i;
+
+	mutex_lock(&msblk->meta_index_mutex);
+
+	TRACE("locate_meta_index: index %d, offset %d\n", index, offset);
+
+	if(msblk->meta_index == NULL)
+		goto not_allocated;
+
+	for (i = 0; i < SQUASHFS_META_NUMBER; i ++)
+		if (msblk->meta_index[i].inode_number == inode->i_ino &&
+				msblk->meta_index[i].offset >= offset &&
+				msblk->meta_index[i].offset <= index &&
+				msblk->meta_index[i].locked == 0) {
+			TRACE("locate_meta_index: entry %d, offset %d\n", i,
+					msblk->meta_index[i].offset);
+			meta = &msblk->meta_index[i];
+			offset = meta->offset;
+		}
+
+	if (meta)
+		meta->locked = 1;
+
+not_allocated:
+	mutex_unlock(&msblk->meta_index_mutex);
+
+	return meta;
+}
+
+
+struct meta_index *empty_meta_index(struct inode *inode, int offset, int skip)
+{
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	struct meta_index *meta = NULL;
+	int i;
+
+	mutex_lock(&msblk->meta_index_mutex);
+
+	TRACE("empty_meta_index: offset %d, skip %d\n", offset, skip);
+
+	if(msblk->meta_index == NULL) {
+		if (!(msblk->meta_index = kmalloc(sizeof(struct meta_index) *
+					SQUASHFS_META_NUMBER, GFP_KERNEL))) {
+			ERROR("Failed to allocate meta_index\n");
+			goto failed;
+		}
+		for(i = 0; i < SQUASHFS_META_NUMBER; i++) {
+			msblk->meta_index[i].inode_number = 0;
+			msblk->meta_index[i].locked = 0;
+		}
+		msblk->next_meta_index = 0;
+	}
+
+	for(i = SQUASHFS_META_NUMBER; i &&
+			msblk->meta_index[msblk->next_meta_index].locked; i --)
+		msblk->next_meta_index = (msblk->next_meta_index + 1) %
+			SQUASHFS_META_NUMBER;
+
+	if(i == 0) {
+		TRACE("empty_meta_index: failed!\n");
+		goto failed;
+	}
+
+	TRACE("empty_meta_index: returned meta entry %d, %p\n",
+			msblk->next_meta_index,
+			&msblk->meta_index[msblk->next_meta_index]);
+
+	meta = &msblk->meta_index[msblk->next_meta_index];
+	msblk->next_meta_index = (msblk->next_meta_index + 1) %
+			SQUASHFS_META_NUMBER;
+
+	meta->inode_number = inode->i_ino;
+	meta->offset = offset;
+	meta->skip = skip;
+	meta->entries = 0;
+	meta->locked = 1;
+
+failed:
+	mutex_unlock(&msblk->meta_index_mutex);
+	return meta;
+}
+
+
+void release_meta_index(struct inode *inode, struct meta_index *meta)
+{
+	meta->locked = 0;
+	smp_mb();
+}
+
+
+static int read_block_index(struct super_block *s, int blocks, char *block_list,
+		long long *start_block, int *offset)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	unsigned int *block_listp;
+	int block = 0;
+	
+	if (msblk->swap) {
+		char sblock_list[blocks << 2];
+
+		if (!squashfs_get_cached_block(s, sblock_list, *start_block,
+				*offset, blocks << 2, start_block, offset)) {
+			ERROR("Unable to read block list [%llx:%x]\n",
+				*start_block, *offset);
+			goto failure;
+		}
+		SQUASHFS_SWAP_INTS(((unsigned int *)block_list),
+				((unsigned int *)sblock_list), blocks);
+	} else
+		if (!squashfs_get_cached_block(s, block_list, *start_block,
+				*offset, blocks << 2, start_block, offset)) {
+			ERROR("Unable to read block list [%llx:%x]\n",
+				*start_block, *offset);
+			goto failure;
+		}
+
+	for (block_listp = (unsigned int *) block_list; blocks;
+				block_listp++, blocks --)
+		block += SQUASHFS_COMPRESSED_SIZE_BLOCK(*block_listp);
+
+	return block;
+
+failure:
+	return -1;
+}
+
+
+#define SIZE 256
+
+static inline int calculate_skip(int blocks) {
+	int skip = (blocks - 1) / ((SQUASHFS_SLOTS * SQUASHFS_META_ENTRIES + 1) * SQUASHFS_META_INDEXES);
+	return skip >= 7 ? 7 : skip + 1;
+}
+
+
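+/*
+ * Use the cached meta index to skip as far as possible through the file's
+ * block list towards block "index".  On return *index_block/*index_offset
+ * point into the block list, *data_block is the corresponding on-disk data
+ * position, and the return value is the file block number actually reached.
+ */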
+static int get_meta_index(struct inode *inode, int index,
+		long long *index_block, int *index_offset,
+		long long *data_block, char *block_list)
+{
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	int skip = calculate_skip(i_size_read(inode) >> sblk->block_log);
+	int offset = 0;
+	struct meta_index *meta;
+	struct meta_entry *meta_entry;
+	long long cur_index_block = SQUASHFS_I(inode)->u.s1.block_list_start;
+	int cur_offset = SQUASHFS_I(inode)->offset;
+	long long cur_data_block = SQUASHFS_I(inode)->start_block;
+	int i;
+ 
+	index /= SQUASHFS_META_INDEXES * skip;
+
+	while ( offset < index ) {
+		meta = locate_meta_index(inode, index, offset + 1);
+
+		if (meta == NULL) {
+			if ((meta = empty_meta_index(inode, offset + 1,
+							skip)) == NULL)
+				goto all_done;
+		} else {
+			if(meta->entries == 0)
+				goto failed;
+			offset = index < meta->offset + meta->entries ? index :
+				meta->offset + meta->entries - 1;
+			meta_entry = &meta->meta_entry[offset - meta->offset];
+			cur_index_block = meta_entry->index_block + sblk->inode_table_start;
+			cur_offset = meta_entry->offset;
+			cur_data_block = meta_entry->data_block;
+			TRACE("get_meta_index: offset %d, meta->offset %d, "
+				"meta->entries %d\n", offset, meta->offset,
+				meta->entries);
+			TRACE("get_meta_index: index_block 0x%llx, offset 0x%x"
+				" data_block 0x%llx\n", cur_index_block,
+				cur_offset, cur_data_block);
+		}
+
+		for (i = meta->offset + meta->entries; i <= index &&
+				i < meta->offset + SQUASHFS_META_ENTRIES; i++) {
+			int blocks = skip * SQUASHFS_META_INDEXES;
+
+			while (blocks) {
+				int block = blocks > (SIZE >> 2) ? (SIZE >> 2) :
+					blocks;
+				int res = read_block_index(inode->i_sb, block,
+					block_list, &cur_index_block,
+					&cur_offset);
+
+				if (res == -1)
+					goto failed;
+
+				cur_data_block += res;
+				blocks -= block;
+			}
+
+			meta_entry = &meta->meta_entry[i - meta->offset];
+			meta_entry->index_block = cur_index_block - sblk->inode_table_start;
+			meta_entry->offset = cur_offset;
+			meta_entry->data_block = cur_data_block;
+			meta->entries ++;
+			offset ++;
+		}
+
+		TRACE("get_meta_index: meta->offset %d, meta->entries %d\n",
+				meta->offset, meta->entries);
+
+		release_meta_index(inode, meta);
+	}
+
+all_done:
+	*index_block = cur_index_block;
+	*index_offset = cur_offset;
+	*data_block = cur_data_block;
+
+	return offset * SQUASHFS_META_INDEXES * skip;
+
+failed:
+	release_meta_index(inode, meta);
+	return -1;
+}
+
+
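+/*
+ * Walk the block list to locate data block "index" of a regular file,
+ * returning its on-disk start and storing its (compressed) length in *bsize.
+ * Returns 0 on failure.
+ */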
+static long long read_blocklist(struct inode *inode, int index,
+				int readahead_blks, char *block_list,
+				unsigned short **block_p, unsigned int *bsize)
+{
+	long long block_ptr;
+	int offset;
+	long long block;
+	int res = get_meta_index(inode, index, &block_ptr, &offset, &block,
+		block_list);
+
+	TRACE("read_blocklist: res %d, index %d, block_ptr 0x%llx, offset"
+		       " 0x%x, block 0x%llx\n", res, index, block_ptr, offset,
+		       block);
+
+	if(res == -1)
+		goto failure;
+
+	index -= res;
+
+	while ( index ) {
+		int blocks = index > (SIZE >> 2) ? (SIZE >> 2) : index;
+		int res = read_block_index(inode->i_sb, blocks, block_list,
+			&block_ptr, &offset);
+		if (res == -1)
+			goto failure;
+		block += res;
+		index -= blocks;
+	}
+
+	if (read_block_index(inode->i_sb, 1, block_list,
+			&block_ptr, &offset) == -1)
+		goto failure;
+	*bsize = *((unsigned int *) block_list);
+
+	return block;
+
+failure:
+	return 0;
+}
+
+
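+/*
+ * readpage() used when the filesystem block size is larger than the page
+ * size.  The whole data block (or tail-end fragment) is decompressed into
+ * the read_page buffer and every page cache page it covers is filled in.
+ */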
+static int squashfs_readpage(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	unsigned char *block_list;
+	long long block;
+	unsigned int bsize, i = 0, bytes = 0, byte_offset = 0;
+	int index = page->index >> (sblk->block_log - PAGE_CACHE_SHIFT);
+ 	void *pageaddr;
+	struct squashfs_fragment_cache *fragment = NULL;
+	char *data_ptr = msblk->read_page;
+	
+	int mask = (1 << (sblk->block_log - PAGE_CACHE_SHIFT)) - 1;
+	int start_index = page->index & ~mask;
+	int end_index = start_index | mask;
+
+	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
+					page->index,
+					SQUASHFS_I(inode)->start_block);
+
+	if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
+		ERROR("Failed to allocate block_list\n");
+		goto skip_read;
+	}
+
+	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+					PAGE_CACHE_SHIFT))
+		goto skip_read;
+
+	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
+					|| index < (i_size_read(inode) >>
+					sblk->block_log)) {
+		if ((block = (msblk->read_blocklist)(inode, index, 1,
+					block_list, NULL, &bsize)) == 0)
+			goto skip_read;
+
+		mutex_lock(&msblk->read_page_mutex);
+		
+		if (!(bytes = squashfs_read_data(inode->i_sb, msblk->read_page,
+					block, bsize, NULL, sblk->block_size))) {
+			ERROR("Unable to read page, block %llx, size %x\n", block,
+					bsize);
+			mutex_unlock(&msblk->read_page_mutex);
+			goto skip_read;
+		}
+	} else {
+		if ((fragment = get_cached_fragment(inode->i_sb,
+					SQUASHFS_I(inode)->
+					u.s1.fragment_start_block,
+					SQUASHFS_I(inode)->u.s1.fragment_size))
+					== NULL) {
+			ERROR("Unable to read page, block %llx, size %x\n",
+					SQUASHFS_I(inode)->
+					u.s1.fragment_start_block,
+					(int) SQUASHFS_I(inode)->
+					u.s1.fragment_size);
+			goto skip_read;
+		}
+		bytes = SQUASHFS_I(inode)->u.s1.fragment_offset +
+					(i_size_read(inode) & (sblk->block_size
+					- 1));
+		byte_offset = SQUASHFS_I(inode)->u.s1.fragment_offset;
+		data_ptr = fragment->data;
+	}
+
+	for (i = start_index; i <= end_index && byte_offset < bytes;
+					i++, byte_offset += PAGE_CACHE_SIZE) {
+		struct page *push_page;
+		int avail = (bytes - byte_offset) > PAGE_CACHE_SIZE ?
+					PAGE_CACHE_SIZE : bytes - byte_offset;
+
+		TRACE("bytes %d, i %d, byte_offset %d, available_bytes %d\n",
+					bytes, i, byte_offset, avail);
+
+		push_page = (i == page->index) ? page :
+			grab_cache_page_nowait(page->mapping, i);
+
+		if (!push_page)
+			continue;
+
+		if (PageUptodate(push_page))
+			goto skip_page;
+
+ 		pageaddr = kmap_atomic(push_page, KM_USER0);
+		memcpy(pageaddr, data_ptr + byte_offset, avail);
+		memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
+		kunmap_atomic(pageaddr, KM_USER0);
+		flush_dcache_page(push_page);
+		SetPageUptodate(push_page);
+skip_page:
+		unlock_page(push_page);
+		if(i != page->index)
+			page_cache_release(push_page);
+	}
+
+	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
+					|| index < (i_size_read(inode) >>
+					sblk->block_log))
+		mutex_unlock(&msblk->read_page_mutex);
+	else
+		release_cached_fragment(msblk, fragment);
+
+	kfree(block_list);
+	return 0;
+
+skip_read:
+	pageaddr = kmap_atomic(page, KM_USER0);
+	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
+	kunmap_atomic(pageaddr, KM_USER0);
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	unlock_page(page);
+
+	kfree(block_list);
+	return 0;
+}
+
+
+static int squashfs_readpage4K(struct file *file, struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	unsigned char *block_list;
+	long long block;
+	unsigned int bsize, bytes = 0;
+ 	void *pageaddr;
+	
+	TRACE("Entered squashfs_readpage4K, page index %lx, start block %llx\n",
+					page->index,
+					SQUASHFS_I(inode)->start_block);
+
+	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+					PAGE_CACHE_SHIFT)) {
+		block_list = NULL;
+		goto skip_read;
+	}
+
+	if (!(block_list = kmalloc(SIZE, GFP_KERNEL))) {
+		ERROR("Failed to allocate block_list\n");
+		goto skip_read;
+	}
+
+	if (SQUASHFS_I(inode)->u.s1.fragment_start_block == SQUASHFS_INVALID_BLK
+					|| page->index < (i_size_read(inode) >>
+					sblk->block_log)) {
+		block = (msblk->read_blocklist)(inode, page->index, 1,
+					block_list, NULL, &bsize);
+		if(block == 0)
+			goto skip_read;
+
+		mutex_lock(&msblk->read_page_mutex);
+		bytes = squashfs_read_data(inode->i_sb, msblk->read_page, block,
+					bsize, NULL, sblk->block_size);
+		if (bytes) {
+			pageaddr = kmap_atomic(page, KM_USER0);
+			memcpy(pageaddr, msblk->read_page, bytes);
+			kunmap_atomic(pageaddr, KM_USER0);
+		} else
+			ERROR("Unable to read page, block %llx, size %x\n",
+					block, bsize);
+		mutex_unlock(&msblk->read_page_mutex);
+	} else {
+		struct squashfs_fragment_cache *fragment =
+			get_cached_fragment(inode->i_sb,
+					SQUASHFS_I(inode)->
+					u.s1.fragment_start_block,
+					SQUASHFS_I(inode)-> u.s1.fragment_size);
+		if (fragment) {
+			bytes = i_size_read(inode) & (sblk->block_size - 1);
+			pageaddr = kmap_atomic(page, KM_USER0);
+			memcpy(pageaddr, fragment->data + SQUASHFS_I(inode)->
+					u.s1.fragment_offset, bytes);
+			kunmap_atomic(pageaddr, KM_USER0);
+			release_cached_fragment(msblk, fragment);
+		} else
+			ERROR("Unable to read page, block %llx, size %x\n",
+					SQUASHFS_I(inode)->
+					u.s1.fragment_start_block, (int)
+					SQUASHFS_I(inode)-> u.s1.fragment_size);
+	}
+
+skip_read:
+	pageaddr = kmap_atomic(page, KM_USER0);
+	memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
+	kunmap_atomic(pageaddr, KM_USER0);
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+	unlock_page(page);
+
+	kfree(block_list);
+	return 0;
+}
+
+
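+/*
+ * Use the directory index to advance the directory scan as close as possible
+ * to f_pos, updating *next_block/*next_offset.  Returns the directory
+ * position reached (>= 3, accounting for the "." and ".." entries).
+ */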
+static int get_dir_index_using_offset(struct super_block *s, long long 
+				*next_block, unsigned int *next_offset,
+				long long index_start,
+				unsigned int index_offset, int i_count,
+				long long f_pos)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	int i, length = 0;
+	struct squashfs_dir_index index;
+
+	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
+					i_count, (unsigned int) f_pos);
+
+	f_pos -= 3;
+	if (f_pos == 0)
+		goto finish;
+
+	for (i = 0; i < i_count; i++) {
+		if (msblk->swap) {
+			struct squashfs_dir_index sindex;
+			squashfs_get_cached_block(s, (char *) &sindex,
+					index_start, index_offset,
+					sizeof(sindex), &index_start,
+					&index_offset);
+			SQUASHFS_SWAP_DIR_INDEX(&index, &sindex);
+		} else
+			squashfs_get_cached_block(s, (char *) &index,
+					index_start, index_offset,
+					sizeof(index), &index_start,
+					&index_offset);
+
+		if (index.index > f_pos)
+			break;
+
+		squashfs_get_cached_block(s, NULL, index_start, index_offset,
+					index.size + 1, &index_start,
+					&index_offset);
+
+		length = index.index;
+		*next_block = index.start_block + sblk->directory_table_start;
+	}
+
+	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
+
+finish:
+	return length + 3;
+}
+
+
+static int get_dir_index_using_name(struct super_block *s, long long
+				*next_block, unsigned int *next_offset,
+				long long index_start,
+				unsigned int index_offset, int i_count,
+				const char *name, int size)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	int i, length = 0;
+	struct squashfs_dir_index *index;
+	char *str;
+
+	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
+
+	if (!(str = kmalloc(sizeof(struct squashfs_dir_index) +
+		(SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL))) {
+		ERROR("Failed to allocate squashfs_dir_index\n");
+		goto failure;
+	}
+
+	index = (struct squashfs_dir_index *) (str + SQUASHFS_NAME_LEN + 1);
+	strncpy(str, name, size);
+	str[size] = '\0';
+
+	for (i = 0; i < i_count; i++) {
+		if (msblk->swap) {
+			struct squashfs_dir_index sindex;
+			squashfs_get_cached_block(s, (char *) &sindex,
+					index_start, index_offset,
+					sizeof(sindex), &index_start,
+					&index_offset);
+			SQUASHFS_SWAP_DIR_INDEX(index, &sindex);
+		} else
+			squashfs_get_cached_block(s, (char *) index,
+					index_start, index_offset,
+					sizeof(struct squashfs_dir_index),
+					&index_start, &index_offset);
+
+		squashfs_get_cached_block(s, index->name, index_start,
+					index_offset, index->size + 1,
+					&index_start, &index_offset);
+
+		index->name[index->size + 1] = '\0';
+
+		if (strcmp(index->name, str) > 0)
+			break;
+
+		length = index->index;
+		*next_block = index->start_block + sblk->directory_table_start;
+	}
+
+	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
+	kfree(str);
+failure:
+	return length + 3;
+}
+
+		
+static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir)
+{
+	struct inode *i = file->f_dentry->d_inode;
+	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	long long next_block = SQUASHFS_I(i)->start_block +
+		sblk->directory_table_start;
+	int next_offset = SQUASHFS_I(i)->offset, length = 0,
+		dir_count;
+	struct squashfs_dir_header dirh;
+	struct squashfs_dir_entry *dire;
+
+	TRACE("Entered squashfs_readdir [%llx:%x]\n", next_block, next_offset);
+
+	if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
+		SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
+		ERROR("Failed to allocate squashfs_dir_entry\n");
+		goto finish;
+	}
+
+	while(file->f_pos < 3) {
+		char *name;
+		int size, i_ino;
+
+		if(file->f_pos == 0) {
+			name = ".";
+			size = 1;
+			i_ino = i->i_ino;
+		} else {
+			name = "..";
+			size = 2;
+			i_ino = SQUASHFS_I(i)->u.s2.parent_inode;
+		}
+		TRACE("Calling filldir(%x, %s, %d, %d, %d, %d)\n",
+				(unsigned int) dirent, name, size, (int)
+				file->f_pos, i_ino,
+				squashfs_filetype_table[1]);
+
+		if (filldir(dirent, name, size,
+				file->f_pos, i_ino,
+				squashfs_filetype_table[1]) < 0) {
+				TRACE("Filldir returned less than 0\n");
+				goto finish;
+		}
+		file->f_pos += size;
+	}
+
+	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
+				SQUASHFS_I(i)->u.s2.directory_index_start,
+				SQUASHFS_I(i)->u.s2.directory_index_offset,
+				SQUASHFS_I(i)->u.s2.directory_index_count,
+				file->f_pos);
+
+	while (length < i_size_read(i)) {
+		/* read directory header */
+		if (msblk->swap) {
+			struct squashfs_dir_header sdirh;
+			
+			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
+					next_block, next_offset, sizeof(sdirh),
+					&next_block, &next_offset))
+				goto failed_read;
+
+			length += sizeof(sdirh);
+			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
+		} else {
+			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
+					next_block, next_offset, sizeof(dirh),
+					&next_block, &next_offset))
+				goto failed_read;
+
+			length += sizeof(dirh);
+		}
+
+		dir_count = dirh.count + 1;
+		while (dir_count--) {
+			if (msblk->swap) {
+				struct squashfs_dir_entry sdire;
+				if (!squashfs_get_cached_block(i->i_sb, (char *)
+						&sdire, next_block, next_offset,
+						sizeof(sdire), &next_block,
+						&next_offset))
+					goto failed_read;
+				
+				length += sizeof(sdire);
+				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
+			} else {
+				if (!squashfs_get_cached_block(i->i_sb, (char *)
+						dire, next_block, next_offset,
+						sizeof(*dire), &next_block,
+						&next_offset))
+					goto failed_read;
+
+				length += sizeof(*dire);
+			}
+
+			if (!squashfs_get_cached_block(i->i_sb, dire->name,
+						next_block, next_offset,
+						dire->size + 1, &next_block,
+						&next_offset))
+				goto failed_read;
+
+			length += dire->size + 1;
+
+			if (file->f_pos >= length)
+				continue;
+
+			dire->name[dire->size + 1] = '\0';
+
+			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d, %d)\n",
+					(unsigned int) dirent, dire->name,
+					dire->size + 1, (int) file->f_pos,
+					dirh.start_block, dire->offset,
+					dirh.inode_number + dire->inode_number,
+					squashfs_filetype_table[dire->type]);
+
+			if (filldir(dirent, dire->name, dire->size + 1,
+					file->f_pos,
+					dirh.inode_number + dire->inode_number,
+					squashfs_filetype_table[dire->type])
+					< 0) {
+				TRACE("Filldir returned less than 0\n");
+				goto finish;
+			}
+			file->f_pos = length;
+		}
+	}
+
+finish:
+	kfree(dire);
+	return 0;
+
+failed_read:
+	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
+		next_offset);
+	kfree(dire);
+	return 0;
+}
+
+
+static struct dentry *squashfs_lookup(struct inode *i, struct dentry *dentry,
+				struct nameidata *nd)
+{
+	const unsigned char *name = dentry->d_name.name;
+	int len = dentry->d_name.len;
+	struct inode *inode = NULL;
+	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	long long next_block = SQUASHFS_I(i)->start_block +
+				sblk->directory_table_start;
+	int next_offset = SQUASHFS_I(i)->offset, length = 0,
+				dir_count;
+	struct squashfs_dir_header dirh;
+	struct squashfs_dir_entry *dire;
+
+	TRACE("Entered squashfs_lookup [%llx:%x]\n", next_block, next_offset);
+
+	if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
+		SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
+		ERROR("Failed to allocate squashfs_dir_entry\n");
+		goto exit_lookup;
+	}
+
+	if (len > SQUASHFS_NAME_LEN)
+		goto exit_lookup;
+
+	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
+				SQUASHFS_I(i)->u.s2.directory_index_start,
+				SQUASHFS_I(i)->u.s2.directory_index_offset,
+				SQUASHFS_I(i)->u.s2.directory_index_count, name,
+				len);
+
+	while (length < i_size_read(i)) {
+		/* read directory header */
+		if (msblk->swap) {
+			struct squashfs_dir_header sdirh;
+			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
+					next_block, next_offset, sizeof(sdirh),
+					&next_block, &next_offset))
+				goto failed_read;
+
+			length += sizeof(sdirh);
+			SQUASHFS_SWAP_DIR_HEADER(&dirh, &sdirh);
+		} else {
+			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
+					next_block, next_offset, sizeof(dirh),
+					&next_block, &next_offset))
+				goto failed_read;
+
+			length += sizeof(dirh);
+		}
+
+		dir_count = dirh.count + 1;
+		while (dir_count--) {
+			if (msblk->swap) {
+				struct squashfs_dir_entry sdire;
+				if (!squashfs_get_cached_block(i->i_sb, (char *)
+						&sdire, next_block,next_offset,
+						sizeof(sdire), &next_block,
+						&next_offset))
+					goto failed_read;
+				
+				length += sizeof(sdire);
+				SQUASHFS_SWAP_DIR_ENTRY(dire, &sdire);
+			} else {
+				if (!squashfs_get_cached_block(i->i_sb, (char *)
+						dire, next_block,next_offset,
+						sizeof(*dire), &next_block,
+						&next_offset))
+					goto failed_read;
+
+				length += sizeof(*dire);
+			}
+
+			if (!squashfs_get_cached_block(i->i_sb, dire->name,
+					next_block, next_offset, dire->size + 1,
+					&next_block, &next_offset))
+				goto failed_read;
+
+			length += dire->size + 1;
+
+			if (name[0] < dire->name[0])
+				goto exit_lookup;
+
+			if ((len == dire->size + 1) && !strncmp(name, dire->name, len)) {
+				squashfs_inode_t ino = SQUASHFS_MKINODE(dirh.start_block,
+								dire->offset);
+
+				TRACE("calling squashfs_iget for directory "
+					"entry %s, inode %x:%x, %d\n", name,
+					dirh.start_block, dire->offset,
+					dirh.inode_number + dire->inode_number);
+
+				inode = squashfs_iget(i->i_sb, ino, dirh.inode_number + dire->inode_number);
+
+				goto exit_lookup;
+			}
+		}
+	}
+
+exit_lookup:
+	kfree(dire);
+	if (inode)
+		return d_splice_alias(inode, dentry);
+	d_add(dentry, inode);
+	return ERR_PTR(0);
+
+failed_read:
+	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
+		next_offset);
+	goto exit_lookup;
+}
+
+
+static int squashfs_remount(struct super_block *s, int *flags, char *data)
+{
+	*flags |= MS_RDONLY;
+	return 0;
+}
+
+
+static void squashfs_put_super(struct super_block *s)
+{
+	int i;
+
+	if (s->s_fs_info) {
+		struct squashfs_sb_info *sbi = s->s_fs_info;
+		if (sbi->block_cache)
+			for (i = 0; i < SQUASHFS_CACHED_BLKS; i++)
+				if (sbi->block_cache[i].block !=
+							SQUASHFS_INVALID_BLK)
+					kfree(sbi->block_cache[i].data);
+		if (sbi->fragment)
+			for (i = 0; i < SQUASHFS_CACHED_FRAGMENTS; i++) 
+				SQUASHFS_FREE(sbi->fragment[i].data);
+		kfree(sbi->fragment);
+		kfree(sbi->block_cache);
+		kfree(sbi->read_page);
+		kfree(sbi->uid);
+		kfree(sbi->fragment_index);
+		kfree(sbi->fragment_index_2);
+		kfree(sbi->meta_index);
+		kfree(s->s_fs_info);
+		s->s_fs_info = NULL;
+	}
+}
+
+
+static int squashfs_get_sb(struct file_system_type *fs_type, int flags,
+				const char *dev_name, void *data,
+				struct vfsmount *mnt)
+{
+	return get_sb_bdev(fs_type, flags, dev_name, data, squashfs_fill_super,
+				mnt);
+}
+
+
+static void free_sqlzma(void)
+{
+	int cpu;
+	struct sqlzma *p;
+
+	for_each_online_cpu(cpu) {
+		p = per_cpu(sqlzma, cpu);
+		if (p) {
+#ifdef KeepPreemptive
+			mutex_destroy(&p->mtx);
+#endif
+			sqlzma_fin(&p->un);
+			kfree(p);
+		}
+	}
+}
+
+static int __init init_squashfs_fs(void)
+{
+	struct sqlzma *p;
+	int cpu;
+	int err = init_inodecache();
+	if (err)
+		goto out;
+
+	for_each_online_cpu(cpu) {
+		dpri("%d: %p\n", cpu, per_cpu(sqlzma, cpu));
+		err = -ENOMEM;
+		p = kmalloc(sizeof(struct sqlzma), GFP_KERNEL);
+		if (p) {
+#ifdef KeepPreemptive
+			mutex_init(&p->mtx);
+#endif
+			err = sqlzma_init(&p->un, 1, 0);
+			if (unlikely(err)) {
+				ERROR("Failed to initialize uncompress workspace\n");
+				break;
+			}
+			per_cpu(sqlzma, cpu) = p;
+			err = 0;
+		} else
+			break;
+	}
+	if (unlikely(err)) {
+		free_sqlzma();
+		goto out;
+	}
+
+	printk(KERN_INFO "squashfs: version 3.2-r2 (2007/01/15) "
+		"Phillip Lougher\n"
+		"squashfs: LZMA support for slax.org by jro\n");
+
+	if ((err = register_filesystem(&squashfs_fs_type))) {
+		free_sqlzma();
+		destroy_inodecache();
+	}
+
+out:
+	return err;
+}
+
+
+static void __exit exit_squashfs_fs(void)
+{
+	unregister_filesystem(&squashfs_fs_type);
+	free_sqlzma();
+	destroy_inodecache();
+}
+
+
+static struct kmem_cache * squashfs_inode_cachep;
+
+
+static struct inode *squashfs_alloc_inode(struct super_block *sb)
+{
+	struct squashfs_inode_info *ei;
+	ei = kmem_cache_alloc(squashfs_inode_cachep, GFP_KERNEL);
+	if (!ei)
+		return NULL;
+	return &ei->vfs_inode;
+}
+
+
+static void squashfs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(squashfs_inode_cachep, SQUASHFS_I(inode));
+}
+
+
+static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
+{
+	struct squashfs_inode_info *ei = foo;
+
+	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+							SLAB_CTOR_CONSTRUCTOR)
+		inode_init_once(&ei->vfs_inode);
+}
+ 
+
+static int __init init_inodecache(void)
+{
+	squashfs_inode_cachep = kmem_cache_create("squashfs_inode_cache",
+	     sizeof(struct squashfs_inode_info),
+	     0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
+	     init_once, NULL);
+	if (squashfs_inode_cachep == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+
+static void destroy_inodecache(void)
+{
+	kmem_cache_destroy(squashfs_inode_cachep);
+}
+
+
+module_init(init_squashfs_fs);
+module_exit(exit_squashfs_fs);
+MODULE_DESCRIPTION("squashfs 3.2-r2, a compressed read-only filesystem, and LZMA support for slax.org");
+MODULE_AUTHOR("Phillip Lougher <phillip@lougher.org.uk>, and LZMA support for slax.org by jro");
+MODULE_LICENSE("GPL");
diff -Nruw linux-2.6.20.14-fbx/fs/squashfs./Makefile linux-2.6.20.14-fbx/fs/squashfs/Makefile
--- linux-2.6.20.14-fbx/fs/squashfs./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/fs/squashfs/Makefile	2010-12-29 19:30:08.361441361 +0100
@@ -0,0 +1,7 @@
+#
+# Makefile for the linux squashfs routines.
+#
+
+obj-$(CONFIG_SQUASHFS) += squashfs.o
+squashfs-y += inode.o
+squashfs-y += squashfs2_0.o
diff -Nruw linux-2.6.20.14-fbx/fs/squashfs./squashfs2_0.c linux-2.6.20.14-fbx/fs/squashfs/squashfs2_0.c
--- linux-2.6.20.14-fbx/fs/squashfs./squashfs2_0.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/fs/squashfs/squashfs2_0.c	2010-12-29 19:30:08.361441361 +0100
@@ -0,0 +1,742 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
+ * Phillip Lougher <phillip@lougher.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * squashfs2_0.c
+ */
+
+#include <linux/squashfs_fs.h>
+#include <linux/module.h>
+#include <linux/zlib.h>
+#include <linux/fs.h>
+#include <linux/squashfs_fs_sb.h>
+#include <linux/squashfs_fs_i.h>
+
+#include "squashfs.h"
+static int squashfs_readdir_2(struct file *file, void *dirent, filldir_t filldir);
+static struct dentry *squashfs_lookup_2(struct inode *, struct dentry *,
+				struct nameidata *);
+
+static struct file_operations squashfs_dir_ops_2 = {
+	.read = generic_read_dir,
+	.readdir = squashfs_readdir_2
+};
+
+static struct inode_operations squashfs_dir_inode_ops_2 = {
+	.lookup = squashfs_lookup_2
+};
+
+static unsigned char squashfs_filetype_table[] = {
+	DT_UNKNOWN, DT_DIR, DT_REG, DT_LNK, DT_BLK, DT_CHR, DT_FIFO, DT_SOCK
+};
+
+static int read_fragment_index_table_2(struct super_block *s)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+
+	if (!(msblk->fragment_index_2 = kmalloc(SQUASHFS_FRAGMENT_INDEX_BYTES_2
+					(sblk->fragments), GFP_KERNEL))) {
+		ERROR("Failed to allocate fragment index table\n");
+		return 0;
+	}
+   
+	if (SQUASHFS_FRAGMENT_INDEX_BYTES_2(sblk->fragments) &&
+					!squashfs_read_data(s, (char *)
+					msblk->fragment_index_2,
+					sblk->fragment_table_start,
+					SQUASHFS_FRAGMENT_INDEX_BYTES_2
+					(sblk->fragments) |
+					SQUASHFS_COMPRESSED_BIT_BLOCK, NULL,
+					SQUASHFS_FRAGMENT_INDEX_BYTES_2
+					(sblk->fragments))) {
+		ERROR("unable to read fragment index table\n");
+		return 0;
+	}
+
+	if (msblk->swap) {
+		int i;
+		unsigned int fragment;
+
+		for (i = 0; i < SQUASHFS_FRAGMENT_INDEXES_2(sblk->fragments);
+									i++) {
+			SQUASHFS_SWAP_FRAGMENT_INDEXES_2((&fragment),
+						&msblk->fragment_index_2[i], 1);
+			msblk->fragment_index_2[i] = fragment;
+		}
+	}
+
+	return 1;
+}
+
+
+static int get_fragment_location_2(struct super_block *s, unsigned int fragment,
+				long long *fragment_start_block,
+				unsigned int *fragment_size)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	long long start_block =
+		msblk->fragment_index_2[SQUASHFS_FRAGMENT_INDEX_2(fragment)];
+	int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET_2(fragment);
+	struct squashfs_fragment_entry_2 fragment_entry;
+
+	if (msblk->swap) {
+		struct squashfs_fragment_entry_2 sfragment_entry;
+
+		if (!squashfs_get_cached_block(s, (char *) &sfragment_entry,
+					start_block, offset,
+					sizeof(sfragment_entry), &start_block,
+					&offset))
+			goto out;
+		SQUASHFS_SWAP_FRAGMENT_ENTRY_2(&fragment_entry, &sfragment_entry);
+	} else
+		if (!squashfs_get_cached_block(s, (char *) &fragment_entry,
+					start_block, offset,
+					sizeof(fragment_entry), &start_block,
+					&offset))
+			goto out;
+
+	*fragment_start_block = fragment_entry.start_block;
+	*fragment_size = fragment_entry.size;
+
+	return 1;
+
+out:
+	return 0;
+}
+
+
+static void squashfs_new_inode(struct squashfs_sb_info *msblk, struct inode *i,
+		struct squashfs_base_inode_header_2 *inodeb, unsigned int ino)
+{
+	struct squashfs_super_block *sblk = &msblk->sblk;
+
+	i->i_ino = ino;
+	i->i_mtime.tv_sec = sblk->mkfs_time;
+	i->i_atime.tv_sec = sblk->mkfs_time;
+	i->i_ctime.tv_sec = sblk->mkfs_time;
+	i->i_uid = msblk->uid[inodeb->uid];
+	i->i_mode = inodeb->mode;
+	i->i_nlink = 1;
+	i->i_size = 0;
+	if (inodeb->guid == SQUASHFS_GUIDS)
+		i->i_gid = i->i_uid;
+	else
+		i->i_gid = msblk->guid[inodeb->guid];
+}
+
+
+static int squashfs_read_inode_2(struct inode *i, squashfs_inode_t inode)
+{
+	struct super_block *s = i->i_sb;
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	unsigned int block = SQUASHFS_INODE_BLK(inode) +
+		sblk->inode_table_start;
+	unsigned int offset = SQUASHFS_INODE_OFFSET(inode);
+	unsigned int ino = i->i_ino;
+	long long next_block;
+	unsigned int next_offset;
+	union squashfs_inode_header_2 id, sid;
+	struct squashfs_base_inode_header_2 *inodeb = &id.base,
+					  *sinodeb = &sid.base;
+
+	TRACE("Entered squashfs_iget\n");
+
+	if (msblk->swap) {
+		if (!squashfs_get_cached_block(s, (char *) sinodeb, block,
+					offset, sizeof(*sinodeb), &next_block,
+					&next_offset))
+			goto failed_read;
+		SQUASHFS_SWAP_BASE_INODE_HEADER_2(inodeb, sinodeb,
+					sizeof(*sinodeb));
+	} else
+		if (!squashfs_get_cached_block(s, (char *) inodeb, block,
+					offset, sizeof(*inodeb), &next_block,
+					&next_offset))
+			goto failed_read;
+
+	squashfs_new_inode(msblk, i, inodeb, ino);
+
+	switch(inodeb->inode_type) {
+		case SQUASHFS_FILE_TYPE: {
+			struct squashfs_reg_inode_header_2 *inodep = &id.reg;
+			struct squashfs_reg_inode_header_2 *sinodep = &sid.reg;
+			long long frag_blk;
+			unsigned int frag_size = 0;
+				
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_REG_INODE_HEADER_2(inodep, sinodep);
+			} else
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			frag_blk = SQUASHFS_INVALID_BLK;
+			if (inodep->fragment != SQUASHFS_INVALID_FRAG &&
+					!get_fragment_location_2(s,
+					inodep->fragment, &frag_blk, &frag_size))
+				goto failed_read;
+				
+			i->i_size = inodep->file_size;
+			i->i_fop = &generic_ro_fops;
+			i->i_mode |= S_IFREG;
+			i->i_mtime.tv_sec = inodep->mtime;
+			i->i_atime.tv_sec = inodep->mtime;
+			i->i_ctime.tv_sec = inodep->mtime;
+			i->i_blocks = ((i->i_size - 1) >> 9) + 1;
+			SQUASHFS_I(i)->u.s1.fragment_start_block = frag_blk;
+			SQUASHFS_I(i)->u.s1.fragment_size = frag_size;
+			SQUASHFS_I(i)->u.s1.fragment_offset = inodep->offset;
+			SQUASHFS_I(i)->start_block = inodep->start_block;
+			SQUASHFS_I(i)->u.s1.block_list_start = next_block;
+			SQUASHFS_I(i)->offset = next_offset;
+			if (sblk->block_size > 4096)
+				i->i_data.a_ops = &squashfs_aops;
+			else
+				i->i_data.a_ops = &squashfs_aops_4K;
+
+			TRACE("File inode %x:%x, start_block %x, "
+					"block_list_start %llx, offset %x\n",
+					SQUASHFS_INODE_BLK(inode), offset,
+					inodep->start_block, next_block,
+					next_offset);
+			break;
+		}
+		case SQUASHFS_DIR_TYPE: {
+			struct squashfs_dir_inode_header_2 *inodep = &id.dir;
+			struct squashfs_dir_inode_header_2 *sinodep = &sid.dir;
+
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_DIR_INODE_HEADER_2(inodep, sinodep);
+			} else
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			i->i_size = inodep->file_size;
+			i->i_op = &squashfs_dir_inode_ops_2;
+			i->i_fop = &squashfs_dir_ops_2;
+			i->i_mode |= S_IFDIR;
+			i->i_mtime.tv_sec = inodep->mtime;
+			i->i_atime.tv_sec = inodep->mtime;
+			i->i_ctime.tv_sec = inodep->mtime;
+			SQUASHFS_I(i)->start_block = inodep->start_block;
+			SQUASHFS_I(i)->offset = inodep->offset;
+			SQUASHFS_I(i)->u.s2.directory_index_count = 0;
+			SQUASHFS_I(i)->u.s2.parent_inode = 0;
+
+			TRACE("Directory inode %x:%x, start_block %x, offset "
+					"%x\n", SQUASHFS_INODE_BLK(inode),
+					offset, inodep->start_block,
+					inodep->offset);
+			break;
+		}
+		case SQUASHFS_LDIR_TYPE: {
+			struct squashfs_ldir_inode_header_2 *inodep = &id.ldir;
+			struct squashfs_ldir_inode_header_2 *sinodep = &sid.ldir;
+
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_LDIR_INODE_HEADER_2(inodep,
+						sinodep);
+			} else
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			i->i_size = inodep->file_size;
+			i->i_op = &squashfs_dir_inode_ops_2;
+			i->i_fop = &squashfs_dir_ops_2;
+			i->i_mode |= S_IFDIR;
+			i->i_mtime.tv_sec = inodep->mtime;
+			i->i_atime.tv_sec = inodep->mtime;
+			i->i_ctime.tv_sec = inodep->mtime;
+			SQUASHFS_I(i)->start_block = inodep->start_block;
+			SQUASHFS_I(i)->offset = inodep->offset;
+			SQUASHFS_I(i)->u.s2.directory_index_start = next_block;
+			SQUASHFS_I(i)->u.s2.directory_index_offset =
+								next_offset;
+			SQUASHFS_I(i)->u.s2.directory_index_count =
+								inodep->i_count;
+			SQUASHFS_I(i)->u.s2.parent_inode = 0;
+
+			TRACE("Long directory inode %x:%x, start_block %x, "
+					"offset %x\n",
+					SQUASHFS_INODE_BLK(inode), offset,
+					inodep->start_block, inodep->offset);
+			break;
+		}
+		case SQUASHFS_SYMLINK_TYPE: {
+			struct squashfs_symlink_inode_header_2 *inodep =
+								&id.symlink;
+			struct squashfs_symlink_inode_header_2 *sinodep =
+								&sid.symlink;
+	
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(inodep,
+								sinodep);
+			} else
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			i->i_size = inodep->symlink_size;
+			i->i_op = &page_symlink_inode_operations;
+			i->i_data.a_ops = &squashfs_symlink_aops;
+			i->i_mode |= S_IFLNK;
+			SQUASHFS_I(i)->start_block = next_block;
+			SQUASHFS_I(i)->offset = next_offset;
+
+			TRACE("Symbolic link inode %x:%x, start_block %llx, "
+					"offset %x\n",
+					SQUASHFS_INODE_BLK(inode), offset,
+					next_block, next_offset);
+			break;
+		 }
+		 case SQUASHFS_BLKDEV_TYPE:
+		 case SQUASHFS_CHRDEV_TYPE: {
+			struct squashfs_dev_inode_header_2 *inodep = &id.dev;
+			struct squashfs_dev_inode_header_2 *sinodep = &sid.dev;
+
+			if (msblk->swap) {
+				if (!squashfs_get_cached_block(s, (char *)
+						sinodep, block, offset,
+						sizeof(*sinodep), &next_block,
+						&next_offset))
+					goto failed_read;
+				SQUASHFS_SWAP_DEV_INODE_HEADER_2(inodep, sinodep);
+			} else	
+				if (!squashfs_get_cached_block(s, (char *)
+						inodep, block, offset,
+						sizeof(*inodep), &next_block,
+						&next_offset))
+					goto failed_read;
+
+			i->i_mode |= (inodeb->inode_type ==
+					SQUASHFS_CHRDEV_TYPE) ?  S_IFCHR :
+					S_IFBLK;
+			init_special_inode(i, i->i_mode,
+					old_decode_dev(inodep->rdev));
+
+			TRACE("Device inode %x:%x, rdev %x\n",
+					SQUASHFS_INODE_BLK(inode), offset,
+					inodep->rdev);
+			break;
+		 }
+		 case SQUASHFS_FIFO_TYPE:
+		 case SQUASHFS_SOCKET_TYPE: {
+
+			i->i_mode |= (inodeb->inode_type == SQUASHFS_FIFO_TYPE)
+							? S_IFIFO : S_IFSOCK;
+			init_special_inode(i, i->i_mode, 0);
+			break;
+		 }
+		 default:
+			ERROR("Unknown inode type %d in squashfs_iget!\n",
+					inodeb->inode_type);
+			goto failed_read1;
+	}
+	
+	return 1;
+
+failed_read:
+	ERROR("Unable to read inode [%x:%x]\n", block, offset);
+
+failed_read1:
+	return 0;
+}
+
+
+static int get_dir_index_using_offset(struct super_block *s, long long 
+				*next_block, unsigned int *next_offset,
+				long long index_start,
+				unsigned int index_offset, int i_count,
+				long long f_pos)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	int i, length = 0;
+	struct squashfs_dir_index_2 index;
+
+	TRACE("Entered get_dir_index_using_offset, i_count %d, f_pos %d\n",
+					i_count, (unsigned int) f_pos);
+
+	if (f_pos == 0)
+		goto finish;
+
+	for (i = 0; i < i_count; i++) {
+		if (msblk->swap) {
+			struct squashfs_dir_index_2 sindex;
+			squashfs_get_cached_block(s, (char *) &sindex,
+					index_start, index_offset,
+					sizeof(sindex), &index_start,
+					&index_offset);
+			SQUASHFS_SWAP_DIR_INDEX_2(&index, &sindex);
+		} else
+			squashfs_get_cached_block(s, (char *) &index,
+					index_start, index_offset,
+					sizeof(index), &index_start,
+					&index_offset);
+
+		if (index.index > f_pos)
+			break;
+
+		squashfs_get_cached_block(s, NULL, index_start, index_offset,
+					index.size + 1, &index_start,
+					&index_offset);
+
+		length = index.index;
+		*next_block = index.start_block + sblk->directory_table_start;
+	}
+
+	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
+
+finish:
+	return length;
+}
+
+
+static int get_dir_index_using_name(struct super_block *s, long long
+				*next_block, unsigned int *next_offset,
+				long long index_start,
+				unsigned int index_offset, int i_count,
+				const char *name, int size)
+{
+	struct squashfs_sb_info *msblk = s->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	int i, length = 0;
+	struct squashfs_dir_index_2 *index;
+	char *str;
+
+	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
+
+	if (!(str = kmalloc(sizeof(struct squashfs_dir_index) +
+		(SQUASHFS_NAME_LEN + 1) * 2, GFP_KERNEL))) {
+		ERROR("Failed to allocate squashfs_dir_index\n");
+		goto failure;
+	}
+
+	index = (struct squashfs_dir_index_2 *) (str + SQUASHFS_NAME_LEN + 1);
+	strncpy(str, name, size);
+	str[size] = '\0';
+
+	for (i = 0; i < i_count; i++) {
+		if (msblk->swap) {
+			struct squashfs_dir_index_2 sindex;
+			squashfs_get_cached_block(s, (char *) &sindex,
+					index_start, index_offset,
+					sizeof(sindex), &index_start,
+					&index_offset);
+			SQUASHFS_SWAP_DIR_INDEX_2(index, &sindex);
+		} else
+			squashfs_get_cached_block(s, (char *) index,
+					index_start, index_offset,
+					sizeof(struct squashfs_dir_index_2),
+					&index_start, &index_offset);
+
+		squashfs_get_cached_block(s, index->name, index_start,
+					index_offset, index->size + 1,
+					&index_start, &index_offset);
+
+		index->name[index->size + 1] = '\0';
+
+		if (strcmp(index->name, str) > 0)
+			break;
+
+		length = index->index;
+		*next_block = index->start_block + sblk->directory_table_start;
+	}
+
+	*next_offset = (length + *next_offset) % SQUASHFS_METADATA_SIZE;
+	kfree(str);
+failure:
+	return length;
+}
+
+		
+static int squashfs_readdir_2(struct file *file, void *dirent, filldir_t filldir)
+{
+	struct inode *i = file->f_dentry->d_inode;
+	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	long long next_block = SQUASHFS_I(i)->start_block +
+		sblk->directory_table_start;
+	int next_offset = SQUASHFS_I(i)->offset, length = 0,
+		dir_count;
+	struct squashfs_dir_header_2 dirh;
+	struct squashfs_dir_entry_2 *dire;
+
+	TRACE("Entered squashfs_readdir_2 [%llx:%x]\n", next_block, next_offset);
+
+	if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
+		SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
+		ERROR("Failed to allocate squashfs_dir_entry\n");
+		goto finish;
+	}
+
+	length = get_dir_index_using_offset(i->i_sb, &next_block, &next_offset,
+				SQUASHFS_I(i)->u.s2.directory_index_start,
+				SQUASHFS_I(i)->u.s2.directory_index_offset,
+				SQUASHFS_I(i)->u.s2.directory_index_count,
+				file->f_pos);
+
+	while (length < i_size_read(i)) {
+		/* read directory header */
+		if (msblk->swap) {
+			struct squashfs_dir_header_2 sdirh;
+			
+			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
+					next_block, next_offset, sizeof(sdirh),
+					&next_block, &next_offset))
+				goto failed_read;
+
+			length += sizeof(sdirh);
+			SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
+		} else {
+			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
+					next_block, next_offset, sizeof(dirh),
+					&next_block, &next_offset))
+				goto failed_read;
+
+			length += sizeof(dirh);
+		}
+
+		dir_count = dirh.count + 1;
+		while (dir_count--) {
+			if (msblk->swap) {
+				struct squashfs_dir_entry_2 sdire;
+				if (!squashfs_get_cached_block(i->i_sb, (char *)
+						&sdire, next_block, next_offset,
+						sizeof(sdire), &next_block,
+						&next_offset))
+					goto failed_read;
+				
+				length += sizeof(sdire);
+				SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
+			} else {
+				if (!squashfs_get_cached_block(i->i_sb, (char *)
+						dire, next_block, next_offset,
+						sizeof(*dire), &next_block,
+						&next_offset))
+					goto failed_read;
+
+				length += sizeof(*dire);
+			}
+
+			if (!squashfs_get_cached_block(i->i_sb, dire->name,
+						next_block, next_offset,
+						dire->size + 1, &next_block,
+						&next_offset))
+				goto failed_read;
+
+			length += dire->size + 1;
+
+			if (file->f_pos >= length)
+				continue;
+
+			dire->name[dire->size + 1] = '\0';
+
+			TRACE("Calling filldir(%x, %s, %d, %d, %x:%x, %d)\n",
+					(unsigned int) dirent, dire->name,
+					dire->size + 1, (int) file->f_pos,
+					dirh.start_block, dire->offset,
+					squashfs_filetype_table[dire->type]);
+
+			if (filldir(dirent, dire->name, dire->size + 1,
+					file->f_pos, SQUASHFS_MK_VFS_INODE(
+					dirh.start_block, dire->offset),
+					squashfs_filetype_table[dire->type])
+					< 0) {
+				TRACE("Filldir returned less than 0\n");
+				goto finish;
+			}
+			file->f_pos = length;
+		}
+	}
+
+finish:
+	kfree(dire);
+	return 0;
+
+failed_read:
+	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
+		next_offset);
+	kfree(dire);
+	return 0;
+}
+
+
+static struct dentry *squashfs_lookup_2(struct inode *i, struct dentry *dentry,
+				struct nameidata *nd)
+{
+	const unsigned char *name = dentry->d_name.name;
+	int len = dentry->d_name.len;
+	struct inode *inode = NULL;
+	struct squashfs_sb_info *msblk = i->i_sb->s_fs_info;
+	struct squashfs_super_block *sblk = &msblk->sblk;
+	long long next_block = SQUASHFS_I(i)->start_block +
+				sblk->directory_table_start;
+	int next_offset = SQUASHFS_I(i)->offset, length = 0,
+				dir_count;
+	struct squashfs_dir_header_2 dirh;
+	struct squashfs_dir_entry_2 *dire;
+	int sorted = sblk->s_major == 2 && sblk->s_minor >= 1;
+
+	TRACE("Entered squashfs_lookup_2 [%llx:%x]\n", next_block, next_offset);
+
+	if (!(dire = kmalloc(sizeof(struct squashfs_dir_entry) +
+		SQUASHFS_NAME_LEN + 1, GFP_KERNEL))) {
+		ERROR("Failed to allocate squashfs_dir_entry\n");
+		goto exit_loop;
+	}
+
+	if (len > SQUASHFS_NAME_LEN)
+		goto exit_loop;
+
+	length = get_dir_index_using_name(i->i_sb, &next_block, &next_offset,
+				SQUASHFS_I(i)->u.s2.directory_index_start,
+				SQUASHFS_I(i)->u.s2.directory_index_offset,
+				SQUASHFS_I(i)->u.s2.directory_index_count, name,
+				len);
+
+	while (length < i_size_read(i)) {
+		/* read directory header */
+		if (msblk->swap) {
+			struct squashfs_dir_header_2 sdirh;
+			if (!squashfs_get_cached_block(i->i_sb, (char *) &sdirh,
+					next_block, next_offset, sizeof(sdirh),
+					&next_block, &next_offset))
+				goto failed_read;
+
+			length += sizeof(sdirh);
+			SQUASHFS_SWAP_DIR_HEADER_2(&dirh, &sdirh);
+		} else {
+			if (!squashfs_get_cached_block(i->i_sb, (char *) &dirh,
+					next_block, next_offset, sizeof(dirh),
+					&next_block, &next_offset))
+				goto failed_read;
+
+			length += sizeof(dirh);
+		}
+
+		dir_count = dirh.count + 1;
+		while (dir_count--) {
+			if (msblk->swap) {
+				struct squashfs_dir_entry_2 sdire;
+				if (!squashfs_get_cached_block(i->i_sb, (char *)
+						&sdire, next_block,next_offset,
+						sizeof(sdire), &next_block,
+						&next_offset))
+					goto failed_read;
+				
+				length += sizeof(sdire);
+				SQUASHFS_SWAP_DIR_ENTRY_2(dire, &sdire);
+			} else {
+				if (!squashfs_get_cached_block(i->i_sb, (char *)
+						dire, next_block,next_offset,
+						sizeof(*dire), &next_block,
+						&next_offset))
+					goto failed_read;
+
+				length += sizeof(*dire);
+			}
+
+			if (!squashfs_get_cached_block(i->i_sb, dire->name,
+					next_block, next_offset, dire->size + 1,
+					&next_block, &next_offset))
+				goto failed_read;
+
+			length += dire->size + 1;
+
+			if (sorted && name[0] < dire->name[0])
+				goto exit_loop;
+
+			if ((len == dire->size + 1) && !strncmp(name,
+						dire->name, len)) {
+				squashfs_inode_t ino =
+					SQUASHFS_MKINODE(dirh.start_block,
+					dire->offset);
+				unsigned int inode_number = SQUASHFS_MK_VFS_INODE(dirh.start_block,
+					dire->offset);
+
+				TRACE("calling squashfs_iget for directory "
+					"entry %s, inode %x:%x, %lld\n", name,
+					dirh.start_block, dire->offset, ino);
+
+				inode = squashfs_iget(i->i_sb, ino, inode_number);
+
+				goto exit_loop;
+			}
+		}
+	}
+
+exit_loop:
+	kfree(dire);
+	d_add(dentry, inode);
+	return ERR_PTR(0);
+
+failed_read:
+	ERROR("Unable to read directory block [%llx:%x]\n", next_block,
+		next_offset);
+	goto exit_loop;
+}
+
+
+int squashfs_2_0_supported(struct squashfs_sb_info *msblk)
+{
+	struct squashfs_super_block *sblk = &msblk->sblk;
+
+	msblk->read_inode = squashfs_read_inode_2;
+	msblk->read_fragment_index_table = read_fragment_index_table_2;
+
+	sblk->bytes_used = sblk->bytes_used_2;
+	sblk->uid_start = sblk->uid_start_2;
+	sblk->guid_start = sblk->guid_start_2;
+	sblk->inode_table_start = sblk->inode_table_start_2;
+	sblk->directory_table_start = sblk->directory_table_start_2;
+	sblk->fragment_table_start = sblk->fragment_table_start_2;
+
+	return 1;
+}
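
The 2.0 support hook above does not read anything itself: it copies the version-2 superblock fields into their common locations and installs squashfs_read_inode_2 and read_fragment_index_table_2 behind the msblk function pointers. A minimal sketch of how a caller would then dispatch through those pointers; the helper name read_one_inode is illustrative and not part of this patch:

/*
 * Sketch only: version dispatch goes through the function pointers
 * filled in by squashfs_2_0_supported().  read_one_inode() is a
 * hypothetical helper, not code from this patch.
 */
static int read_one_inode(struct super_block *s, struct inode *i,
			  squashfs_inode_t ino)
{
	struct squashfs_sb_info *msblk = s->s_fs_info;

	/* points at squashfs_read_inode_2 once 2.x support is selected */
	return msblk->read_inode(i, ino);
}
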
diff -Nruw linux-2.6.20.14-fbx/fs/squashfs./squashfs.h linux-2.6.20.14-fbx/fs/squashfs/squashfs.h
--- linux-2.6.20.14-fbx/fs/squashfs./squashfs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/fs/squashfs/squashfs.h	2010-12-29 19:30:08.361441361 +0100
@@ -0,0 +1,87 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
+ * Phillip Lougher <phillip@lougher.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * squashfs.h
+ */
+
+#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
+#undef CONFIG_SQUASHFS_1_0_COMPATIBILITY
+#endif
+
+#ifdef SQUASHFS_TRACE
+#define TRACE(s, args...)	printk(KERN_NOTICE "SQUASHFS: "s, ## args)
+#else
+#define TRACE(s, args...)	{}
+#endif
+
+#define ERROR(s, args...)	printk(KERN_ERR "SQUASHFS error: "s, ## args)
+
+#define SERROR(s, args...)	do { \
+				if (!silent) \
+				printk(KERN_ERR "SQUASHFS error: "s, ## args);\
+				} while(0)
+
+#define WARNING(s, args...)	printk(KERN_WARNING "SQUASHFS: "s, ## args)
+
+static inline struct squashfs_inode_info *SQUASHFS_I(struct inode *inode)
+{
+	return list_entry(inode, struct squashfs_inode_info, vfs_inode);
+}
+
+#if defined(CONFIG_SQUASHFS_1_0_COMPATIBILITY) || defined(CONFIG_SQUASHFS_2_0_COMPATIBILITY)
+#define SQSH_EXTERN
+extern unsigned int squashfs_read_data(struct super_block *s, char *buffer,
+				long long index, unsigned int length,
+				long long *next_index, int srclength);
+extern int squashfs_get_cached_block(struct super_block *s, char *buffer,
+				long long block, unsigned int offset,
+				int length, long long *next_block,
+				unsigned int *next_offset);
+extern void release_cached_fragment(struct squashfs_sb_info *msblk, struct
+					squashfs_fragment_cache *fragment);
+extern struct squashfs_fragment_cache *get_cached_fragment(struct super_block
+					*s, long long start_block,
+					int length);
+extern struct inode *squashfs_iget(struct super_block *s, squashfs_inode_t inode, unsigned int inode_number);
+extern const struct address_space_operations squashfs_symlink_aops;
+extern const struct address_space_operations squashfs_aops;
+extern const struct address_space_operations squashfs_aops_4K;
+extern struct inode_operations squashfs_dir_inode_ops;
+#else
+#define SQSH_EXTERN static
+#endif
+
+#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
+extern int squashfs_1_0_supported(struct squashfs_sb_info *msblk);
+#else
+static inline int squashfs_1_0_supported(struct squashfs_sb_info *msblk)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
+extern int squashfs_2_0_supported(struct squashfs_sb_info *msblk);
+#else
+static inline int squashfs_2_0_supported(struct squashfs_sb_info *msblk)
+{
+	return 0;
+}
+#endif
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./devices.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/devices.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./devices.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/devices.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,71 @@
+
+#ifndef __DEVICES_H
+#define __DEVICES_H
+
+#include <asm/arch/io.h>
+#include <asm/arch/regs.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+
+/*
+ * SOC idma engine
+ */
+#define MV88FXX81_IDMA_BASE_PA	(MV_INTER_REGS_BASE_PA + MV_IDMA_REGS_BASE)
+#define MV88FXX81_IDMA_END_PA	(MV88FXX81_IDMA_BASE_PA + MV_IDMA_REGS_SIZE)
+
+/*
+ * board ethernet mac
+ */
+struct switch_ops
+{
+	struct net_device *dev;
+	int (*mdio_read)(struct net_device *dev, int phy_id, int location);
+	void (*mdio_write)(struct net_device *dev, int phy_id, int location,
+			   int val);
+};
+
+struct mv88fxx81_eth_platform_data
+{
+	char mac_addr[ETH_ALEN];
+
+	/* default speed/duplex value to apply at startup */
+	struct ethtool_cmd default_param;
+
+	/* enable this if no phy is used between mac and remote */
+	int ignore_phy;
+
+	/* don't try to check for gigabit support */
+	int disable_gmii;
+
+	/* MII layer init function */
+	int (*mii_init)(struct switch_ops *data);
+};
+
+struct mv88f5181_spi_platform_data
+{
+	int num_cs;
+	int gpio_cs;
+};
+
+#define MV88FXX81_ETH_BASE_PA	(MV_INTER_REGS_BASE_PA + MV_ETH_REGS_BASE)
+#define MV88FXX81_ETH_END_PA	(MV88FXX81_ETH_BASE_PA + MV_ETH_REGS_SIZE)
+
+/*
+ * board uart0 and uart1
+ */
+#define MV88FXX81_UART0_BASE_PA	(MV_INTER_REGS_BASE_PA + MV_UART0_REGS_BASE)
+#define MV88FXX81_UART1_BASE_PA	(MV_INTER_REGS_BASE_PA + MV_UART1_REGS_BASE)
+
+/*
+ * board i2c (twsi)
+ */
+#define MV88FXX81_TWSI_BASE_PA	(MV_INTER_REGS_BASE_PA + MV_TWSI_REGS_BASE)
+#define MV88FXX81_TWSI_END_PA	(MV88FXX81_TWSI_BASE_PA + MV_TWSI_REGS_SIZE)
+
+/*
+ * board spi
+ */
+#define MV88F5181_SPI_BASE_PA	(MV_INTER_REGS_BASE_PA + MV_SPI_REGS_BASE)
+#define MV88F5181_SPI_END_PA	(MV88F5181_SPI_BASE_PA + MV_SPI_REGS_SIZE)
+
+#endif /* __DEVICES_H */
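
The platform data above is meant to be handed to the ethernet driver through a platform_device. A hedged sketch of what the board code might look like; the device name "mv88fxx81-eth" and the resource layout are assumptions, not taken from this patch:

#include <linux/platform_device.h>

/* Assumed board-code sketch: attach mv88fxx81_eth_platform_data to the
 * MAC driver.  mac_addr is normally filled in from the bootloader. */
static struct mv88fxx81_eth_platform_data eth_pdata = {
	.ignore_phy   = 0,
	.disable_gmii = 0,
};

static struct resource eth_resources[] = {
	{
		.start = MV88FXX81_ETH_BASE_PA,
		.end   = MV88FXX81_ETH_END_PA - 1,
		.flags = IORESOURCE_MEM,
	},
};

static struct platform_device eth_device = {
	.name           = "mv88fxx81-eth",	/* assumed driver name */
	.id             = 0,
	.resource       = eth_resources,
	.num_resources  = ARRAY_SIZE(eth_resources),
	.dev            = {
		.platform_data = &eth_pdata,
	},
};

The board init code would then call platform_device_register(&eth_device).
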
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./dma.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/dma.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./dma.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/dma.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_ARCH_DMA_H
+#define __ASM_ARCH_DMA_H
+
+#define MAX_DMA_ADDRESS		0xffffffff
+
+#define MAX_DMA_CHANNELS	0
+
+#endif /* __ASM_ARCH_DMA_H */
+
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./entry-macro.S linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/entry-macro.S
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./entry-macro.S	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/entry-macro.S	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,20 @@
+/*
+ * Comes from orion.git, adapted to use our register names.
+ */
+
+#include <asm/arch/io.h>
+#include <asm/arch/regs.h>
+
+	.macro  disable_fiq
+	.endm
+
+	.macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
+	ldr	\base, =(MV_INTER_REGS_BASE_PA + MV_IRQ_CAUSE_REG)
+	ldr	\irqstat, [\base, #0]		@ main cause
+	ldr	\tmp, [\base, #(MV_IRQ_MASK_REG - MV_IRQ_CAUSE_REG)] @ main mask
+	mov	\irqnr, #0			@ default irqnr
+	@ find cause bits that are unmasked
+	ands	\irqstat, \irqstat, \tmp	@ clear Z flag if any
+	clzne	\irqnr, \irqstat		@ calc irqnr
+	rsbne	\irqnr, \irqnr, #31
+	.endm
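
get_irqnr_and_base selects the highest pending, unmasked bit of the main cause register and converts it to an IRQ number with clz. The same computation in C, shown only to clarify the bit arithmetic:

/* C equivalent of the assembly above (illustration only): returns the
 * highest pending unmasked main interrupt, or -1 if none is pending. */
static inline int mv_pending_irq(unsigned int cause, unsigned int mask)
{
	unsigned int pending = cause & mask;

	if (!pending)
		return -1;
	return 31 - __builtin_clz(pending);	/* clz + rsb #31 */
}
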
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./gpio.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/gpio.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./gpio.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/gpio.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,28 @@
+/*
+ * gpio.h for linux-mv
+ * Created by <nschichan@corp.free.fr> on Sun Sep 24 17:17:30 2006
+ * Freebox SA
+ */
+
+#ifndef __ASM_ARCH_GPIO_H__
+# define __ASM_ARCH_GPIO_H__
+
+struct mv_gpio_data
+{
+	uint32_t	direction;
+	uint32_t	polarity;
+	uint32_t	dataout;
+};
+
+struct fbxgpio_pin;
+
+int	mv_get_gpio_datain(int gpio);
+void	mv_set_gpio_dataout(int gpio, int data);
+int	mv_get_gpio_dataout(int gpio);
+void	mv_set_gpio_direction(int gpio, int is_input);
+int	mv_get_gpio_direction(int gpio);
+
+void	mv_gpio_init(const struct mv_gpio_data *);
+
+
+#endif /* !__ASM_ARCH_GPIO_H__ */
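
A short usage sketch of the GPIO API declared above; the pin number is arbitrary, and it assumes is_input == 0 selects output (consistent with the parameter name):

/* Sketch: drive GPIO 7 high and read the output latch back.
 * The pin number and the is_input == 0 convention are assumptions. */
static void example_gpio_set(void)
{
	mv_set_gpio_direction(7, 0);		/* 0: output (assumed) */
	mv_set_gpio_dataout(7, 1);

	if (mv_get_gpio_dataout(7) != 1)
		printk(KERN_WARNING "gpio 7 readback mismatch\n");
}
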
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./hardware.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/hardware.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./hardware.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/hardware.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,32 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_ARCH_HARDWARE_H
+#define __ASM_ARCH_HARDWARE_H
+
+#include <asm/sizes.h>
+#include <asm/io.h>
+
+#define pcibios_assign_all_busses()     1	/* assign a bus number
+ 						   over the bridge
+ 						   while scanning
+ 						   it */
+
+#define PCIBIOS_MIN_IO          0x1000		/* min IO allocate for
+						   PCI dev */
+#define PCIBIOS_MIN_MEM         0x01000000	/* min MEM allocate
+						   for PCI dev */
+
+#endif
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./io.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/io.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./io.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/io.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,87 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#include <asm/sizes.h>
+
+#define IO_SPACE_LIMIT		0xffffffff
+
+#define __io(a)			((void __iomem *)((a)))
+#define __mem_pci(a)		(a)
+#define __mem_isa(a)		(a)
+
+/*
+ * Start of internal registers
+ */
+#define MV_INTER_REGS_BASE_PA	(0xF1000000)
+
+/*
+ * Internal registers are mapped at the same address
+ */
+#define MV_REGS_VA(x)		((void __iomem *)(x))
+
+/*
+ * handy macro to read internal registers
+ */
+#define mv_readb(a)	(*(volatile unsigned char *)	\
+				(MV_REGS_VA((a) + MV_INTER_REGS_BASE_PA)))
+#define mv_readw(a)	(*(volatile unsigned short *)	\
+				(MV_REGS_VA((a) + MV_INTER_REGS_BASE_PA)))
+#define mv_readl(a)	(*(volatile unsigned int *)	\
+				(MV_REGS_VA((a) + MV_INTER_REGS_BASE_PA)))
+
+#define mv_writeb(v,a)	(*(volatile unsigned char *)	\
+				(MV_REGS_VA((a) + MV_INTER_REGS_BASE_PA)) = (v))
+#define mv_writew(v,a)	(*(volatile unsigned short *)	\
+				(MV_REGS_VA((a) + MV_INTER_REGS_BASE_PA)) = (v))
+#define mv_writel(v,a)	(*(volatile unsigned int *)	\
+				(MV_REGS_VA((a) + MV_INTER_REGS_BASE_PA)) = (v))
+
+/*
+ * PCI addresses
+ */
+#define PCI1_MEM_BASE		0xE0000000
+#define PCI1_MEM_SIZE		SZ_256M
+
+#define PCI1_IO_BASE		0xF0000000
+#define PCI1_IO_SIZE		SZ_1M
+
+/*
+ * used by DMA code to know the beginning of the device mapping
+ */
+#define DEVICE_BASE		0xF0000000UL
+#define DEVICE_SIZE		SZ_256M
+
+/*
+ * Flash address (boot CS)
+ */
+#define FLASH_BASE		0xFE000000
+#define FLASH_SIZE		SZ_32M
+
+/*
+ * DevCS0
+ */
+#define DEVCS0_BASE		0xFC000000
+#define DEVCS0_SIZE		SZ_32M
+
+/*
+ * DevCS1
+ */
+#define DEVCS1_BASE		0xFA000000
+#define DEVCS1_SIZE		SZ_32M
+
+#endif
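
The mv_readl/mv_writel helpers take an offset relative to MV_INTER_REGS_BASE_PA, so register macros from regs.h (below) can be passed directly. For example, unmasking one source in the main interrupt mask register could look like this sketch:

/* Sketch only: set one bit in the main IRQ mask register.
 * MV_IRQ_MASK_REG is defined in asm/arch/regs.h. */
static inline void mv_unmask_main_irq(int irq)
{
	unsigned int mask;

	mask = mv_readl(MV_IRQ_MASK_REG);
	mask |= 1 << irq;
	mv_writel(mask, MV_IRQ_MASK_REG);
}
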
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./irqs.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/irqs.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./irqs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/irqs.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,97 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_ARCH_MV88FXX81_IRQS_H
+# define __ASM_ARCH_MV88FXX81_IRQS_H
+
+/*
+ *  Interrupt numbers
+ */
+#define IRQ_START			0
+#define IRQ_BRIDGE			0
+#define IRQ_UART0			3
+#define IRQ_UART1                       4
+#define IRQ_TWSI			5
+#define IRQ_GPIO_0_7			6
+#define IRQ_GPIO_8_15			7
+#define IRQ_GPIO_16_23			8
+#define IRQ_GPIO_24_31			9
+#define IRQ_PEX0_ERR			10
+#define IRQ_PEX0_INT			11
+#define IRQ_PEX1_ERR			12
+#define IRQ_PEX1_INT			13
+#define IRQ_PCI_ERR			15
+#define IRQ_USB_BR_ERR			16
+#define IRQ_USB_CTRL(x)			((x==0)? 17:12)
+#define IRQ_GB_RX			18
+#define IRQ_GB_TX			19
+#define IRQ_GB_MISC			20
+#define IRQ_GB_SUM			21
+#define IRQ_GB_ERR			22
+#define IRQ_IDMA_ERR			23
+#define IRQ_IDMA_0			24
+#define IRQ_IDMA_1			25
+#define IRQ_IDMA_2			26
+#define IRQ_IDMA_3			27
+#define CESA_IRQ			28
+
+#define IRQ_GPIO_START			32
+#define IRQ_ASM_GPIO_START		32
+#define IRQ_GPIO_0			32
+#define IRQ_GPIO_1			33
+#define IRQ_GPIO_2			34
+#define IRQ_GPIO_3			35
+#define IRQ_GPIO_4			36
+#define IRQ_GPIO_5			37
+#define IRQ_GPIO_6			38
+#define IRQ_GPIO_7			39
+#define IRQ_GPIO_8			40
+#define IRQ_GPIO_9			41
+#define IRQ_GPIO_10			42
+#define IRQ_GPIO_11			43
+#define IRQ_GPIO_12			44
+#define IRQ_GPIO_13			45
+#define IRQ_GPIO_14			46
+#define IRQ_GPIO_15			47
+#define IRQ_GPIO_16			48
+#define IRQ_GPIO_17			49
+#define IRQ_GPIO_18			50
+#define IRQ_GPIO_19			51
+#define IRQ_GPIO_20			52
+#define IRQ_GPIO_21			53
+#define IRQ_GPIO_22			54
+#define IRQ_GPIO_23			55
+#define IRQ_GPIO_24			56
+#define IRQ_GPIO_25			57
+#define IRQ_GPIO_26			58
+#define IRQ_GPIO_27			59
+#define IRQ_GPIO_28			60
+#define IRQ_GPIO_29			61
+#define IRQ_GPIO_30			62
+#define IRQ_GPIO_31			63
+
+#define NR_IRQS				64
+
+#define MV_VALID_INT_LOW		0x2cd9
+#define MV_VALID_INT_HIGH		0xffff
+
+#define GPIO_IRQ(gpio_num)		(IRQ_GPIO_START + (gpio_num))
+
+/* timer */
+#define TIMER_IRQ			IRQ_BRIDGE
+#define TIMER_IRQ_BIT_MASK(x)		(1 << ((x) + 1))
+
+#endif /* __ASM_ARCH_MV88FXX81_IRQS_H */
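
GPIO_IRQ() maps a pin number into the 32..63 range used by the second-level GPIO interrupt handling. A hedged example of claiming such an interrupt; the pin, handler, flags and name are illustrative only:

/* Sketch: request the interrupt for GPIO pin 5 (pin choice arbitrary). */
static irqreturn_t example_gpio_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_claim_gpio5_irq(void *priv)
{
	return request_irq(GPIO_IRQ(5), example_gpio_isr, 0,
			   "example-gpio5", priv);
}
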
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./memory.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/memory.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./memory.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/memory.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,57 @@
+/*
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#ifndef __ASM_ARCH_MMU_H
+#define __ASM_ARCH_MMU_H
+
+/*
+ * Task size: 3GB minus a 16MB gap (0xbf000000)
+ */
+#define TASK_SIZE      (0xbf000000)
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (0x40000000)
+
+/*
+ * Page offset: 3GB
+ */
+#define PAGE_OFFSET    (0xc0000000)
+#define PHYS_OFFSET    (0x00000000)
+
+/*
+ * the dram is contiguous
+ */
+#define __virt_to_phys__is_a_macro
+#define __virt_to_phys(vpage) ((vpage) - PAGE_OFFSET)
+#define __phys_to_virt__is_a_macro
+#define __phys_to_virt(ppage) ((ppage) + PAGE_OFFSET)
+
+/*
+ * Virtual view <-> DMA view memory address translations
+ * virt_to_bus: Used to translate the virtual address to an
+ *              address suitable to be passed to set_dma_addr
+ * bus_to_virt: Used to convert an address for DMA operations
+ *              to an address that the kernel can use.
+ */
+#define __virt_to_bus__is_a_macro
+#define __virt_to_bus(x)       (x - PAGE_OFFSET)
+#define __bus_to_virt__is_a_macro
+#define __bus_to_virt(x)       (x + PAGE_OFFSET)
+
+#endif
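
The mapping above is a plain linear offset, so translations reduce to adding or subtracting PAGE_OFFSET. Worked examples of the arithmetic:

/* Worked examples of the linear mapping (addresses purely illustrative):
 *   __virt_to_phys(0xc0100000) == 0xc0100000 - 0xc0000000 == 0x00100000
 *   __phys_to_virt(0x00200000) == 0x00200000 + 0xc0000000 == 0xc0200000
 * Bus addresses translate identically, since __virt_to_bus applies the
 * same PAGE_OFFSET delta.
 */
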
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./mux.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/mux.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./mux.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/mux.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,21 @@
+/*
+ * mux.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Fri Dec 29 20:52:53 2006
+ * Freebox SA
+ */
+
+#ifndef MUX_H
+# define MUX_H
+
+struct mv_mux_data
+{
+	uint32_t mpp_0_7;
+	uint32_t mpp_8_15;
+	uint32_t mpp_16_19;
+	uint32_t dev_mux;
+};
+
+void __init
+mv_mux_init(const struct mv_mux_data *data);
+
+#endif /* !MUX_H */
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./regs.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/regs.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/regs.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,873 @@
+
+#ifndef __REGS_H
+#define __REGS_H
+
+/*
+ * Unit0: DDR registers
+ */
+#define MV_DRAM_REGS_BASE		(0x0)
+#define MV_DRAM_CS_BASE_REG(x)		(MV_DRAM_REGS_BASE + 0x1500 + (x) * 8)
+
+/* These macros describe DRAM_CS_BASE reg bits */
+#define MV_DRAM_CS_BASE_MASK		0xff000000
+#define MV_DRAM_CS_BASE_SHIFT		24
+
+#define MV_DRAM_CS_SIZE_REG(x)		(MV_DRAM_REGS_BASE + 0x1504 + (x) * 8)
+
+/* These macros describe DRAM_CS_SIZE reg bits */
+#define MV_DRAM_CS_SIZE_ENABLED		(1 << 0)
+#define MV_DRAM_CS_SIZE_MASK		0xff000000
+#define MV_DRAM_CS_SIZE_SHIFT		24
+
+
+/* MPP registers */
+#define MV_MPP_REGS_BASE		(0x10000)
+#define MV_MPP_CTL_0_REG		(MV_MPP_REGS_BASE + 0x0)
+#define MV_MPP_CTL_1_REG		(MV_MPP_REGS_BASE + 0x4)
+#define MV_MPP_CTL_2_REG		(MV_MPP_REGS_BASE + 0x50)
+#define MV_DEV_MUX_REG			(MV_MPP_REGS_BASE + 0x8)
+#define MV_MPP_SAMPLE_AT_RESET_REG	(MV_MPP_REGS_BASE + 0x10)
+
+/*
+ * Unit?: GPP (GPIO) registers
+ */
+#define MV_GPIO_REGS_BASE		(0x10100)
+#define MV_GPIO_DATAOUT_REG		(MV_GPIO_REGS_BASE + 0x0)
+#define MV_GPIO_INPUT_EN_REG		(MV_GPIO_REGS_BASE + 0x4)
+#define MV_GPIO_BLINK_EN_REG		(MV_GPIO_REGS_BASE + 0x8)
+#define MV_GPIO_DATAIN_POLARITY_REG	(MV_GPIO_REGS_BASE + 0xC)
+#define MV_GPIO_DATAIN_REG		(MV_GPIO_REGS_BASE + 0x10)
+#define MV_GPIO_IRQ_CAUSE_REG		(MV_GPIO_REGS_BASE + 0x14)
+#define MV_GPIO_IRQ_EDGE_MASK_REG      	(MV_GPIO_REGS_BASE + 0x18)
+#define MV_GPIO_IRQ_LEVEL_MASK_REG	(MV_GPIO_REGS_BASE + 0x1C)
+
+
+/*
+ * Unit?: Serial port registers
+ */
+#define MV_UART_REGS_BASE		(0x12000)
+#define MV_UART0_REGS_BASE		(MV_UART_REGS_BASE)
+#define MV_UART1_REGS_BASE		(MV_UART_REGS_BASE + 0x100)
+
+/* Uart0 */
+#define MV_UART0_THR_REG		(MV_UART0_REGS_BASE)
+#define MV_UART0_LSR_REG		(MV_UART0_REGS_BASE + 0x14)
+
+/* Uart1 */
+#define MV_UART1_THR_REG		(MV_UART1_REGS_BASE)
+#define MV_UART1_LSR_REG		(MV_UART1_REGS_BASE + 0x14)
+
+
+/*
+ * Unit?: TWSI registers
+ */
+#define MV_TWSI_REGS_BASE		(0x11000)
+#define MV_TWSI_REGS_SIZE		(0x20)
+
+
+/*
+ * Unit2: CPU registers (AHB to MBUS bridge registers)
+ */
+#define MV_CPU_REGS_BASE		(0x20000)
+
+#define MV_CPU_WIN_REGS_BASE		(MV_CPU_REGS_BASE + 0x0)
+#define MV_CPU_CTL_REGS_BASE		(MV_CPU_REGS_BASE + 0x100)
+#define MV_CPU_INTR_REGS_BASE		(MV_CPU_REGS_BASE + 0x200)
+#define MV_TIMER_REGS_BASE		(MV_CPU_REGS_BASE + 0x300)
+#define MV_TIMER_REGS_SIZE		(0x28)
+
+/* CPU Address Map registers */
+#define MV_CPU_WIN_CTL_REG(x)		(MV_CPU_WIN_REGS_BASE + (x) * 16 + 0x0)
+
+/* These macros describe WIN(X) control reg bits */
+#define MV_CPU_WIN_ENABLED		(1 << 0)
+#define MV_CPU_WIN_WRPROTECT		(1 << 1)
+#define MV_CPU_WIN_TARGET_MASK		0xf0
+#define MV_CPU_WIN_TARGET_SHIFT		4
+#define MV_CPU_WIN_ATTR_MASK		0xff00
+#define MV_CPU_WIN_ATTR_SHIFT		8
+#define MV_CPU_WIN_SIZE_MASK		0xffff0000
+#define MV_CPU_WIN_SIZE_SHIFT		16
+
+#define MV_CPU_WIN_BASE_REG(x)		(MV_CPU_WIN_REGS_BASE + (x) * 16 + 0x4)
+
+/* These macros describe WIN(X) base reg bits */
+#define MV_CPU_WIN_BASE_MASK		0xffff0000
+#define MV_CPU_WIN_BASE_SHIFT		16
+
+#define MV_CPU_WIN_REMAPL_REG(x)	(MV_CPU_WIN_REGS_BASE + (x) * 16 + 0x8)
+#define MV_CPU_WIN_REMAPH_REG(x)	(MV_CPU_WIN_REGS_BASE + (x) * 16 + 0xc)
+#define MV_CPU_IREG_ADDR_REG		(MV_CPU_WIN_REGS_BASE + 0x80)
+
+/* CPU Control and Status registers */
+#define MV_CPU_CTL_REG			(MV_CPU_CTL_REGS_BASE + 0x0)
+#define MV_CPU_CTL_AND_STATUS_REG	(MV_CPU_CTL_REGS_BASE + 0x4)
+#define MV_CPU_RSTOUTN_MASK_REG		(MV_CPU_CTL_REGS_BASE + 0x8)
+#define MV_CPU_SYS_SOFT_RST_REG		(MV_CPU_CTL_REGS_BASE + 0xc)
+#define MV_CPU_BRIDGE_INT_CAUSE_REG	(MV_CPU_CTL_REGS_BASE + 0x10)
+#define MV_CPU_BRIDGE_INT_MASK_REG    	(MV_CPU_CTL_REGS_BASE + 0x14)
+
+/* CPU irq registers */
+#define MV_IRQ_CAUSE_REG		(MV_CPU_INTR_REGS_BASE + 0x0)
+#define MV_IRQ_MASK_REG			(MV_CPU_INTR_REGS_BASE + 0x4)
+
+/* CPU Timers registers */
+#define MV_TIMER_CTL_REG		(MV_TIMER_REGS_BASE + 0x0)
+#define MV_TIMER_RELOAD_BASE		(MV_TIMER_REGS_BASE + 0x10)
+#define MV_TIMER_COUNTER_BASE		(MV_TIMER_REGS_BASE + 0x14)
+
+/* CPU control and status register constants */
+#define MV_CPU_PCI_DISABLE	(1 << 0)
+
+
+/*
+ * Unit3: PCI registers
+ */
+#define MV_PCI_REGS_BASE		(0x30000)
+
+#define MV_PCI_CS0_BAR_SIZE_REG		(MV_PCI_REGS_BASE + 0xc08)
+#define MV_PCI_CS1_BAR_SIZE_REG		(MV_PCI_REGS_BASE + 0xd08)
+#define MV_PCI_CS2_BAR_SIZE_REG		(MV_PCI_REGS_BASE + 0xc0c)
+#define MV_PCI_CS3_BAR_SIZE_REG		(MV_PCI_REGS_BASE + 0xd0c)
+#define MV_PCI_DEV_CS0_BAR_SIZE_REG	(MV_PCI_REGS_BASE + 0xc10)
+#define MV_PCI_DEV_CS1_BAR_SIZE_REG	(MV_PCI_REGS_BASE + 0xd10)
+#define MV_PCI_DEV_CS2_BAR_SIZE_REG	(MV_PCI_REGS_BASE + 0xd18)
+#define MV_PCI_BOOT_CS_BAR_SIZE_REG	(MV_PCI_REGS_BASE + 0xd18)
+#define MV_PCI_P2P_MEM0_BAR_SIZE_REG	(MV_PCI_REGS_BASE + 0xd1c)
+#define MV_PCI_P2P_IO_BAR_SIZE_REG	(MV_PCI_REGS_BASE + 0xd24)
+#define MV_PCI_EXP_ROM_BAR_SIZE_REG	(MV_PCI_REGS_BASE + 0xd2c)
+
+/* These macros describe BAR_SIZE regs bits */
+#define MV_PCI_BAR_SIZE_MASK		(0xfffff000)
+#define MV_PCI_BAR_SIZE_SHIFT		(12)
+
+#define MV_PCI_BAR_ENABLE_REG		(MV_PCI_REGS_BASE + 0xc3c)
+
+/* These macros describe BAR_ENABLE reg bits */
+#define MV_PCI_CS0_BAR_DISABLE		(1 << 0)
+#define MV_PCI_CS1_BAR_DISABLE		(1 << 1)
+#define MV_PCI_CS2_BAR_DISABLE		(1 << 2)
+#define MV_PCI_CS3_BAR_DISABLE		(1 << 3)
+#define MV_PCI_DEV_CS0_BAR_DISABLE	(1 << 4)
+#define MV_PCI_DEV_CS1_BAR_DISABLE	(1 << 5)
+#define MV_PCI_DEV_CS2_BAR_DISABLE	(1 << 6)
+#define MV_PCI_BOOT_CS_BAR_DISABLE	(1 << 8)
+#define MV_PCI_INTREGS_MEM_BAR_DISABLE	(1 << 9)
+#define MV_PCI_INTREGS_IO_BAR_DISABLE	(1 << 10)
+#define MV_PCI_P2P_MEM0_BAR_DISABLE	(1 << 11)
+#define MV_PCI_P2P_IO_BAR_DISABLE	(1 << 13)
+#define MV_PCI_RESERVED_BAR_ENABLE	(~((1 << 7) | (1 << 14) | (1 << 15)))
+
+#define MV_PCI_CS0_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xc48)
+#define MV_PCI_CS1_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xd48)
+#define MV_PCI_CS2_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xc4c)
+#define MV_PCI_CS3_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xd4c)
+#define MV_PCI_DEV_CS0_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xc50)
+#define MV_PCI_DEV_CS1_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xd50)
+#define MV_PCI_DEV_CS2_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xd58)
+#define MV_PCI_BOOT_CS_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xd54)
+#define MV_PCI_P2P_MEM0_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xd5c)
+#define MV_PCI_P2P_IO_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xd60)
+#define MV_PCI_P2P_IO_BAR_REMAPH_REG	(MV_PCI_REGS_BASE + 0xd6c)
+#define MV_PCI_EXP_ROM_BAR_REMAP_REG	(MV_PCI_REGS_BASE + 0xf38)
+
+#define MV_PCI_DB_SELECT_REG		(MV_PCI_REGS_BASE + 0xc1c)
+
+/* These macros describe DB_SELECT reg bits */
+#define MV_PCI_DB_SELECT_CS0		(0)
+#define MV_PCI_DB_SELECT_CS1		(1)
+#define MV_PCI_DB_SELECT_CS2		(2)
+#define MV_PCI_DB_SELECT_CS3		(3)
+#define MV_PCI_DB_SELECT_DB0_SHIFT	(0)
+#define MV_PCI_DB_SELECT_DB1_SHIFT	(2)
+#define MV_PCI_DB_SELECT_DB2_SHIFT	(4)
+#define MV_PCI_DB_SELECT_DB3_SHIFT	(6)
+
+#define MV_PCI_ADDR_DECODE_CTL_REG	(MV_PCI_REGS_BASE + 0xd3c)
+
+/* These macros describe ADDR_DECODE_CTL reg bits */
+#define MV_PCI_REMAP_WR_DISABLE		(1 << 0)
+#define MV_PCI_REMAP_VPD_HADDR_SHIFT	(8)
+
+#define MV_PCI_ARBITER_CTL_REG		(MV_PCI_REGS_BASE + 0x1d00)
+
+/* These macros describe ARBITER_CTL reg bits */
+#define MV_PCI_ARBITER_BD_ENABLE	(1 << 1)
+#define MV_PCI_ARBITER_BV_SHIFT		3
+#define MV_PCI_ARBITER_BV_MASK		(0x78)
+#define MV_PCI_ARBITER_PD_SHIFT		14
+#define MV_PCI_ARBITER_PD_MASK		(0x1FC000)
+#define MV_PCI_ARBITER_ENABLE		(1 << 31)
+
+#define MV_PCI_P2P_CONFIG_REG		(MV_PCI_REGS_BASE + 0x1d14)
+
+/* These macros describe P2P_CONFIG reg bits */
+#define MV_PCI_P2P_BUS_NUMBER_SHIFT	16
+#define MV_PCI_P2P_BUS_NUMBER_MASK	(0xff0000)
+#define MV_PCI_P2P_DEV_NUMBER_SHIFT	24
+#define MV_PCI_P2P_DEV_NUMBER_MASK	(0xf000000)
+
+#define MV_PCI_CFG_ADDR_REG		(MV_PCI_REGS_BASE + 0xc78)
+
+/* These macros describe CFG_ADDR reg bits */
+#define MV_PCI_CFG_ADDR_REG_SHIFT	2
+#define MV_PCI_CFG_ADDR_REG_MASK	(0xfc)
+#define MV_PCI_CFG_ADDR_FUNC_SHIFT	8
+#define MV_PCI_CFG_ADDR_FUNC_MASK	(0x700)
+#define MV_PCI_CFG_ADDR_DEV_SHIFT	11
+#define MV_PCI_CFG_ADDR_DEV_MASK	(0xf800)
+#define MV_PCI_CFG_ADDR_BUS_SHIFT	16
+#define MV_PCI_CFG_ADDR_BUS_MASK	(0xff0000)
+#define MV_PCI_CFG_ADDR_CFG_ENABLED	(1 << 31)
+
+#define MV_PCI_CFG_DATA_REG		(MV_PCI_REGS_BASE + 0xc7c)
+
+#define MV_PCI_SERR_MASK_REG		(MV_PCI_REGS_BASE + 0xc28)
+
+/* These macros describe SERR_MASK reg bits */
+#define MV_PCI_SERR_DPERR_ENABLED	(1 << 0)
+#define MV_PCI_SERR_SWRPERR_ENABLED	(1 << 1)
+#define MV_PCI_SERR_SRDPERR_ENABLED	(1 << 2)
+#define MV_PCI_SERR_MIOERR_ENABLED	(1 << 4)
+#define MV_PCI_SERR_MWRPERR_ENABLED	(1 << 5)
+#define MV_PCI_SERR_MRDPERR_ENABLED	(1 << 6)
+#define MV_PCI_SERR_MCTABORT_ENABLED	(1 << 7)
+#define MV_PCI_SERR_MMABORT_ENABLED	(1 << 8)
+#define MV_PCI_SERR_MTABORT_ENABLED	(1 << 9)
+#define MV_PCI_SERR_MDIS_ENABLED	(1 << 10)
+#define MV_PCI_SERR_MRETRY_ENABLED	(1 << 11)
+#define MV_PCI_SERR_MDISCARD_ENABLED	(1 << 12)
+#define MV_PCI_SERR_MUNEXP_ENABLED	(1 << 13)
+#define MV_PCI_SERR_MERRMSG_ENABLED	(1 << 14)
+#define MV_PCI_SERR_SCMABORT_ENABLED	(1 << 16)
+#define MV_PCI_SERR_STBOART_ENABLED	(1 << 17)
+#define MV_PCI_SERR_SCTABORT_ENABLED	(1 << 18)
+#define MV_PCI_SERR_SRDBUF_ENABLED	(1 << 20)
+#define MV_PCI_SERR_ARB_ENABLED		(1 << 21)
+#define MV_PCI_SERR_SRETRY_ENABLED	(1 << 22)
+#define MV_PCI_SERR_SCDESTRD_ENABLED	(1 << 23)
+
+#define MV_PCI_INTR_CAUSE_REG		(MV_PCI_REGS_BASE + 0x1d58)
+
+/* These macros describe PCI_INTR_CAUSE reg bits */
+#define MV_PCI_CAUSE_EV_MASK		(0x00ffffff)
+#define MV_PCI_CAUSE_BIST		(1 << 24)
+#define MV_PCI_CAUSE_PMG		(1 << 25)
+#define MV_PCI_CAUSE_RESET		(1 << 26)
+#define MV_PCI_CAUSE_SEL_MASK		(0xf8000000)
+#define MV_PCI_CAUSE_SEL_SHIFT		(27)
+
+#define MV_PCI_INTR_MASK_REG		(MV_PCI_REGS_BASE + 0x1d5c)
+#define MV_PCI_ERROR_ADDRL_REG		(MV_PCI_REGS_BASE + 0x1d40)
+
+
+/*
+ * Unit6: IDMA controller interface registers
+ */
+
+#define MV_IDMA_REGS_BASE		(0x60000)
+#define MV_IDMA_REGS_SIZE		(0x1000)
+
+#define MV_IDMA_BYTE_COUNT_REG(x)	(MV_IDMA_REGS_BASE + 0x800 + (x) * 4)
+
+/* These macros describe IDMA_BYTE_COUNT(x) reg bits */
+#define MV_IDMA_BYTE_COUNT_MASK		0xffffff
+#define MV_IDMA_BYTE_COUNT_SHIFT	0
+#define MV_IDMA_BYTE_COUNT_LEFT		(1 << 30)
+#define MV_IDMA_BYTE_COUNT_OWN		(1 << 31)
+
+#define MV_IDMA_SRC_ADDR_REG(x)		(MV_IDMA_REGS_BASE + 0x810 + (x) * 4)
+#define MV_IDMA_DST_ADDR_REG(x)		(MV_IDMA_REGS_BASE + 0x820 + (x) * 4)
+#define MV_IDMA_NEXT_DESC_REG(x)	(MV_IDMA_REGS_BASE + 0x830 + (x) * 4)
+#define MV_IDMA_CUR_DESC_REG(x)		(MV_IDMA_REGS_BASE + 0x870 + (x) * 4)
+
+/* IDMA address map registers */
+#define MV_IDMA_WIN_REGS_BASE		(MV_IDMA_REGS_BASE + 0xA00)
+
+#define MV_IDMA_WIN_BASE_REG(x)		(MV_IDMA_WIN_REGS_BASE + (x) * 8)
+
+/* These macros describe IDMA_WIN_BASE(X) reg bits */
+#define MV_IDMA_WIN_TARGET_MASK		0xf
+#define MV_IDMA_WIN_TARGET_SHIFT	0
+#define MV_IDMA_WIN_ATTR_MASK		0xff00
+#define MV_IDMA_WIN_ATTR_SHIFT		8
+#define MV_IDMA_WIN_BASE_MASK		0xffff0000
+#define MV_IDMA_WIN_BASE_SHIFT		16
+
+#define MV_IDMA_WIN_SIZE_REG(x)		(MV_IDMA_WIN_REGS_BASE + (x) * 8 + 0x4)
+#define MV_IDMA_WIN_SIZE_MASK		0xffff0000
+#define MV_IDMA_WIN_SIZE_SHIFT		16
+
+#define MV_IDMA_WIN_REMAP_REG(x)	(MV_IDMA_WIN_REGS_BASE + 0x60 + (x) * 4)
+#define MV_IDMA_WIN_BARE_REG		(MV_IDMA_WIN_REGS_BASE + 0x80)
+
+/* These macros describe IDMA_WIN_BARE(X) reg bits */
+#define MV_IDMA_WIN_BARE_WIN_ENABLED(x)	(1 << (x))
+
+#define MV_IDMA_WIN_CHAP_REG(x)		(MV_IDMA_WIN_REGS_BASE + 0x70)
+
+/* These macros describe IDMA_WIN_CHAP(X) reg bits */
+#define MV_IDMA_WIN_CHAP_NOACCESS	0x0
+#define MV_IDMA_WIN_CHAP_RO		0x1
+#define MV_IDMA_WIN_CHAP_RESERVED	0x2
+#define MV_IDMA_WIN_CHAP_RW		0x3
+#define MV_IDMA_WIN_CHAP_CHANSHIFT(x)	((x) * 2)
+#define MV_IDMA_WIN_CHAP_CHANMASK(x)	(0x3 << ((x) * 2))
+
+#define MV_IDMA_CTRL_REG(x)		(MV_IDMA_REGS_BASE + 0x840 + (x) * 4)
+
+/* These macros describe IDMA_CTRL_REG(x) reg bits */
+#define MV_IDMA_CTRL_DBURST_MAX_8B	0x0
+#define MV_IDMA_CTRL_DBURST_MAX_16B	0x1
+#define MV_IDMA_CTRL_DBURST_MAX_32B	0x3
+#define MV_IDMA_CTRL_DBURST_MAX_64B	0x7
+#define MV_IDMA_CTRL_DBURST_MAX_128B	0x4
+#define MV_IDMA_CTRL_DBURST_MAX_MASK	0x7
+#define MV_IDMA_CTRL_SRC_HOLD		(1 << 3)
+#define MV_IDMA_CTRL_DST_HOLD		(1 << 5)
+#define MV_IDMA_CTRL_SBURST_MAX_8B	(0x0 << 6)
+#define MV_IDMA_CTRL_SBURST_MAX_16B	(0x1 << 6)
+#define MV_IDMA_CTRL_SBURST_MAX_32B	(0x3 << 6)
+#define MV_IDMA_CTRL_SBURST_MAX_64B	(0x7 << 6)
+#define MV_IDMA_CTRL_SBURST_MAX_128B	(0x4 << 6)
+#define MV_IDMA_CTRL_SBURST_MAX_MASK	(0x7 << 6)
+#define MV_IDMA_CTRL_NON_CHAINED_MODE	(1 << 9)
+#define MV_IDMA_CTRL_INTERRUPT_LAST	(1 << 10)
+#define MV_IDMA_CTRL_CHANNEL_ENABLED	(1 << 12)
+#define MV_IDMA_CTRL_FETCH_NEXT		(1 << 13)
+#define MV_IDMA_CTRL_CHANNEL_ACTIVITY	(1 << 14)
+#define MV_IDMA_CTRL_CDE_ENABLED	(1 << 17)
+#define MV_IDMA_CTRL_CHAN_ABORT		(1 << 20)
+#define MV_IDMA_CTRL_DESC_MODE_64K	(0 << 31)
+#define MV_IDMA_CTRL_DESC_MODE_16M	(1 << 31)
+#define MV_IDMA_CTRL_RESERVED		(1 << 11)
+
+#define MV_IDMA_CTRLH_REG(x)		(MV_IDMA_REGS_BASE + 0x880 + (x) * 4)
+#define MV_IDMA_CTRLH_RESERVED		0x3
+
+#define MV_IDMA_INTR_CAUSE_REG		(MV_IDMA_REGS_BASE + 0x8C0)
+#define MV_IDMA_INTR_MASK_REG		(MV_IDMA_REGS_BASE + 0x8C4)
+
+/* These macros describe IDMA_INTERRUPT_CAUSE/MASK reg bits */
+#define MV_IDMA_INTR_CHAN_ALL(x)	(0x1f << ((x) * 8))
+#define MV_IDMA_INTR_COMPLETE(x)	(1 << ((x) * 8))
+#define MV_IDMA_INTR_ADDR_MISS(x)	(1 << ((x) * 8 + 1))
+#define MV_IDMA_INTR_ACCESS_PROT(x)	(1 << ((x) * 8 + 2))
+#define MV_IDMA_INTR_WR_PROT(x)		(1 << ((x) * 8 + 3))
+#define MV_IDMA_INTR_CPU_OWN(x)		(1 << ((x) * 8 + 4))
+
+#define MV_IDMA_ERROR_ADDRESS_REG	(MV_IDMA_REGS_BASE + 0x8C8)
+#define MV_IDMA_ERROR_SELECT_REG	(MV_IDMA_REGS_BASE + 0x8CC)
+
+/* These macros describe IDMA_ERROR_SELECT_REG reg bits */
+#define MV_IDMA_ERROR_CHANNEL_GET(x)	((x) * 8)
+#define MV_IDMA_ERROR_ADDR_RESERVED	(0x0)
+#define MV_IDMA_ERROR_ADDR_MISS		(0x1)
+#define MV_IDMA_ERROR_ACCESS_PROT	(0x2)
+#define MV_IDMA_ERROR_WR_PROT		(0x3)
+#define MV_IDMA_ERROR_CPU_OWN		(0x4)
+
+
+/*
+ * Unit7: Gigabit ethernet registers
+ */
+#define MV_ETH_REGS_BASE		(0x70000)
+#define MV_ETH_REGS_SIZE		(0x2000)
+
+#define MV_ETH_PHY_ADDR_REG		(MV_ETH_REGS_BASE + 0x0)
+#define MV_ETH_SMI_REG			(MV_ETH_REGS_BASE + 0x4)
+
+/* These macros describe SMI reg */
+#define ETH_SMI_BUSY			0x10000000
+#define ETH_SMI_READ_VALID		0x08000000
+#define ETH_SMI_OPCODE_WRITE		0
+#define ETH_SMI_OPCODE_READ		0x04000000
+
+#define MV_ETH_UNIT_EUDA_REG		(MV_ETH_REGS_BASE + 0x8)
+#define MV_ETH_UNIT_EUDID_REG		(MV_ETH_REGS_BASE + 0xc)
+
+/* These macros describe EUDID reg */
+#define MV_ETH_UNIT_EUDID_TARGET_SHIFT	(0)
+#define MV_ETH_UNIT_EUDID_ATTR_SHIFT	(4)
+
+
+#define MV_ETH_UNIT_INTERRUPT_CAUSE_REG	(MV_ETH_REGS_BASE + 0x80)
+#define MV_ETH_UNIT_INTERRUPT_MASK_REG	(MV_ETH_REGS_BASE + 0x84)
+
+/* These macros describe Ethernet Unit Interrupt Cause/Mask reg bits */
+#define MV_ETH_UNIT_INT_SUMMARY		(1 << 0)
+#define MV_ETH_UNIT_INT_PARITY_ERROR	(1 << 1)
+#define MV_ETH_UNIT_INT_ADDR_VIOLATION	(1 << 2)
+#define MV_ETH_UNIT_INT_ADDR_NOMATCH	(1 << 3)
+#define MV_ETH_UNIT_INT_SMIDONE		(1 << 4)
+#define MV_ETH_UNIT_INT_COUNTERS_WRAP	(1 << 5)
+#define MV_ETH_UNIT_INT_IADDR_ERROR	(1 << 7)
+
+#define MV_ETH_UNIT_EUEA_REG		(MV_ETH_REGS_BASE + 0x94)
+
+
+/* Ethernet address map registers */
+#define MV_ETH_WIN_REGS_BASE		(MV_ETH_REGS_BASE + 0x200)
+
+#define MV_ETH_WIN_BASE_REG(x)		(MV_ETH_WIN_REGS_BASE + (x) * 8)
+
+/* These macros describe ETH_WIN_BASE(X) reg bits */
+#define MV_ETH_WIN_TARGET_MASK		0xf
+#define MV_ETH_WIN_TARGET_SHIFT		0
+#define MV_ETH_WIN_ATTR_MASK		0xff00
+#define MV_ETH_WIN_ATTR_SHIFT		8
+#define MV_ETH_WIN_BASE_MASK		0xffff0000
+#define MV_ETH_WIN_BASE_SHIFT		16
+
+#define MV_ETH_WIN_SIZE_REG(x)		(MV_ETH_WIN_REGS_BASE + (x) * 8 + 0x4)
+#define MV_ETH_WIN_SIZE_MASK		0xffff0000
+#define MV_ETH_WIN_SIZE_SHIFT		16
+
+#define MV_ETH_WIN_REMAP_REG(x)		(MV_ETH_WIN_REGS_BASE + 0x80 + (x) * 4)
+#define MV_ETH_WIN_BARE_REG		(MV_ETH_WIN_REGS_BASE + 0x90)
+
+/* These macros describe ETH_WIN_BARE(X) reg bits */
+#define MV_ETH_WIN_BARE_WIN_ENABLED(x)	(1 << (x))
+
+#define MV_ETH_WIN_EPAP_REG		(MV_ETH_WIN_REGS_BASE + 0x94)
+
+/* These macros describe ETH_WIN_EPAP(X) reg bits */
+#define MV_ETH_WIN_EPAP_NOACCESS	0x0
+#define MV_ETH_WIN_EPAP_RO		0x1
+#define MV_ETH_WIN_EPAP_RESERVED	0x2
+#define MV_ETH_WIN_EPAP_RW		0x3
+#define MV_ETH_WIN_EPAP_WINSHIFT(x)	((x) * 2)
+#define MV_ETH_WIN_EPAP_WINMASK(x)	(0x3 << ((x) * 2))
+
+
+#define MV_ETH_PORT_CONFIG_REG(port)					\
+			(MV_ETH_REGS_BASE + 0x400 + ((port) << 10))
+
+/* These macros describe Ethernet Port configuration reg (Px_cR) bits */
+#define MV_ETH_UNICAST_NORMAL_MODE		0
+#define MV_ETH_UNICAST_PROMISCUOUS_MODE		(1 << 0)
+#define MV_ETH_DEFAULT_RX_QUEUE_0		0
+#define MV_ETH_DEFAULT_RX_QUEUE_1		(1 << 1)
+#define MV_ETH_DEFAULT_RX_QUEUE_2		(1 << 2)
+#define MV_ETH_DEFAULT_RX_QUEUE_3		((1 << 2) | (1 << 1))
+#define MV_ETH_DEFAULT_RX_QUEUE_4		(1 << 3)
+#define MV_ETH_DEFAULT_RX_QUEUE_5		((1 << 3) | (1 << 1))
+#define MV_ETH_DEFAULT_RX_QUEUE_6		((1 << 3) | (1 << 2))
+#define MV_ETH_DEFAULT_RX_QUEUE_7		((1 << 3) | (1 << 2) | (1 << 1))
+#define MV_ETH_DEFAULT_RX_ARP_QUEUE_0		0
+#define MV_ETH_DEFAULT_RX_ARP_QUEUE_1		(1 << 4)
+#define MV_ETH_DEFAULT_RX_ARP_QUEUE_2		(1 << 5)
+#define MV_ETH_DEFAULT_RX_ARP_QUEUE_3		((1 << 5) | (1 << 4))
+#define MV_ETH_DEFAULT_RX_ARP_QUEUE_4		(1 << 6)
+#define MV_ETH_DEFAULT_RX_ARP_QUEUE_5		((1 << 6) | (1 << 4))
+#define MV_ETH_DEFAULT_RX_ARP_QUEUE_6		((1 << 6) | (1 << 5))
+#define MV_ETH_DEFAULT_RX_ARP_QUEUE_7		((1 << 6) | (1 << 5) | (1 << 4))
+#define MV_ETH_RECEIVE_BC_IF_NOT_IP_OR_ARP	0
+#define MV_ETH_REJECT_BC_IF_NOT_IP_OR_ARP	(1 << 7)
+#define MV_ETH_RECEIVE_BC_IF_IP			0
+#define MV_ETH_REJECT_BC_IF_IP			(1 << 8)
+#define MV_ETH_RECEIVE_BC_IF_ARP		0
+#define MV_ETH_REJECT_BC_IF_ARP			(1 << 9)
+#define MV_ETH_TX_AM_NO_UPDATE_ERROR_SUMMARY	(1 << 12)
+#define MV_ETH_CAPTURE_TCP_FRAMES_DIS		0
+#define MV_ETH_CAPTURE_TCP_FRAMES_EN		(1 << 14)
+#define MV_ETH_CAPTURE_UDP_FRAMES_DIS		0
+#define MV_ETH_CAPTURE_UDP_FRAMES_EN		(1 << 15)
+#define MV_ETH_DEFAULT_RX_TCP_QUEUE_0		0
+#define MV_ETH_DEFAULT_RX_TCP_QUEUE_1		(1 << 16)
+#define MV_ETH_DEFAULT_RX_TCP_QUEUE_2		(1 << 17)
+#define MV_ETH_DEFAULT_RX_TCP_QUEUE_3		((1 << 17) | (1 << 16))
+#define MV_ETH_DEFAULT_RX_TCP_QUEUE_4		(1 << 18)
+#define MV_ETH_DEFAULT_RX_TCP_QUEUE_5		((1 << 18) | (1 << 16))
+#define MV_ETH_DEFAULT_RX_TCP_QUEUE_6		((1 << 18) | (1 << 17))
+#define MV_ETH_DEFAULT_RX_TCP_QUEUE_7		((1 << 18) | (1 << 17) | (1 << 16))
+#define MV_ETH_DEFAULT_RX_UDP_QUEUE_0		0
+#define MV_ETH_DEFAULT_RX_UDP_QUEUE_1		(1 << 19)
+#define MV_ETH_DEFAULT_RX_UDP_QUEUE_2		(1 << 20)
+#define MV_ETH_DEFAULT_RX_UDP_QUEUE_3		((1 << 20) | (1 << 19))
+#define MV_ETH_DEFAULT_RX_UDP_QUEUE_4		(1 << 21)
+#define MV_ETH_DEFAULT_RX_UDP_QUEUE_5		((1 << 21) | (1 << 19))
+#define MV_ETH_DEFAULT_RX_UDP_QUEUE_6		((1 << 21) | (1 << 20))
+#define MV_ETH_DEFAULT_RX_UDP_QUEUE_7		((1 << 21) | (1 << 20) | (1 << 19))
+#define MV_ETH_DEFAULT_RX_BPDU_QUEUE_0		0
+#define MV_ETH_DEFAULT_RX_BPDU_QUEUE_1		(1 << 22)
+#define MV_ETH_DEFAULT_RX_BPDU_QUEUE_2		(1 << 23)
+#define MV_ETH_DEFAULT_RX_BPDU_QUEUE_3		((1 << 23) | (1 << 22))
+#define MV_ETH_DEFAULT_RX_BPDU_QUEUE_4		(1 << 24)
+#define MV_ETH_DEFAULT_RX_BPDU_QUEUE_5		((1 << 24) | (1 << 22))
+#define MV_ETH_DEFAULT_RX_BPDU_QUEUE_6		((1 << 24) | (1 << 23))
+#define MV_ETH_DEFAULT_RX_BPDU_QUEUE_7		((1 << 24) | (1 << 23) | (1 << 22))
+
+
+#define MV_ETH_PORT_CONFIG_EXTEND_REG(port)				\
+			(MV_ETH_REGS_BASE + 0x404 + ((port) << 10))
+
+/* These macros describe Ethernet Port configuration extend reg (Px_cXR) bits*/
+#define MV_ETH_SPAN_BPDU_PACKETS_AS_NORMAL		0
+#define MV_ETH_SPAN_BPDU_PACKETS_TO_RX_QUEUE_7		(1 << 1)
+
+#define MV_ETH_MAC_ADDR_LOW(port)					\
+			(MV_ETH_REGS_BASE + 0x414 + ((port) << 10))
+
+#define MV_ETH_MAC_ADDR_HIGH(port)					\
+			(MV_ETH_REGS_BASE + 0x418 + ((port) << 10))
+
+#define MV_ETH_SDMA_CONFIG_REG(port)					\
+			(MV_ETH_REGS_BASE + 0x41c + ((port) << 10))
+
+/* These macros describe Ethernet Port Sdma configuration reg (SDCR) bits */
+#define MV_ETH_RIFB				(1 << 0)
+#define MV_ETH_RX_BURST_SIZE_1_64BIT		0
+#define MV_ETH_RX_BURST_SIZE_2_64BIT		(1 << 1)
+#define MV_ETH_RX_BURST_SIZE_4_64BIT		(1 << 2)
+#define MV_ETH_RX_BURST_SIZE_8_64BIT		((1 << 2) | (1 << 1))
+#define MV_ETH_RX_BURST_SIZE_16_64BIT		(1 << 3)
+#define MV_ETH_BLM_RX_NO_SWAP			(1 << 4)
+#define MV_ETH_BLM_RX_BYTE_SWAP			0
+#define MV_ETH_BLM_TX_NO_SWAP			(1 << 5)
+#define MV_ETH_BLM_TX_BYTE_SWAP			0
+#define MV_ETH_DESCRIPTORS_BYTE_SWAP		(1 << 6)
+#define MV_ETH_DESCRIPTORS_NO_SWAP		0
+#define MV_ETH_TX_BURST_SIZE_1_64BIT		0
+#define MV_ETH_TX_BURST_SIZE_2_64BIT		(1 << 22)
+#define MV_ETH_TX_BURST_SIZE_4_64BIT		(1 << 23)
+#define MV_ETH_TX_BURST_SIZE_8_64BIT		((1 << 23) | (1 << 22))
+#define MV_ETH_TX_BURST_SIZE_16_64BIT		(1 << 24)
+
+#define MV_ETH_IPG_INT_RX(value)		(((value) & 0x3fff) << 8)
+
+#define MV_ETH_PORT_SERIAL_CONTROL_REG(port)				\
+			(MV_ETH_REGS_BASE + 0x43c + ((port) << 10))
+
+/* These macros describe Ethernet Port serial control reg (PSCR) bits */
+#define MV_ETH_SERIAL_PORT_DISABLE		0
+#define MV_ETH_SERIAL_PORT_ENABLE		(1 << 0)
+#define MV_ETH_FORCE_LINK_PASS			(1 << 1)
+#define MV_ETH_DO_NOT_FORCE_LINK_PASS		0
+#define MV_ETH_ENABLE_AUTO_NEG_FOR_DUPLX	0
+#define MV_ETH_DISABLE_AUTO_NEG_FOR_DUPLX	(1 << 2)
+#define MV_ETH_ENABLE_AUTO_NEG_FOR_FLOW_CTRL	0
+#define MV_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL	(1 << 3)
+#define MV_ETH_ADV_NO_FLOW_CTRL			0
+#define MV_ETH_ADV_SYMMETRIC_FLOW_CTRL		(1 << 4)
+#define MV_ETH_FORCE_FC_MODE_NO_PAUSE_DIS_TX	0
+#define MV_ETH_FORCE_FC_MODE_TX_PAUSE_DIS	(1 << 5)
+#define MV_ETH_FORCE_BP_MODE_NO_JAM		0
+#define MV_ETH_FORCE_BP_MODE_JAM_TX		(1 << 7)
+#define MV_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR	(1 << 8)
+#define MV_ETH_SERIAL_PORT_CONTROL_RESERVED	(1 << 9)
+#define MV_ETH_FORCE_LINK_FAIL			0
+#define MV_ETH_DO_NOT_FORCE_LINK_FAIL		(1 << 10)
+#define MV_ETH_RETRANSMIT_16_ATTEMPTS		0
+#define MV_ETH_RETRANSMIT_FOREVER		(1 << 11)
+#define MV_ETH_DISABLE_AUTO_NEG_SPEED_GMII	(1 << 13)
+#define MV_ETH_ENABLE_AUTO_NEG_SPEED_GMII	0
+#define MV_ETH_DTE_ADV_0			0
+#define MV_ETH_DTE_ADV_1			(1 << 14)
+#define MV_ETH_DISABLE_AUTO_NEG_BYPASS		0
+#define MV_ETH_ENABLE_AUTO_NEG_BYPASS		(1 << 15)
+#define MV_ETH_AUTO_NEG_NO_CHANGE		0
+#define MV_ETH_RESTART_AUTO_NEG			(1 << 16)
+#define MV_ETH_MAX_RX_PACKET_1518BYTE		0
+#define MV_ETH_MAX_RX_PACKET_1522BYTE		(1 << 17)
+#define MV_ETH_MAX_RX_PACKET_1552BYTE		(1 << 18)
+#define MV_ETH_MAX_RX_PACKET_9022BYTE		((1 << 18) | (1 << 17))
+#define MV_ETH_MAX_RX_PACKET_9192BYTE		(1 << 19)
+#define MV_ETH_MAX_RX_PACKET_9700BYTE		((1 << 19) | (1 << 17))
+#define MV_ETH_SET_EXT_LOOPBACK			(1 << 20)
+#define MV_ETH_CLR_EXT_LOOPBACK			0
+#define MV_ETH_SET_FULL_DUPLEX_MODE		(1 << 21)
+#define MV_ETH_SET_HALF_DUPLEX_MODE		0
+#define MV_ETH_ENABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX (1 << 22)
+#define MV_ETH_DISABLE_FLOW_CTRL_TX_RX_IN_FULL_DUPLEX 0
+#define MV_ETH_SET_GMII_SPEED_TO_10_100		0
+#define MV_ETH_SET_GMII_SPEED_TO_1000		(1 << 23)
+#define MV_ETH_SET_MII_SPEED_TO_10		0
+#define MV_ETH_SET_MII_SPEED_TO_100		(1 << 24)
+
+#define MV_ETH_MAX_RX_PACKET_MASK		(0x7 << 17)
+
+
+#define MV_ETH_PORT_STATUS_REG(port)		(0x2444 + (port<<10))
+
+/* These macros describe Ethernet Port Status reg (PS) bits */
+#define MV_ETH_PORT_STATUS_MODE_10_BIT		(1 << 0)
+#define MV_ETH_PORT_STATUS_LINK_UP		(1 << 1)
+#define MV_ETH_PORT_STATUS_FULL_DUPLEX		(1 << 2)
+#define MV_ETH_PORT_STATUS_FLOW_CONTROL		(1 << 3)
+#define MV_ETH_PORT_STATUS_GMII_1000		(1 << 4)
+#define MV_ETH_PORT_STATUS_MII_100		(1 << 5)
+/* PS bit 6 is undocumented */
+#define MV_ETH_PORT_STATUS_TX_IN_PROGRESS	(1 << 7)
+#define MV_ETH_PORT_STATUS_AUTONEG_BYPASSED	(1 << 8)
+#define MV_ETH_PORT_STATUS_PARTITION		(1 << 9)
+#define MV_ETH_PORT_STATUS_TX_FIFO_EMPTY	(1 << 10)
+/* PS bits 11-31 are reserved */
+
+
+#define MV_ETH_TRANSMIT_QUEUE_COMMAND_REG(port)				\
+			(MV_ETH_REGS_BASE + 0x448 + ((port) << 10))
+
+
+/* the following register is not documented */
+#define MV_ETH_MAXIMUM_TRANSMIT_UNIT(port)				\
+			(MV_ETH_REGS_BASE + 0x458 + ((port) << 10))
+
+
+#define MV_ETH_INTERRUPT_CAUSE_REG(port)				\
+			(MV_ETH_REGS_BASE + 0x460 + ((port) << 10))
+
+/* These macros describe Ethernet Interrupt Cause/Mask reg bits */
+#define MV_ETH_INT_RX_BUFFER			(1 << 0)
+#define MV_ETH_INT_EXTEND			(1 << 1)
+#define MV_ETH_INT_RX_BUFFER_0			(1 << 2)
+#define MV_ETH_INT_RX_BUFFER_1			(1 << 3)
+#define MV_ETH_INT_RX_BUFFER_2			(1 << 4)
+#define MV_ETH_INT_RX_BUFFER_3			(1 << 5)
+#define MV_ETH_INT_RX_NOBUFFER_0		(1 << 11)
+#define MV_ETH_INT_TX_END			(1 << 19)
+#define MV_ETH_INT_SUMMARY			(1 << 31)
+
+
+#define MV_ETH_INTERRUPT_CAUSE_EXTEND_REG(port)				\
+			(MV_ETH_REGS_BASE + 0x464 + ((port) << 10))
+
+/* These macros describe Ethernet Interrupt Extended Cause/Mask reg bits */
+#define MV_ETH_INTEXT_TX_BUFFER			(1 << 0)
+#define MV_ETH_INTEXT_TX_ERROR			(1 << 8)
+#define MV_ETH_INTEXT_PHYSTC			(1 << 16)
+#define MV_ETH_INTEXT_RX_OVERRUN		(1 << 18)
+#define MV_ETH_INTEXT_TX_UNDERRUN		(1 << 19)
+#define MV_ETH_INTEXT_LINK_CHANGE		(1 << 20)
+#define MV_ETH_INTEXT_INTERNAL_ADDR_ERROR	(1 << 23)
+#define MV_ETH_INTEXT_SUMMARY			(1 << 31)
+
+
+#define MV_ETH_INTERRUPT_MASK_REG(port)					\
+			(MV_ETH_REGS_BASE + 0x468 + ((port) << 10))
+
+#define MV_ETH_INTERRUPT_EXTEND_MASK_REG(port)				\
+			(MV_ETH_REGS_BASE + 0x46c + ((port) << 10))
+
+#define MV_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port)			\
+			(MV_ETH_REGS_BASE + 0x60c + ((port) << 10))
+
+#define MV_ETH_RX_CURRENT_QUEUE_DESC_PTR_1(port)			\
+			(MV_ETH_REGS_BASE + 0x61c + ((port) << 10))
+
+#define MV_ETH_RX_CURRENT_QUEUE_DESC_PTR_2(port)			\
+			(MV_ETH_REGS_BASE + 0x62c + ((port) << 10))
+
+#define MV_ETH_RX_CURRENT_QUEUE_DESC_PTR_3(port)			\
+			(MV_ETH_REGS_BASE + 0x63c + ((port) << 10))
+
+#define MV_ETH_RX_CURRENT_QUEUE_DESC_PTR_4(port)			\
+			(MV_ETH_REGS_BASE + 0x64c + ((port) << 10))
+
+#define MV_ETH_RX_CURRENT_QUEUE_DESC_PTR_5(port)			\
+			(MV_ETH_REGS_BASE + 0x65c + ((port) << 10))
+
+#define MV_ETH_RX_CURRENT_QUEUE_DESC_PTR_6(port)			\
+			(MV_ETH_REGS_BASE + 0x66c + ((port) << 10))
+
+#define MV_ETH_RX_CURRENT_QUEUE_DESC_PTR_7(port)			\
+			(MV_ETH_REGS_BASE + 0x67c + ((port) << 10))
+
+
+#define MV_ETH_RECEIVE_QUEUE_COMMAND_REG(port)				\
+			(MV_ETH_REGS_BASE + 0x680 + ((port) << 10))
+
+/* These macros describe Ethernet Receive Queue Command reg bits */
+#define MV_ETH_RECEIVE_QUEUE_ENABLE_0	(1 << 0)
+#define MV_ETH_RECEIVE_QUEUE_ENABLE_1	(1 << 1)
+#define MV_ETH_RECEIVE_QUEUE_ENABLE_2	(1 << 2)
+#define MV_ETH_RECEIVE_QUEUE_ENABLE_3	(1 << 3)
+
+#define MV_ETH_RECEIVE_QUEUE_DISABLE_0	(1 << 8)
+#define MV_ETH_RECEIVE_QUEUE_DISABLE_1	(1 << 9)
+#define MV_ETH_RECEIVE_QUEUE_DISABLE_2	(1 << 10)
+#define MV_ETH_RECEIVE_QUEUE_DISABLE_3	(1 << 11)
+
+
+#define MV_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port)			\
+			(MV_ETH_REGS_BASE + 0x6c0 + ((port) << 10))
+
+#define MV_ETH_TX_CURRENT_QUEUE_DESC_PTR_1(port)			\
+			(MV_ETH_REGS_BASE + 0x6c4 + ((port) << 10))
+
+#define MV_ETH_TX_CURRENT_QUEUE_DESC_PTR_2(port)			\
+			(MV_ETH_REGS_BASE + 0x6c8 + ((port) << 10))
+
+#define MV_ETH_TX_CURRENT_QUEUE_DESC_PTR_3(port)			\
+			(MV_ETH_REGS_BASE + 0x6cc + ((port) << 10))
+
+#define MV_ETH_TX_CURRENT_QUEUE_DESC_PTR_4(port)			\
+			(MV_ETH_REGS_BASE + 0x6d0 + ((port) << 10))
+
+#define MV_ETH_TX_CURRENT_QUEUE_DESC_PTR_5(port)			\
+			(MV_ETH_REGS_BASE + 0x6d4 + ((port) << 10))
+
+#define MV_ETH_TX_CURRENT_QUEUE_DESC_PTR_6(port)			\
+			(MV_ETH_REGS_BASE + 0x6d8 + ((port) << 10))
+
+#define MV_ETH_TX_CURRENT_QUEUE_DESC_PTR_7(port)			\
+			(MV_ETH_REGS_BASE + 0x6dc + ((port) << 10))
+
+#define MV_ETH_MIB_COUNTERS_BASE(port)					\
+			(MV_ETH_REGS_BASE + 0x1000 + ((port) << 7))
+
+#define MV_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port)		\
+			(MV_ETH_REGS_BASE + 0x1400 + ((port) << 10))
+
+#define MV_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port)		\
+			(MV_ETH_REGS_BASE + 0x1500 + ((port) << 10))
+
+#define MV_ETH_DA_FILTER_UNICAST_TABLE_BASE(port)			\
+			(MV_ETH_REGS_BASE + 0x1600 + ((port) << 10))
+
+
+#define MV_ETH_TX_FIFO_URGENT_THRESHOLD_REG(port)			\
+			(MV_ETH_REGS_BASE + 0x474 + ((port) << 10))
+
+
+/*
+ * Unit?: TDM/SPI control registers
+ */
+
+/* TDM-to-Mbus Bridge Registers */
+#define MV_TDM_MBUS_REGS_BASE		(0xB4000)
+#define MV_TDM_SPI_MUX_REG		(MV_TDM_MBUS_REGS_BASE)
+#define MV_TDM_MISC_REG			(MV_TDM_MBUS_REGS_BASE + 0x70)
+ 
+#define MV_TDM_SPI_MUX_DISABLE		1
+#define MV_TDM_SPI_MUX_ENABLE		0
+
+/* TDM Control Registers */
+#define MV_TDM_REGS_BASE		(0xB0000)
+#define MV_TDM_PCM_CTRL_REG		(MV_TDM_REGS_BASE + 0x00)
+#define MV_TDM_TIMESLOT_CTRL_REG	(MV_TDM_REGS_BASE + 0x04)
+#define MV_TDM_CHANNEL1_ENABLE		(MV_TDM_REGS_BASE + 0x10)
+#define MV_TDM_CHANNEL2_ENABLE		(MV_TDM_REGS_BASE + 0x20)
+#define MV_TDM_FRAME_TIMESLOT_REG	(MV_TDM_REGS_BASE + 0x38)
+#define MV_TDM_PCM_CLK_RATE_DIV_REG	(MV_TDM_REGS_BASE + 0x3C)
+#define MV_TDM_INT_EVENT_MASK_REG	(MV_TDM_REGS_BASE + 0x40)
+#define MV_TDM_INT_STATUS_MASK_REG	(MV_TDM_REGS_BASE + 0x48)
+#define MV_TDM_INT_RESET_SELECT_REG	(MV_TDM_REGS_BASE + 0x4C)
+#define MV_TDM_INT_STATUS_REG		(MV_TDM_REGS_BASE + 0x50)
+#define MV_TDM_DUMMY_RX_WRITE_DATA_REG	(MV_TDM_REGS_BASE + 0x54)
+#define MV_TDM_MISC_CTRL_REG		(MV_TDM_REGS_BASE + 0x58)
+#define MV_TDM_TESTBUS_MUX_SELECT_REG	(MV_TDM_REGS_BASE + 0x5C)
+#define MV_TDM_CUR_TIMESLOT_REG		(MV_TDM_REGS_BASE + 0x70)
+#define MV_TDM_REV_REG			(MV_TDM_REGS_BASE + 0x74)
+#define MV_TDM_DMA_ABORT_ADDR_REG	(MV_TDM_REGS_BASE + 0x80)
+#define MV_TDM_DMA_ABORT_INFO_REG	(MV_TDM_REGS_BASE + 0x84)
+ 
+/* MV_TDM_PCM_CLK_RATE_DIV_REG bits */
+#define MV_TDM_PCM_256KHZ		(1 << 0)
+#define MV_TDM_PCM_512KHZ		(1 << 1)
+#define MV_TDM_PCM_1024KHZ		(1 << 2)
+#define MV_TDM_PCM_2048KHZ		(1 << 3)
+#define MV_TDM_PCM_4096KHZ		(1 << 4)
+#define MV_TDM_PCM_8192KHZ		(1 << 5)
+
+/* MV_TDM_FRAME_TIMESLOT_REG bits */
+#define MV_TDM_TIMESLOTS_4		(1 << 2)
+#define MV_TDM_TIMESLOTS_8		(1 << 3)
+#define MV_TDM_TIMESLOTS_16		(1 << 4)
+#define MV_TDM_TIMESLOTS_32		(1 << 5)
+#define MV_TDM_TIMESLOTS_64		(1 << 6)
+#define MV_TDM_TIMESLOTS_128		(1 << 7)
+
+/* MV_TDM_TIMESLOT_CTRL_REG bits */
+#define MV_TDM_CH0_RX_SLOT_OFFS		0
+#define MV_TDM_CH0_TX_SLOT_OFFS		8
+#define MV_TDM_CH1_RX_SLOT_OFFS		16
+#define MV_TDM_CH1_TX_SLOT_OFFS		24
+
+/* MV_TDM_PCM_CTRL_REG bits */
+#define MV_TDM_MASTER_PCLK_TDM		0
+#define MV_TDM_MASTER_PCLK_EXTERNAL	(1 << 0)
+#define MV_TDM_MASTER_FS_TDM		0
+#define MV_TDM_MASTER_FS_EXTERNAL	(1 << 1)
+#define MV_TDM_DATA_POLAR_NEG		0
+#define MV_TDM_DATA_POLAR_POS		(1 << 2)
+#define MV_TDM_FS_POLAR_NEG		0
+#define MV_TDM_FS_POLAR_POS		(1 << 3)
+#define MV_TDM_INVERT_FS_HI		0
+#define MV_TDM_INVERT_FS_LO		(1 << 4)
+#define MV_TDM_FS_TYPE_SHORT		0
+#define MV_TDM_FS_TYPE_LONG		(1 << 5)
+#define MV_TDM_PCM_SAMPLE_SIZE_1	0
+#define MV_TDM_PCM_SAMPLE_SIZE_2	(1 << 6)
+#define MV_TDM_CH_DELAY_DISABLE		0
+#define MV_TDM_CH_DELAY_ENABLE		(3 << 8)
+#define MV_TDM_CH_QUALITY_DISABLE	0
+#define MV_TDM_CH_QUALITY_ENABLE	(3 << 10)
+#define MV_TDM_QUALITY_POLARITY_NEG	0
+#define MV_TDM_QUALITY_POLARITY_POS	(1 << 12)
+#define MV_TDM_QUALITY_TYPE_TIME_SLOT	0
+#define MV_TDM_QUALITY_TYPE_MSB		(3 << 13)
+#define MV_TDM_CS_CTRL_DONT_CARE	0
+#define MV_TDM_CS_CTRL_0		0
+#define MV_TDM_CS_CTRL_1		(1 << 15)
+#define MV_TDM_WIDEBAND_OFF		0
+#define MV_TDM_WIDEBAND_ON		(1 << 16)
+#define MV_TDM_PERF_GBUS_ONE_ACCESS	0
+#define MV_TDM_PERF_GBUS_TWO_ACCESS	(1 << 31)
+
+/* MV_TDM_MISC_CTRL_REG bits */
+#define MV_TDM_MISC_RESET_BIT		(1 << 0)
+
+/* MV_TDM_INT_RESET_SELECT_REG */
+#define MV_TDM_CLEAR_INT_ON_ZERO	0
+#define MV_TDM_CLEAR_INT_ON_READ	(1 << 0)
+
+/* SPI Control Registers */
+#define MV_SPI_REGS_BASE		(0xB3100)
+#define MV_SPI_CLK_PRESCALAR_REG	(MV_SPI_REGS_BASE + 0x00)
+#define MV_SPI_GLOBAL_CTRL_REG		(MV_SPI_REGS_BASE + 0x04)
+#define MV_SPI_CTRL_REG			(MV_SPI_REGS_BASE + 0x08)
+#define MV_SPI_MICRO_WIRE_CTRL_REG	(MV_SPI_REGS_BASE + 0x0C)
+#define MV_SPI_TWSI_CTRL_REG		(MV_SPI_REGS_BASE + 0x10)
+#define MV_SPI_MV_STATUS_REG		(MV_SPI_REGS_BASE + 0x1C)
+#define MV_SPI_INT_MASK_REG		(MV_SPI_REGS_BASE + 0x20)
+#define MV_SPI_DATA_REG			(MV_SPI_REGS_BASE + 0x24)
+#define MV_SPI_ADDR_REG			(MV_SPI_REGS_BASE + 0x28)
+#define MV_SPI_CODEC_CMD_LO_REG		(MV_SPI_REGS_BASE + 0x30)
+#define MV_SPI_CODEC_CMD_HI_REG		(MV_SPI_REGS_BASE + 0x34)
+#define MV_SPI_CODEC_CTRL_REG		(MV_SPI_REGS_BASE + 0x38)
+#define MV_SPI_CODEC_READ_DATA_REG	(MV_SPI_REGS_BASE + 0x3C)
+
+#define MV_SPI_REGS_SIZE		0x40
+
+/* MV_SPI_CTRL_REG bits */
+#define MV_SPI_STAT_MASK		(1 << 10)
+#define MV_SPI_READY			0
+#define MV_SPI_ACTIVE			(1 << 10)
+
+/* MV_SPI_GLOBAL_CTRL_REG bits */
+#define MV_SPI_GLOBAL_DISABLE		0
+#define MV_SPI_GLOBAL_ENABLE		1
+
+/* MV_SPI_CODEC_CTRL_REG bits */
+#define MV_SPI_TRANSFER_BYTES(count)	((count) - 1)
+#define MV_SPI_ENDIANESS_MSB_MODE	0
+#define MV_SPI_ENDIANESS_LSB_MODE	(1 << 2)
+#define MV_SPI_WR_MODE			0
+#define MV_SPI_RD_MODE			(1 << 3)
+#define MV_SPI_READ_BYTES(count)	(((count) - 1) << 4)
+#define MV_SPI_CLK_SPEED_LO_DIV		0
+#define MV_SPI_CLK_SPEED_HI_DIV		(1 << 5)
+#define MV_SPI_CS_HI_CNT_VAL_RD_OFFS	6
+#define MV_SPI_CS_HI_CNT_VAL_RD_MASK	(0xFF3 << 6)
+
+/*
+ * device controller registers
+ */
+#define MV_DEVICE_CONTROLLER_BASE	0x1045c
+#define MV_DEVICE_BANK0_PARAM_REG	(MV_DEVICE_CONTROLLER_BASE + 0x0)
+#define MV_DEVICE_BANK1_PARAM_REG	(MV_DEVICE_CONTROLLER_BASE + 0x4)
+#define MV_DEVICE_BANK2_PARAM_REG	(MV_DEVICE_CONTROLLER_BASE + 0x8)
+
+
+#endif /* __REGS_H */
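The ETH_SMI_* bits above drive the MDIO interface towards the PHY. The sketch below shows one way a PHY register read could be built on them; it is only a sketch, and it assumes the conventional Marvell SMI field placement (data in bits 0-15, PHY address in bits 16-20, register address in bits 21-25), which this header does not spell out.

    #include <linux/types.h>
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <asm/arch/io.h>
    #include <asm/arch/regs.h>

    /* hypothetical helper: read one PHY register through the SMI interface */
    static int smi_phy_read(unsigned int phy, unsigned int reg, u16 *val)
    {
        int timeout = 1000;
        u32 smi;

        /* wait for any previous SMI transaction to complete */
        while ((mv_readl(MV_ETH_SMI_REG) & ETH_SMI_BUSY) && --timeout)
            udelay(10);
        if (!timeout)
            return -ETIMEDOUT;

        /* assumed field layout: phy address in bits 16-20, reg in 21-25 */
        mv_writel(ETH_SMI_OPCODE_READ | (phy << 16) | (reg << 21),
                  MV_ETH_SMI_REG);

        /* poll until the read data becomes valid */
        timeout = 1000;
        do {
            smi = mv_readl(MV_ETH_SMI_REG);
            if (smi & ETH_SMI_READ_VALID) {
                *val = smi & 0xffff;
                return 0;
            }
            udelay(10);
        } while (--timeout);

        return -ETIMEDOUT;
    }
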
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./system.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/system.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./system.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/system.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,31 @@
+#ifndef __ASM_ARCH_SYSTEM_H
+# define __ASM_ARCH_SYSTEM_H
+
+#include <asm/arch/io.h>
+#include <asm/arch/regs.h>
+
+static inline void arch_idle(void)
+{
+	/*
+	 * This should do all the clock switching
+	 * and wait for interrupt tricks
+	 */
+	cpu_do_idle();
+}
+
+static inline void arch_reset(char mode)
+{
+	unsigned int val;
+
+	printk("Resetting using CPU registers ..\n");
+
+	val = mv_readl(MV_CPU_RSTOUTN_MASK_REG);
+	val |= 0x04;
+	mv_writel(val, MV_CPU_RSTOUTN_MASK_REG);
+
+	val = mv_readl(MV_CPU_SYS_SOFT_RST_REG);
+	val |= 0x1;
+	mv_writel(val, MV_CPU_SYS_SOFT_RST_REG);
+}
+
+#endif /* !__ASM_ARCH_SYSTEM_H */
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./tdm.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/tdm.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./tdm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/tdm.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,16 @@
+
+#ifndef __ASM_ARCH_TDM_H__
+# define __ASM_ARCH_TDM_H__
+
+struct mv_tdm_data
+{
+	uint32_t tdm_dev_mux;
+};
+
+void mv_spi_writeb(unsigned char data);
+unsigned char mv_spi_readb(unsigned char addr);
+unsigned short mv_spi_readw(unsigned char addr);
+
+void mv_tdm_init(struct mv_tdm_data *data);
+
+#endif /* !__ASM_ARCH_TDM_H__ */
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./timer.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/timer.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./timer.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/timer.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,41 @@
+/*
+ * timer.h for freebox
+ * Created by <nschichan@corp.free.fr> on Tue Sep 19 21:36:35 2006
+ * Freebox SA
+ */
+
+#ifndef __ASM_ARCH_TIMER_H
+# define __ASM_ARCH_TIMER_H
+
+#define TIMER0         0
+#define TIMER1         1
+#define TIMERWDT       2
+
+#define NUM_TIMER      3
+
+int timer_autoreload(int timer, int reload);
+int timer_enable(int timer, int enable);
+int timer_set_reload_value(int timer, unsigned int reload_value);
+int timer_load_counter(int timer, unsigned int reload_value);
+unsigned int timer_get_counter(int timer);
+
+void timer_ack_interrupt(int timer);
+void timer_mask_interrupt(int timer);
+void timer_unmask_interrupt(int timer);
+
+/* MV_MPP_SAMPLE_AT_RESET_REG register content */
+#define TCLK_SHIFT	8
+#define TCLK_MASK	0x3
+
+#define TCLK_133	0x0
+#define TCLK_150	0x1
+#define TCLK_166	0x2
+
+#define ARMDDRCLK_SHIFT	4
+#define ARMDDRCLK_MASK	0xf
+
+unsigned int tclk_get_rate(void);
+unsigned int ddrclk_get_rate(void);
+unsigned int armclk_get_rate(void);
+
+#endif /* !__ASM_ARCH_TIMER_H */
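The TCLK_* encodings above suggest how tclk_get_rate() decodes the strap latched at reset. A minimal sketch follows; it assumes MV_MPP_SAMPLE_AT_RESET_REG is provided by <asm/arch/regs.h> and that the three codes map to the usual 133/150/166 MHz values, which are not stated in this header.

    #include <asm/arch/io.h>
    #include <asm/arch/regs.h>
    #include <asm/arch/timer.h>

    unsigned int tclk_get_rate(void)
    {
        unsigned int code;

        /* strap value sampled at reset selects the TCLK frequency */
        code = (mv_readl(MV_MPP_SAMPLE_AT_RESET_REG) >> TCLK_SHIFT) & TCLK_MASK;

        switch (code) {
        case TCLK_133:
            return 133333333;
        case TCLK_150:
            return 150000000;
        case TCLK_166:
            return 166666667;
        default:
            /* reserved encoding: fall back to the lowest rate */
            return 133333333;
        }
    }
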
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./timex.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/timex.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./timex.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/timex.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,23 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * timex.h for mv kernel
+ * Created by <nschichan@corp.free.fr> on Tue Sep 19 11:59:49 2006
+ * Freebox SA
+ */
+
+#define CLOCK_TICK_RATE 500000000
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./uncompress.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/uncompress.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./uncompress.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/uncompress.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,29 @@
+/*
+ * uncompress.h for freebox
+ * Created by <nschichan@corp.free.fr> on Tue Sep 19 14:46:27 2006
+ * Freebox SA
+ */
+
+#include <asm/arch/io.h>
+#include <asm/arch/regs.h>
+
+#define wait_uart() do { \
+ while ((mv_readb(MV_UART0_LSR_REG) & 0x20) == 0) ; \
+} while (0)
+
+static void
+putc(int c)
+{
+	wait_uart();
+	mv_writeb(c, MV_UART0_THR_REG);
+}
+
+static inline void flush(void)
+{
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
diff -Nruw linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./vmalloc.h linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/vmalloc.h
--- linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81./vmalloc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/include/asm-arm/arch-mv88fxx81/vmalloc.h	2010-12-29 19:30:08.431441341 +0100
@@ -0,0 +1,30 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+
+#define VMALLOC_OFFSET   (8*1024*1024)
+#define VMALLOC_START    (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+/* note: DRAM and the vmalloc area share at most 512MB; additional DRAM
+   comes at the expense of the vmalloc area */
+#define VMALLOC_END       (PAGE_OFFSET + 0x20000000)
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/crash_zone.h	2010-12-29 19:30:08.681442167 +0100
@@ -0,0 +1,16 @@
+#ifndef CRASH_ZONE_H
+#define CRASH_ZONE_H
+
+#define CRASH_MAGIC	0xa7cecd6a
+
+struct crash_header
+{
+	unsigned int magic;
+	unsigned int len;
+	unsigned short checksum;
+	unsigned char data;
+};
+
+void __init crash_zone_set_param(unsigned char *zone, unsigned int size);
+
+#endif /* ! CRASH_ZONE_H */
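A consumer of this header would at least check the magic and the advertised length before trusting a saved crash report; a minimal sketch, with the checksum verification deliberately left out because the checksum algorithm is not defined here (the helper name and bounds check are assumptions):

    #include <linux/kernel.h>
    #include <linux/crash_zone.h>

    /* hypothetical helper: decide whether a crash zone holds a usable report */
    static int crash_zone_looks_valid(const struct crash_header *hdr,
                                      unsigned int zone_size)
    {
        if (hdr->magic != CRASH_MAGIC)
            return 0;
        /* 'data' is the first payload byte, so headroom is its offset */
        if (hdr->len > zone_size - offsetof(struct crash_header, data))
            return 0;
        /* checksum check omitted: algorithm not part of this header */
        return 1;
    }
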
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/fbxatm.h	2010-12-29 19:30:08.701442239 +0100
@@ -0,0 +1,202 @@
+/*
+ * Generic fbxatm definition, exported to userspace
+ */
+#ifndef LINUX_FBXATM_H_
+#define LINUX_FBXATM_H_
+
+#include <linux/types.h>
+#include <linux/if.h>
+
+#define FBXATM_IOCTL_MAGIC		0xd3
+
+/* allow userspace usage without up to date kernel headers */
+#ifndef PF_FBXATM
+#define PF_FBXATM			32
+#define AF_FBXATM			PF_FBXATM
+#endif
+
+struct fbxatm_vcc_id {
+	int				dev_idx;
+	__u32				vpi;
+	__u32				vci;
+};
+
+enum fbxatm_vcc_user {
+	FBXATM_VCC_USER_NONE = 0,
+	FBXATM_VCC_USER_2684,
+	FBXATM_VCC_USER_2684_RETX,
+	FBXATM_VCC_USER_PPPOA,
+};
+
+enum fbxatm_vcc_traffic_class {
+	FBXATM_VCC_TC_UBR_NO_PCR = 0,
+	FBXATM_VCC_TC_UBR,
+};
+
+#define FBXATM_VCC_MAX_PRIO		7
+
+struct fbxatm_vcc_qos {
+	__u32				traffic_class;
+	__u32				max_sdu;
+	__u32				max_buffered_pkt;
+	__u32				priority;
+};
+
+
+/*
+ * VCC related
+ */
+struct fbxatm_vcc_params {
+	/* ADD/DEL/GET */
+	struct fbxatm_vcc_id		id;
+
+	/* ADD/GET */
+	struct fbxatm_vcc_qos		qos;
+
+	/* GET */
+	enum fbxatm_vcc_user		user;
+};
+
+#define FBXATM_IOCADD		_IOW(FBXATM_IOCTL_MAGIC,	1,	\
+					struct fbxatm_vcc_params)
+
+#define FBXATM_IOCDEL		_IOR(FBXATM_IOCTL_MAGIC,	2,	\
+					struct fbxatm_vcc_params)
+
+#define FBXATM_IOCGET		_IOWR(FBXATM_IOCTL_MAGIC,	3,	\
+					struct fbxatm_vcc_params)
+
+
+struct fbxatm_vcc_drop_params {
+	struct fbxatm_vcc_id		id;
+	unsigned int			drop_count;
+};
+
+#define FBXATM_IOCDROP		_IOWR(FBXATM_IOCTL_MAGIC,	5,	\
+					struct fbxatm_vcc_drop_params)
+
+/*
+ * OAM related
+ */
+enum fbxatm_oam_ping_type {
+	FBXATM_OAM_PING_SEG_F4	= 0,
+	FBXATM_OAM_PING_SEG_F5,
+	FBXATM_OAM_PING_E2E_F4,
+	FBXATM_OAM_PING_E2E_F5,
+};
+
+struct fbxatm_oam_ping_req {
+	/* only dev_idx for F4 */
+	struct fbxatm_vcc_id		id;
+
+	__u8				llid[16];
+	enum fbxatm_oam_ping_type	type;
+};
+
+#define FBXATM_IOCOAMPING	_IOWR(FBXATM_IOCTL_MAGIC,	10,	\
+				      struct fbxatm_oam_ping_req)
+
+
+/*
+ * PPPOA related
+ */
+enum fbxatm_pppoa_encap {
+	FBXATM_EPPPOA_AUTODETECT = 0,
+	FBXATM_EPPPOA_VCMUX,
+	FBXATM_EPPPOA_LLC,
+};
+
+struct fbxatm_pppoa_vcc_params {
+	struct fbxatm_vcc_id		id;
+	__u32				encap;
+	__u32				cur_encap;
+};
+
+#define FBXATM_PPPOA_IOCADD	_IOW(FBXATM_IOCTL_MAGIC,	20,	\
+					struct fbxatm_pppoa_vcc_params)
+
+#define FBXATM_PPPOA_IOCDEL	_IOW(FBXATM_IOCTL_MAGIC,	21,	\
+					struct fbxatm_pppoa_vcc_params)
+
+#define FBXATM_PPPOA_IOCGET	_IOWR(FBXATM_IOCTL_MAGIC,	22,	\
+					struct fbxatm_pppoa_vcc_params)
+
+
+
+/*
+ * 2684 related
+ */
+enum fbxatm_2684_encap {
+	FBXATM_E2684_VCMUX = 0,
+	FBXATM_E2684_LLC,
+};
+
+enum fbxatm_2684_payload {
+	FBXATM_P2684_BRIDGE = 0,
+	FBXATM_P2684_ROUTED,
+};
+
+struct fbxatm_2684_vcc_params {
+	struct fbxatm_vcc_id		id;
+
+	__u32				encap;
+	__u32				payload;
+	char				dev_name[IFNAMSIZ];
+};
+
+
+#define FBXATM_2684_IOCADD	_IOW(FBXATM_IOCTL_MAGIC,	30,	\
+					struct fbxatm_2684_vcc_params)
+
+#define FBXATM_2684_IOCDEL	_IOW(FBXATM_IOCTL_MAGIC,	31,	\
+					struct fbxatm_2684_vcc_params)
+
+#define FBXATM_2684_IOCGET	_IOWR(FBXATM_IOCTL_MAGIC,	32,	\
+					struct fbxatm_2684_vcc_params)
+
+#define MAX_RETX_PORT			8
+
+struct fbxatm_2684_gretx_params {
+	int				enable;
+
+	struct fbxatm_vcc_id		retx_ports_vcc_id[MAX_RETX_PORT];
+	unsigned int			retx_ports_id[MAX_RETX_PORT];
+	unsigned int			retx_port_count;
+
+	__u32				retx_daddr;
+	__u32				retx_saddr;
+	struct fbxatm_vcc_id		retx_tx_vcc_id;
+	struct fbxatm_vcc_id		retx_ack_vcc_id;
+	__u32				rtt_daddr;
+	__u32				rtt_saddr;
+	struct fbxatm_vcc_id		rtt_tx_vcc_id;
+	struct fbxatm_vcc_id		rtt_ack_vcc_id;
+};
+
+#define FBXATM_2684_IOCSET_GRETX	_IOR(FBXATM_IOCTL_MAGIC, 33,	\
+					struct fbxatm_2684_gretx_params)
+
+#define FBXATM_2684_IOCGET_GRETX	_IOWR(FBXATM_IOCTL_MAGIC, 34,	\
+					struct fbxatm_2684_retx_params)
+
+enum fbxatm_2684_retx_action {
+	FBXATM_2684_RETX_IGNORE = 0,
+	FBXATM_2684_RETX_HANDLE,
+};
+
+struct fbxatm_2684_retx_params {
+	int				enable;
+
+	struct fbxatm_vcc_id		id;
+	__u32				action;
+	__u32				stream_id;
+};
+
+
+#define FBXATM_2684_IOCSET_RETX	_IOR(FBXATM_IOCTL_MAGIC,	35,	\
+					struct fbxatm_2684_retx_params)
+
+#define FBXATM_2684_IOCGET_RETX	_IOWR(FBXATM_IOCTL_MAGIC,	36,	\
+					struct fbxatm_2684_retx_params)
+
+#endif /* LINUX_FBXATM_H_ */
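From userspace these ioctls would typically be issued on a control device node; a minimal sketch of opening a VCC, assuming a hypothetical /dev/fbxatm node and illustrative VPI/VCI and QoS values (none of which are defined in this header):

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fbxatm.h>

    int main(void)
    {
        struct fbxatm_vcc_params params;
        int fd, ret;

        fd = open("/dev/fbxatm", O_RDWR);   /* hypothetical control node */
        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(&params, 0, sizeof(params));
        params.id.dev_idx = 0;              /* first fbxatm device */
        params.id.vpi = 8;                  /* illustrative VPI/VCI */
        params.id.vci = 35;
        params.qos.traffic_class = FBXATM_VCC_TC_UBR_NO_PCR;
        params.qos.max_sdu = 1524;
        params.qos.max_buffered_pkt = 16;
        params.qos.priority = 0;

        ret = ioctl(fd, FBXATM_IOCADD, &params);
        if (ret < 0)
            perror("FBXATM_IOCADD");

        close(fd);
        return ret < 0;
    }
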
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/fbxdmamux.h	2010-12-29 19:30:08.701442239 +0100
@@ -0,0 +1,51 @@
+
+#ifndef FBXDMAMUX_H_
+#define FBXDMAMUX_H_
+
+#include <linux/list.h>
+
+/* whether to increment the src/dst address while
+ * transferring */
+#define FBXDMAMUX_FLAG_SRC_NO_INCR	(1 << 0)
+#define FBXDMAMUX_FLAG_DST_NO_INCR	(1 << 1)
+
+/* whether the src/dst addresses are already hw (bus) addresses */
+#define FBXDMAMUX_FLAG_SRC_HW		(1 << 2)
+#define FBXDMAMUX_FLAG_DST_HW		(1 << 3)
+
+#define FBXDMAMUX_MAX_PRIO		CONFIG_FREEBOX_DMAMUX_MAX_PRIO
+
+struct fbxdmamux_req
+{
+	/* requests with the same channel cookie are guaranteed to be
+	 * served in order */
+	unsigned char	chan_cookie;
+	/* priority: 0 (fast) ... 4 (slow) */
+	unsigned char	priority;
+	unsigned char	flags;
+
+	void		*virt_src;
+	void		*virt_dst;
+	dma_addr_t	hw_src;
+	dma_addr_t	hw_dst;
+	unsigned int	len;
+
+	void		(*callback)(void *cb_data, int error);
+	void		*cb_data;
+
+	/* fields below are opaque to caller */
+	struct list_head list;
+};
+
+struct fbxdmamux_req *fbxdmamux_req_from_pool(void);
+
+int fbxdmamux_alloc_channel_cookie(void);
+
+int fbxdmamux_submit(struct fbxdmamux_req *req);
+
+int fbxdmamux_submit_and_sleep(struct fbxdmamux_req *req,
+			       unsigned int timeout);
+
+void fbxdmamux_flush_channel(unsigned int cookie);
+
+#endif /* !FBXDMAMUX_H_ */
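A caller fills a request taken from the pool and submits it; below is a minimal sketch of a synchronous copy between two bus addresses. It assumes the timeout passed to fbxdmamux_submit_and_sleep() is in jiffies and that a NULL callback is acceptable when sleeping, neither of which is stated in this header.

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/fbxdmamux.h>

    /* hypothetical helper: blocking copy of 'len' bytes between bus addresses */
    static int dmamux_copy_sync(int cookie, dma_addr_t dst, dma_addr_t src,
                                unsigned int len)
    {
        struct fbxdmamux_req *req;

        req = fbxdmamux_req_from_pool();
        if (!req)
            return -ENOMEM;

        req->chan_cookie = cookie;      /* from fbxdmamux_alloc_channel_cookie() */
        req->priority = 0;              /* fastest priority */
        req->flags = FBXDMAMUX_FLAG_SRC_HW | FBXDMAMUX_FLAG_DST_HW;
        req->hw_src = src;
        req->hw_dst = dst;
        req->len = len;
        req->callback = NULL;           /* no completion callback: we sleep */

        /* assumed jiffies; returns once the transfer completed or timed out */
        return fbxdmamux_submit_and_sleep(req, HZ);
    }
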
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/fbxgpio_core.h	2010-12-29 19:30:08.701442239 +0100
@@ -0,0 +1,45 @@
+/*
+ * fbxgpio.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Feb 21 22:09:46 2007
+ * Freebox SA
+ */
+
+#ifndef FBXGPIO_H
+# define FBXGPIO_H
+
+# include <linux/types.h>
+
+/* can change pin direction */
+#define FBXGPIO_PIN_DIR_RW	(1 << 0)
+#define FBXGPIO_PIN_REVERSE_POL	(1 << 1)
+
+struct fbxgpio_operations
+{
+	int  (*get_datain)(int gpio);
+	void (*set_dataout)(int gpio, int val);
+	int  (*get_dataout)(int gpio);
+	void (*set_direction)(int gpio, int dir);
+	int  (*get_direction)(int gpio);
+};
+
+
+struct fbxgpio_pin
+{
+	const char	*pin_name;
+	uint32_t	flags;
+	int		direction;
+	int		pin_num;
+	struct fbxgpio_operations	*ops;
+
+	struct class_device *class_dev;
+	unsigned int	cur_dataout;
+};
+
+
+#define GPIO_DIR_IN	0x1
+#define GPIO_DIR_OUT	0x0
+
+int fbxgpio_register_pin(struct fbxgpio_pin *pin);
+void fbxgpio_unregister_pin(struct fbxgpio_pin *pin);
+
+#endif /* !FBXGPIO_H */
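Board code plugs its register accessors into struct fbxgpio_operations and registers each pin with the core; a minimal sketch with placeholder accessors (the pin name, pin number and empty accessor bodies are illustrative only):

    #include <linux/init.h>
    #include <linux/fbxgpio_core.h>

    /* placeholder accessors: a real board would poke its GPIO registers here */
    static int board_get_datain(int gpio)               { return 0; }
    static void board_set_dataout(int gpio, int val)    { }
    static int board_get_dataout(int gpio)              { return 0; }
    static void board_set_direction(int gpio, int dir)  { }
    static int board_get_direction(int gpio)            { return GPIO_DIR_IN; }

    static struct fbxgpio_operations board_gpio_ops = {
        .get_datain    = board_get_datain,
        .set_dataout   = board_set_dataout,
        .get_dataout   = board_get_dataout,
        .set_direction = board_set_direction,
        .get_direction = board_get_direction,
    };

    static struct fbxgpio_pin board_led_pin = {
        .pin_name  = "led",              /* illustrative pin name */
        .flags     = FBXGPIO_PIN_DIR_RW, /* direction may be changed later */
        .direction = GPIO_DIR_OUT,
        .pin_num   = 3,                  /* illustrative pin number */
        .ops       = &board_gpio_ops,
    };

    static int __init board_gpio_init(void)
    {
        return fbxgpio_register_pin(&board_led_pin);
    }
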
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/fbxmtd.h	2010-12-29 19:30:08.701442239 +0100
@@ -0,0 +1,211 @@
+
+#ifndef FBXMTD_H_
+# define FBXMTD_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#endif
+
+#define FBXMTD_MAX_DEVICES	2
+
+/* hide all hot stuff from userland */
+#ifdef __KERNEL__
+
+struct fbxmtd_dev;
+
+/*
+ * fbxmtd partition
+ */
+struct fbxmtd_part
+{
+	char			*name;
+	uint32_t		offset;
+	uint32_t		size;
+	int			idx;
+	int			rw;
+
+	struct fbxmtd_dev	*dev;
+};
+
+/*
+ * fbxmtd flash sector information
+ */
+struct fbxmtd_region
+{
+	uint32_t		offset;
+	uint32_t		size;
+	uint32_t		count;
+};
+
+/*
+ * fbxmtd device mapping
+ */
+struct fbxmtd_dev_map
+{
+	/* base physical address of mtd device */
+	dma_addr_t		base_phys;
+
+	/* flash bus width (1: 8 bits, 2: 16 bits, 4: 32 bits) */
+	unsigned int		flash_width;
+
+	/* remapped address */
+	uint8_t			*base;
+};
+
+/*
+ * fbxmtd device
+ */
+#define FBXMTD_MAX_PART		16
+
+struct fbxmtd_dev
+{
+	char			*name;
+	struct semaphore	sem;
+	int			dead;
+	atomic_t		refcount;
+	int			idx;
+
+	struct fbxmtd_dev_map	map;
+
+	struct fbxmtd_part	parts[FBXMTD_MAX_PART];
+	unsigned int		part_count;
+
+
+	/* the following callbacks are set by the chip backend */
+	int			(*erase)(struct fbxmtd_dev *dev,
+					 uint32_t offset);
+
+	int			(*chip_erase)(struct fbxmtd_dev *dev);
+
+	int			(*program)(struct fbxmtd_dev *dev,
+					   uint32_t offset, const uint8_t *buf,
+					   size_t count);
+
+	struct fbxmtd_region	*(*get_region_info)(struct fbxmtd_dev *dev);
+
+	uint32_t		(*get_size)(struct fbxmtd_dev *dev);
+
+	void			*priv_data;
+};
+
+/*
+ * notifier for partition add/dead
+ */
+#define FBXMTD_EVENT_ADD	(1 << 0)
+#define FBXMTD_EVENT_DEAD	(1 << 1)
+
+int fbxmtd_register_notifier(void (*cb)(void *, struct fbxmtd_part *,
+					uint32_t),
+			     void *cb_data, uint32_t mask);
+
+void fbxmtd_unregister_notifier(void (*cb)(void *, struct fbxmtd_part *,
+					   uint32_t));
+
+
+/*
+ * core functions
+ */
+struct fbxmtd_part *fbxmtd_get_part(unsigned int dev_idx,
+				    unsigned int part_idx);
+
+struct fbxmtd_part *fbxmtd_get_part_by_name(unsigned int dev_idx,
+					    const char *part_name);
+
+void fbxmtd_put_part(struct fbxmtd_part *part);
+
+void fbxmtd_put_device(struct fbxmtd_dev *dev);
+
+int fbxmtd_read_part(struct fbxmtd_part *part, uint32_t offset, char *buffer,
+		     unsigned int count, int can_sleep);
+
+int fbxmtd_read_dev(struct fbxmtd_dev *dev, uint32_t offset, char *buffer,
+		    unsigned int count);
+
+int fbxmtd_find_sector_boundary(struct fbxmtd_dev *dev,
+				uint32_t offset, uint32_t *boundary);
+
+int fbxmtd_find_next_sector_boundary(struct fbxmtd_dev *dev,
+				     uint32_t offset, uint32_t *boundary);
+
+int fbxmtd_write_part(struct fbxmtd_part *part, uint32_t offset, char *buffer,
+		      unsigned int count);
+
+int fbxmtd_set_partitions(struct fbxmtd_dev *dev, struct fbxmtd_part *parts,
+			  unsigned int count);
+
+struct fbxmtd_dev *fbxmtd_probe(const char *name, dma_addr_t base,
+				unsigned int flash_width);
+
+void fbxmtd_mark_dead_dev(struct fbxmtd_dev *dev);
+
+int fbxmtd_foreach_part(int (cb)(void *, struct fbxmtd_part *),
+			void *cb_data);
+
+int fbxmtd_foreach_dev(int (cb)(void *, struct fbxmtd_dev *),
+		       void *cb_data);
+
+/*
+ * generic map driver data
+ */
+struct fbxmtd_platform_data
+{
+	const char *			name;
+	int				status;
+	dma_addr_t			base;
+	uint32_t			width;
+	uint32_t			size; /* filled by map driver */
+	struct fbxmtd_platform_part	*parts;
+	uint32_t			num_parts;
+	struct fbxmtd_dev		*core_dev;
+};
+
+struct fbxmtd_platform_part
+{
+	char		*name;
+	char		*align_part;
+	uint32_t	offset;
+	uint32_t	roffset;
+	uint32_t	size;
+	uint32_t	flags;
+};
+
+/*
+ * bcm963xx map driver data
+ */
+struct fbxmtd_bcm963xx_platform_data {
+	char				*name;
+	dma_addr_t			base;
+	unsigned int			width;
+	unsigned int			psi_size;
+	u8				*boot_addr;
+	int				all_rw;
+};
+
+#endif /* __KERNEL__ */
+
+enum {
+	E_FBXMTD_NOT_PROBED,
+	E_FBXMTD_PROBED,
+	E_FBXMTD_FAULTY,
+};
+
+/* try to read image tag and set the _fs related partition */
+#define FBXMTD_PART_HAS_FS	(1 << 0)
+/* Read/Write partition */
+#define FBXMTD_PART_RW		(1 << 1)
+/* check CRC of image tag. */
+#define FBXMTD_PART_CHECK_CRC	(1 << 2)
+/*
+ * only for first partition: means that the size of the partition is
+ * the size of the flash
+ */
+#define FBXMTD_PART_MAP_ALL	(1 << 3)
+/* Adjust size automatically with the next partition */
+#define FBXMTD_PART_AUTOSIZE	(1 << 4)
+#define FBXMTD_PART_IGNORE_TAG	(1 << 5)
+#define FBXMTD_PART_SKIP_KERNEL	(1 << 6)
+
+
+#endif /* !FBXMTD_H_ */
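Kernel users of this interface normally look a partition up by name, read from it and drop the reference; a minimal sketch (device index, partition name and offset are illustrative):

    #include <linux/errno.h>
    #include <linux/fbxmtd.h>

    /* hypothetical helper: read the first 'len' bytes of a named partition */
    static int read_named_part(const char *name, char *buf, unsigned int len)
    {
        struct fbxmtd_part *part;
        int ret;

        /* device 0, partition looked up by name */
        part = fbxmtd_get_part_by_name(0, name);
        if (!part)
            return -ENODEV;

        /* last argument: we are allowed to sleep in this context */
        ret = fbxmtd_read_part(part, 0, buf, len, 1);

        fbxmtd_put_part(part);
        return ret;
    }
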
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/fbxmtd_map_ioctl.h	2010-12-29 19:30:08.701442239 +0100
@@ -0,0 +1,52 @@
+/*
+ * fbxmtd_map_ioctl.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Thu Feb  8 20:37:28 2007
+ * Freebox SA
+ */
+
+#ifndef FBXMTD_MAP_IOCTL_H
+# define FBXMTD_MAP_IOCTL_H
+
+/*
+ * IOCTL interface
+ */
+#define FBXMTD_MINOR	242
+
+#define FBXMTD_MAP_IOCTL_MAX_DEV	2
+#define FBXMTD_MAP_IOCTL_MAX_PART	16
+
+struct fbxmtd_map_ioctl_part
+{
+	char		name[32];
+	uint32_t	offset;
+	uint32_t	size;
+	uint32_t	flags;
+};
+
+struct fbxmtd_map_ioctl_dev
+{
+	char				name[32];
+	uint32_t			base_phys;
+	int				bus_width;
+	uint32_t			size;
+	uint32_t			status;
+	struct fbxmtd_map_ioctl_part	parts[FBXMTD_MAP_IOCTL_MAX_PART];
+	int				num_parts;
+};
+
+#define FBXMTD_MAP_IOCTL_NR	0x42
+
+struct fbxmtd_map_ioctl_query
+{
+	uint32_t	cmd;
+	uint32_t	param;
+	int		result;
+	void __user	*user_buf;
+	uint32_t	user_buf_size;
+};
+
+#define FBXMTDCTL_CMD_GET_DEVICES	0x1
+#define FBXMTDCTL_CMD_ADD_DEVICE	0x2
+#define FBXMTDCTL_CMD_DEL_DEVICE	0x3
+
+#endif /* !FBXMTD_MAP_IOCTL_H */
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/fbxpanel.h	2010-12-29 19:30:08.701442239 +0100
@@ -0,0 +1,97 @@
+/*
+ * fbxpanel.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Mar  7 22:07:50 2007
+ * Freebox SA
+ */
+
+#ifndef __FBXPANEL_H
+# define __FBXPANEL_H
+
+enum {
+	E_PANEL_METHOD_SPI,
+	E_PANEL_METHOD_I2C,
+};
+
+enum {
+	E_DIGIT_STATE_OFF,
+	E_DIGIT_STATE_ON,
+	E_DIGIT_STATE_BLINK,
+};
+
+/*
+ * panel digit layout:
+ *
+ * +---+    +---+
+ * |\|/|    |   |
+ * +-+-+ or +---+
+ * |/|\|    |   |
+ * +---+    +---+
+ *
+ * allow 7/14 seg display description.
+ *
+ * in 7 segment mode, d_* should be set to 0. h_middle_left and
+ * h_middle_right should be set to the same value.
+ */
+struct digit_seg_desc
+{
+	int h_top;
+	int h_middle_left;
+	int h_middle_right;
+	int h_bottom;
+
+	int v_top_left;
+	int v_top_middle;
+	int v_top_right;
+
+	int v_bottom_left;
+	int v_bottom_middle;
+	int v_bottom_right;
+
+	int d_bottom_right;
+	int d_bottom_left;
+	int d_top_right;
+	int d_top_left;
+};
+
+struct fbxpanel_pic_fbx_platform_data
+{
+	const char *name;
+	uint32_t i2c_addr;
+};
+
+struct fbxpanel
+{
+	const char *name;
+	int digit_count;
+	const uint16_t *ascii_table;
+
+	int (*set_digit)(struct fbxpanel *p, int digit, uint16_t val);
+	int (*set_colon_digit)(struct fbxpanel *p, int enable, int blink_msec);
+
+	void *priv;
+	struct class_device *class_dev;
+
+	struct digit_seg_desc *digit_seg_desc;
+
+	/* lock between the animator thread and processes writing via sysfs */
+	struct semaphore mutex;
+
+	/* animator thread stuff */
+	struct task_struct *animator;
+	wait_queue_head_t animator_wq;
+
+	int current_anim;
+	int anim_count;
+	int last_anim;
+	int current_frame;
+
+	uint16_t *digit_cache;
+
+	/* device specific animation frames */
+	struct dev_anim_frame	**frames;
+};
+
+int fbxpanel_register(struct fbxpanel *p);
+int fbxpanel_unregister(struct fbxpanel *p);
+
+#endif /* !__FBXPANEL_H */
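The digit layout comment above translates into a straightforward seven-segment table. Below is a minimal sketch; it assumes each field holds the driver-specific bit number of the matching segment, and the bit assignments are illustrative only. As the comment requires for 7-segment mode, the d_* fields stay 0 and h_middle_left/h_middle_right share the same value.

    #include <linux/fbxpanel.h>

    /* illustrative 7-segment wiring (bits a..g of a hypothetical controller) */
    static struct digit_seg_desc example_7seg_desc = {
        .h_top           = 0,
        .h_middle_left   = 6,
        .h_middle_right  = 6,   /* same segment as h_middle_left in 7-seg mode */
        .h_bottom        = 3,

        .v_top_left      = 5,
        .v_top_right     = 1,
        .v_bottom_left   = 4,
        .v_bottom_right  = 2,

        /* unused in 7-segment mode */
        .v_top_middle    = 0,
        .v_bottom_middle = 0,
        .d_top_left      = 0,
        .d_top_right     = 0,
        .d_bottom_left   = 0,
        .d_bottom_right  = 0,
    };
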
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/fbxserial.h	2011-09-09 16:10:02.080493458 +0200
@@ -0,0 +1,113 @@
+
+#ifndef FBXSERIAL_H_
+# define FBXSERIAL_H_
+
+/*
+ * some parts of the serial data may vary; we use an abstract struct to
+ * store them, the content depends on the type.
+ */
+#define EXTINFO_SIZE		128
+#define EXTINFO_MAX_COUNT	16
+
+/*
+ * extdev desc
+ */
+#define EXTINFO_TYPE_EXTDEV	1
+
+#define EXTDEV_TYPE_BUNDLE	1
+#define EXTDEV_TYPE_MAX		2
+
+struct fbx_serial_extinfo
+{
+	uint32_t			type;
+
+	union {
+		/* extdev */
+		struct {
+			uint32_t	type;
+			uint32_t	model;
+			char		serial[64];
+		} extdev;
+
+		/* raw access */
+		unsigned char		data[EXTINFO_SIZE];
+	} u;
+} __attribute__ ((packed));
+
+
+/*
+ * master serial structure
+ */
+
+#define FBXSERIAL_VERSION	1
+
+#define FBXSERIAL_MAGIC		0x2d9521ab
+
+#define MAC_ADDR_SIZE		6
+#define RANDOM_DATA_SIZE	32
+
+/*
+ * this  is the  maximum size  we accept  to check  crc32  against, so
+ * structure may no grow larger than this
+ */
+#define FBXSERIAL_MAX_SIZE	8192
+
+struct fbx_serial
+{
+	uint32_t crc32;
+	uint32_t magic;
+	uint32_t struct_version;
+	uint32_t len;
+
+	/* board serial */
+	uint16_t type;
+	uint8_t version;
+	uint8_t manufacturer;
+	uint16_t year;
+	uint8_t week;
+	uint32_t number;
+	uint32_t flags;
+
+	/* mac address base */
+	uint8_t mac_addr_base[MAC_ADDR_SIZE];
+
+	/* mac address count */
+	uint8_t mac_count;
+
+	/* random data */
+	uint8_t random_data[RANDOM_DATA_SIZE];
+
+	/* last update of data (seconds since epoch) */
+	uint32_t last_modified;
+
+	/* count of following extinfo tag */
+	uint32_t extinfo_count;
+
+	/* beginning of extended info */
+	struct fbx_serial_extinfo	extinfos[EXTINFO_MAX_COUNT];
+
+} __attribute__ ((packed));
+
+
+/*
+ * default value to use in case magic is wrong (no cksum in that case)
+ */
+#define DEFAULT_SERIAL_INFO		\
+	{ 0, FBXSERIAL_MAGIC,		\
+	FBXSERIAL_VERSION,		\
+	sizeof (struct fbx_serial),	\
+	0, 0, '_', 0, 0, 0, 0,		\
+	"\x00\x07\xCB\x00\x00\xFD",	\
+	1,				\
+	{ 0 },				\
+	0, 0 }
+
+static inline void
+fbxserial_set_default(struct fbx_serial *serial)
+{
+	static const struct fbx_serial def = DEFAULT_SERIAL_INFO;
+
+	memcpy(serial, &def, sizeof (def));
+}
+
+#endif /* FBXSERIAL_H_ */
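A reader of this structure would validate the magic and length before trusting any field and fall back to the defaults otherwise; a minimal sketch, with the crc32 verification left out because the exact coverage of the checksum is not specified in this header:

    #include <linux/types.h>
    #include <linux/string.h>
    #include <linux/fbxserial.h>

    /* hypothetical helper: copy 'raw' into 'out', or install the defaults */
    static void load_serial(struct fbx_serial *out, const struct fbx_serial *raw)
    {
        if (raw->magic != FBXSERIAL_MAGIC || raw->len > FBXSERIAL_MAX_SIZE) {
            /* bad or missing serial block: use the compile-time defaults */
            fbxserial_set_default(out);
            return;
        }
        memcpy(out, raw, sizeof(*out));
    }
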
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/fbxserialinfo.h	2010-12-29 19:30:08.701442239 +0100
@@ -0,0 +1,24 @@
+
+#ifndef FBXSERIALINFO_H_
+# define FBXSERIALINFO_H_
+
+struct fbx_serial;
+
+void
+fbxserialinfo_get_random(unsigned char *data, unsigned int len);
+
+void
+fbxserialinfo_get_mac_addr(unsigned char *data);
+
+int
+fbxserialinfo_read(void *data, struct fbx_serial *out);
+
+struct fbx_serial *fbxserialinfo_get(void);
+
+/*
+ * implemented in board specific code: we do not want static variables
+ * in the builtin fbxserial code.
+ */
+const struct fbx_serial *arch_get_serial(void);
+
+#endif /* FBXSERIALINFO_H_ */
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/fbxspi.h	2010-12-29 19:30:08.701442239 +0100
@@ -0,0 +1,131 @@
+/*
+ * fbxspi.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Tue Mar 13 00:09:31 2007
+ * Freebox SA
+ */
+
+#ifndef _FBXSPI_H
+# define _FBXSPI_H
+
+/*
+ * this was not invented here.
+ */
+
+struct fbxspi_master;
+struct fbxspi_device;
+
+struct fbxspi_device
+{
+	const char		name[BUS_ID_SIZE];
+	uint32_t		cs;
+	uint32_t		max_speed_hz;
+	int			probe_done;
+	int			lsb_first;
+
+	struct fbxspi_master	*master;
+	struct fbxspi_driver	*drv;
+
+	struct list_head	list;
+	void			(*chip_select_cb)(struct fbxspi_device *this,
+						  int select);
+};
+
+struct fbxspi_message
+{
+	const uint8_t	*tx;
+	uint8_t		*rx;
+
+	uint32_t	tx_count;
+	uint32_t	rx_count;
+};
+
+struct fbxspi_master
+{
+	char			name[BUS_ID_SIZE];
+	struct module		*owner;
+	spinlock_t		lock;
+	void			*priv;
+	uint32_t		num_cs;
+
+	/*
+	 * prepare transfer, set chip select, fix clock rates.
+	 */
+	int	(*setup)(struct fbxspi_device *dev);
+
+	/*
+	 * actually transfer data.
+	 */
+	int	(*transfer)(struct fbxspi_device *dev,
+			    struct fbxspi_message *msg);
+
+	/*
+	 * cleanup after transfer, release chip select.
+	 */
+	int	(*cleanup)(struct fbxspi_device *dev);
+
+
+	struct list_head	list;
+};
+
+struct fbxspi_driver
+{
+	int			(*probe)(struct fbxspi_device *dev);
+	int			(*remove)(struct fbxspi_device *dev);
+	struct fbxspi_device	*dev;
+	void			*drvdata;
+
+	struct module		*owner;
+	const char		name[BUS_ID_SIZE];
+
+	struct list_head	list;
+};
+
+#ifdef CONFIG_FREEBOX_SPI
+int fbxspi_register_device(struct fbxspi_device *dev);
+int fbxspi_register_driver(struct fbxspi_driver *drv);
+int fbxspi_register_master(struct fbxspi_master *master);
+#else
+static inline int fbxspi_register_device(struct fbxspi_device *d)
+{
+	return 0;
+}
+#endif
+
+
+void fbxspi_unregister_master(struct fbxspi_master *master);
+void fbxspi_unregister_driver(struct fbxspi_driver *drv);
+
+int fbxspi_write_then_read(struct fbxspi_device *dev,
+			   const uint8_t *tx, uint32_t tx_count,
+			   uint8_t *rx, uint32_t rx_count);
+
+static inline int
+fbxspi_w8r16(struct fbxspi_device *dev, uint8_t data)
+{
+	int status;
+	uint16_t val;
+
+	status = fbxspi_write_then_read(dev, &data, 1, (uint8_t*)&val, 2);
+	return status ? status : val;
+}
+
+static inline int
+fbxspi_w8r8(struct fbxspi_device *dev, uint8_t data)
+{
+	int status;
+	uint8_t val;
+
+	status = fbxspi_write_then_read(dev, &data, 1, &val, 1);
+	return status ? status : val;
+}
+
+static inline int
+fbxspi_w8(struct fbxspi_device *dev, uint8_t data)
+{
+	int status;
+
+	status = fbxspi_write_then_read(dev, &data, 1, NULL, 0);
+	return status;
+}
+
+#endif /* !_FBXSPI_H */
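A device driver sitting on this bus registers an fbxspi_driver and talks to its chip through the write-then-read helpers; a minimal probe sketch reading an illustrative chip-identification register (the driver name and how it is matched to a device are assumptions):

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/fbxspi.h>

    #define CHIP_ID_REG   0x00   /* illustrative register address */

    static int example_probe(struct fbxspi_device *dev)
    {
        int id;

        /* send one address byte, read one data byte back */
        id = fbxspi_w8r8(dev, CHIP_ID_REG);
        if (id < 0)
            return id;

        printk(KERN_INFO "example spi chip id: 0x%02x\n", id);
        return 0;
    }

    static int example_remove(struct fbxspi_device *dev)
    {
        return 0;
    }

    static struct fbxspi_driver example_driver = {
        .name   = "example-chip",   /* assumed to match the device name */
        .probe  = example_probe,
        .remove = example_remove,
        .owner  = THIS_MODULE,
    };

    static int __init example_init(void)
    {
        return fbxspi_register_driver(&example_driver);
    }
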
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/fbxwatchdog.h	2010-12-29 19:30:08.701442239 +0100
@@ -0,0 +1,54 @@
+/*
+ * fbxwatchdog.h for fbxwatchdog
+ * Created by <nschichan@freebox.fr> on Mon Jun 11 20:48:54 2007
+ * Freebox SA
+ */
+
+#ifndef FBXWATCHDOG_H
+# define FBXWATCHDOG_H
+
+struct fbxwatchdog
+{
+	const char *name;
+	void *priv;
+
+	int enabled;
+	int countdown;
+	int countdown_min;
+
+	int (*wdt_init)(struct fbxwatchdog *wdt);
+	int (*wdt_cleanup)(struct fbxwatchdog *wdt);
+
+	/*
+	 * wdt_start and wdt_stop are called with wdt->lock held and irq
+	 * disabled.
+	 */
+	int (*wdt_start)(struct fbxwatchdog *wdt);
+	int (*wdt_stop)(struct fbxwatchdog *wdt);
+
+	/*
+	 * cb is called from interrupt/softirq context (depends on the
+	 * underlying driver/hardware).
+	 */
+	void (*cb)(struct fbxwatchdog *wdt);
+
+	struct timer_list timer;
+
+	struct class_device *class_dev;
+
+	/*
+	 * protect interrupt handlers & start/stop methods running in
+	 * thread context.
+	 */
+	spinlock_t	lock;
+};
+
+int fbxwatchdog_register(struct fbxwatchdog *wdt);
+int fbxwatchdog_unregister(struct fbxwatchdog *wdt);
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+int fbxwatchdog_char_add(struct fbxwatchdog *wdt);
+void fbxwatchdog_char_remove(struct fbxwatchdog *wdt);
+#endif
+
+#endif /* !FBXWATCHDOG_H */
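A hardware backend fills in the callbacks and hands the structure to the core; a minimal skeleton, with the actual register accesses reduced to comments (the name and the empty bodies are placeholders):

    #include <linux/init.h>
    #include <linux/fbxwatchdog.h>

    static int mywdt_init(struct fbxwatchdog *wdt)
    {
        /* map registers / claim the interrupt here */
        return 0;
    }

    static int mywdt_cleanup(struct fbxwatchdog *wdt)
    {
        return 0;
    }

    static int mywdt_start(struct fbxwatchdog *wdt)
    {
        /* called with wdt->lock held and irqs disabled: arm the hardware */
        return 0;
    }

    static int mywdt_stop(struct fbxwatchdog *wdt)
    {
        /* disable the hardware counter here */
        return 0;
    }

    static struct fbxwatchdog mywdt = {
        .name        = "example-wdt",   /* placeholder name */
        .wdt_init    = mywdt_init,
        .wdt_cleanup = mywdt_cleanup,
        .wdt_start   = mywdt_start,
        .wdt_stop    = mywdt_stop,
    };

    static int __init mywdt_register(void)
    {
        return fbxwatchdog_register(&mywdt);
    }
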
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/LzmaDecode.h	2010-12-29 19:30:08.671444866 +0100
@@ -0,0 +1,113 @@
+/* 
+  LzmaDecode.h
+  LZMA Decoder interface
+
+  LZMA SDK 4.40 Copyright (c) 1999-2006 Igor Pavlov (2006-05-01)
+  http://www.7-zip.org/
+
+  LZMA SDK is licensed under two licenses:
+  1) GNU Lesser General Public License (GNU LGPL)
+  2) Common Public License (CPL)
+  It means that you can select one of these two licenses and 
+  follow rules of that license.
+
+  SPECIAL EXCEPTION:
+  Igor Pavlov, as the author of this code, expressly permits you to 
+  statically or dynamically link your code (or bind by name) to the 
+  interfaces of this file without subjecting your linked code to the 
+  terms of the CPL or GNU LGPL. Any modifications or additions 
+  to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#ifndef __LZMADECODE_H
+#define __LZMADECODE_H
+
+#include "LzmaTypes.h"
+
+/* #define _LZMA_IN_CB */
+/* Use callback for input data */
+
+/* #define _LZMA_OUT_READ */
+/* Use read function for output data */
+
+/* #define _LZMA_PROB32 */
+/* It can increase speed on some 32-bit CPUs, 
+   but memory usage will be doubled in that case */
+
+/* #define _LZMA_LOC_OPT */
+/* Enable local speed optimizations inside code */
+
+#ifdef _LZMA_PROB32
+#define CProb UInt32
+#else
+#define CProb UInt16
+#endif
+
+#define LZMA_RESULT_OK 0
+#define LZMA_RESULT_DATA_ERROR 1
+
+#ifdef _LZMA_IN_CB
+typedef struct _ILzmaInCallback
+{
+  int (*Read)(void *object, const unsigned char **buffer, SizeT *bufferSize);
+} ILzmaInCallback;
+#endif
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+#define LZMA_PROPERTIES_SIZE 5
+
+typedef struct _CLzmaProperties
+{
+  int lc;
+  int lp;
+  int pb;
+  #ifdef _LZMA_OUT_READ
+  UInt32 DictionarySize;
+  #endif
+}CLzmaProperties;
+
+int LzmaDecodeProperties(CLzmaProperties *propsRes, const unsigned char *propsData, int size);
+
+#define LzmaGetNumProbs(Properties) (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((Properties)->lc + (Properties)->lp)))
+
+#define kLzmaNeedInitId (-2)
+
+typedef struct _CLzmaDecoderState
+{
+  CLzmaProperties Properties;
+  CProb *Probs;
+
+  #ifdef _LZMA_IN_CB
+  const unsigned char *Buffer;
+  const unsigned char *BufferLim;
+  #endif
+
+  #ifdef _LZMA_OUT_READ
+  unsigned char *Dictionary;
+  UInt32 Range;
+  UInt32 Code;
+  UInt32 DictionaryPos;
+  UInt32 GlobalPos;
+  UInt32 DistanceLimit;
+  UInt32 Reps[4];
+  int State;
+  int RemainLen;
+  unsigned char TempDictionary[4];
+  #endif
+} CLzmaDecoderState;
+
+#ifdef _LZMA_OUT_READ
+#define LzmaDecoderInit(vs) { (vs)->RemainLen = kLzmaNeedInitId; }
+#endif
+
+int LzmaDecode(CLzmaDecoderState *vs,
+    #ifdef _LZMA_IN_CB
+    ILzmaInCallback *inCallback,
+    #else
+    const unsigned char *inStream, SizeT inSize, SizeT *inSizeProcessed,
+    #endif
+    unsigned char *outStream, SizeT outSize, SizeT *outSizeProcessed);
+
+#endif
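With none of the optional _LZMA_* switches defined, the decoder above is driven in one shot: parse the 5-byte properties header, allocate the probability array sized by LzmaGetNumProbs(), then decode. A minimal sketch (the allocator choice and the error mapping are up to the caller):

    #include <linux/errno.h>
    #include <linux/vmalloc.h>
    #include <linux/LzmaDecode.h>

    /* decode an LZMA stream whose 5 property bytes prefix the compressed data */
    static int lzma_unpack(const unsigned char *in, SizeT in_len,
                           unsigned char *out, SizeT out_len)
    {
        CLzmaDecoderState state;
        SizeT in_done, out_done;
        int ret;

        if (in_len < LZMA_PROPERTIES_SIZE)
            return -EINVAL;

        if (LzmaDecodeProperties(&state.Properties, in,
                                 LZMA_PROPERTIES_SIZE) != LZMA_RESULT_OK)
            return -EINVAL;

        /* probability model: size depends on the lc/lp properties */
        state.Probs = vmalloc(LzmaGetNumProbs(&state.Properties) * sizeof(CProb));
        if (!state.Probs)
            return -ENOMEM;

        ret = LzmaDecode(&state,
                         in + LZMA_PROPERTIES_SIZE, in_len - LZMA_PROPERTIES_SIZE,
                         &in_done, out, out_len, &out_done);

        vfree(state.Probs);
        return ret == LZMA_RESULT_OK ? 0 : -EIO;
    }
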
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/LzmaTypes.h	2010-12-29 19:30:08.671444866 +0100
@@ -0,0 +1,45 @@
+/* 
+LzmaTypes.h 
+
+Types for LZMA Decoder
+
+This file written and distributed to public domain by Igor Pavlov.
+This file is part of LZMA SDK 4.40 (2006-05-01)
+*/
+
+#ifndef __LZMATYPES_H
+#define __LZMATYPES_H
+
+#ifndef _7ZIP_BYTE_DEFINED
+#define _7ZIP_BYTE_DEFINED
+typedef unsigned char Byte;
+#endif 
+
+#ifndef _7ZIP_UINT16_DEFINED
+#define _7ZIP_UINT16_DEFINED
+typedef unsigned short UInt16;
+#endif 
+
+#ifndef _7ZIP_UINT32_DEFINED
+#define _7ZIP_UINT32_DEFINED
+#ifdef _LZMA_UINT32_IS_ULONG
+typedef unsigned long UInt32;
+#else
+typedef unsigned int UInt32;
+#endif
+#endif 
+
+/* #define _LZMA_SYSTEM_SIZE_T */
+/* Use system's size_t. You can use it to enable 64-bit sizes supporting */
+
+#ifndef _7ZIP_SIZET_DEFINED
+#define _7ZIP_SIZET_DEFINED
+#ifdef _LZMA_SYSTEM_SIZE_T
+#include <stddef.h>
+typedef size_t SizeT;
+#else
+typedef UInt32 SizeT;
+#endif
+#endif
+
+#endif
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/iptable_tproxy.h	2010-12-29 19:30:08.731445169 +0100
@@ -0,0 +1,66 @@
+#ifndef _IPTABLE_TPROXY_H
+#define _IPTABLE_TPROXY_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/time.h>
+#include <asm/atomic.h>
+#include <net/sock.h>
+
+struct ip_tproxy_sockref;
+
+struct ip_tproxy_hash
+{
+	struct list_head list;
+	struct ip_tproxy_sockref *sockref;
+};
+
+struct ip_tproxy_sockref
+{
+	int flags;
+	atomic_t references;
+
+	u8 proto;
+
+	/* foreign address associated with a local socket */
+	u32 faddr;
+	u16 fport;
+
+	/* local socket address */
+	u32 laddr;
+	u16 lport;
+
+	/* remote addresses, needed for datagram protocols when the peer
+	 * sends the packet triggering the NAT translation. (as there might
+	 * be multiple sockrefs on the same foreign address).
+	 */
+	u32 raddr;
+	u16 rport;
+
+	/* hash chains indexed by local and foreign addresses */
+	struct ip_tproxy_hash bylocal, byforeign;
+
+	/* lock protecting access to related list */
+	spinlock_t relatedlock;
+	/* number of related connections */
+	atomic_t related;
+	/* list of related connections */
+	struct list_head relatedct;
+
+	/* socket which we were assigned to */
+	struct sock *assigned_to;
+
+	/* How many sockets use this sockref? Used for mark-only sockrefs,
+	 * which can be shared between multiple sockets bound to the same local
+	 * address */
+	atomic_t socket_count;
+
+	/* when was this entry inserted in hash */
+	struct timespec tv_hashed;
+};
+
+extern int
+ip_tproxy_setup_nat(struct sk_buff **pskb, int hooknum,
+		    struct ip_tproxy_sockref *sr, unsigned int flags);
+
+#endif
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/ip_tproxy.h	2010-12-29 19:30:08.731445169 +0100
@@ -0,0 +1,78 @@
+/*
+ * Transparent proxy support for Linux/iptables
+ *
+ * Copyright (c) 2002-2004 BalaBit IT Ltd.
+ * Author: Balázs Scheidler
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _IP_TPROXY_H
+#define _IP_TPROXY_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#include <linux/in.h>
+#else
+#include <netinet/in.h>
+#ifndef IP_RECVORIGADDRS
+#define IP_RECVORIGADDRS	11273
+#define IP_ORIGADDRS	IP_RECVORIGADDRS
+struct in_origaddrs {
+        struct in_addr ioa_srcaddr;
+        struct in_addr ioa_dstaddr;
+        unsigned short int ioa_srcport;
+        unsigned short int ioa_dstport;
+};
+#endif
+#endif
+
+/*
+ * used in setsockopt(SOL_IP, IP_TPROXY) should not collide
+ * with values in <linux/in.h>
+ */
+
+#define IP_TPROXY	11274
+
+/* tproxy operations */
+enum {
+	TPROXY_VERSION = 0,
+	TPROXY_ASSIGN,
+	TPROXY_UNASSIGN,
+	TPROXY_QUERY,
+	TPROXY_FLAGS,
+	TPROXY_ALLOC,
+	TPROXY_CONNECT
+};
+
+/* bitfields in IP_TPROXY_FLAGS */
+#define ITP_CONNECT     0x00000001
+#define ITP_LISTEN      0x00000002
+#define ITP_ESTABLISHED 0x00000004
+
+#define ITP_ONCE        0x00010000
+#define ITP_MARK        0x00020000
+#define ITP_APPLIED     0x00040000
+#define ITP_UNIDIR      0x00080000
+
+struct in_tproxy_addr{
+	struct in_addr	faddr;
+	u_int16_t	fport;
+};
+
+struct in_tproxy {
+	/* fixed part, should not change between versions */
+	u_int32_t op;
+	/* extensible part */
+	union _in_args {
+		u_int32_t		version;
+		struct in_tproxy_addr	addr;
+		u_int32_t		flags;
+	} v;
+};
+
+#endif
+
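As the comment in the header says, the option is driven through setsockopt(SOL_IP, IP_TPROXY) with the struct above. The sketch below assigns a foreign address to a bound UDP socket and then enables connect-type translation; the addresses, ports and flag combination are illustrative, and the ASSIGN-then-FLAGS ordering is an assumption based on common usage of this interface.

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/netfilter_ipv4/ip_tproxy.h>

    int main(void)
    {
        struct sockaddr_in local;
        struct in_tproxy itp;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        /* bind to the local address the proxy uses (illustrative) */
        memset(&local, 0, sizeof(local));
        local.sin_family = AF_INET;
        local.sin_addr.s_addr = inet_addr("192.168.0.1");
        local.sin_port = htons(8080);
        if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0)
            perror("bind");

        /* assign the foreign (spoofed) source address for this socket */
        memset(&itp, 0, sizeof(itp));
        itp.op = TPROXY_ASSIGN;
        itp.v.addr.faddr.s_addr = inet_addr("10.0.0.1");
        itp.v.addr.fport = htons(0);
        if (setsockopt(fd, SOL_IP, IP_TPROXY, &itp, sizeof(itp)) < 0)
            perror("TPROXY_ASSIGN");

        /* enable connect-type translation on the assigned address */
        itp.op = TPROXY_FLAGS;
        itp.v.flags = ITP_CONNECT | ITP_ONCE;
        if (setsockopt(fd, SOL_IP, IP_TPROXY, &itp, sizeof(itp)) < 0)
            perror("TPROXY_FLAGS");

        return 0;
    }
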
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/netfilter_ipv4/ipt_TPROXY.h	2010-12-29 19:30:08.731445169 +0100
@@ -0,0 +1,24 @@
+/*
+ * Transparent proxy support for Linux/iptables
+ *
+ * Copyright (c) 2002-2004 BalaBit IT Ltd.
+ * Author: Balázs Scheidler
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _IPT_TPROXY_H_target
+#define _IPT_TPROXY_H_target
+
+struct ipt_tproxy_target_info {
+	u_int32_t mark_mask;
+	u_int32_t mark_value;
+	u_int32_t laddr;
+	u_int16_t lport;
+};
+
+#endif /*_IPT_TPROXY_H_target*/
+
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/sqlzma.h	2010-12-29 19:30:08.751517111 +0100
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2006 Junjiro Okajima
+ * Copyright (C) 2006 Tomas Matejicek, slax.org
+ *
+ * LICENSE follows the described one in lzma.
+ */
+
+/* $Id: sqlzma.h,v 1.13 2007/01/07 15:12:48 jro Exp $ */
+
+#ifndef __sqlzma_h__
+#define __sqlzma_h__
+
+#ifndef __KERNEL__
+#include <stdlib.h>
+#include <string.h>
+#include <zlib.h>
+#ifdef _REENTRANT
+#include <pthread.h>
+#endif
+#else
+#include <linux/zlib.h>
+#endif
+#define _7ZIP_BYTE_DEFINED
+
+/*
+ * detect the compression method automatically by the first byte of compressed
+ * data.
+ * according to rfc1950, the first byte of zlib compression must be 0x?8.
+ */
+#define is_lzma(c)	(c == 0x5d)
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef __KERNEL__
+/* for mksquashfs only */
+int sqlzma_cm(int lzma, z_stream *stream, Bytef *next_in, uInt avail_in,
+	      Bytef *next_out, uInt avail_out);
+#endif
+
+/* ---------------------------------------------------------------------- */
+/*
+ * Three patterns for sqlzma uncompression. Very dirty code.
+ * - kernel space (squashfs kernel module)
+ * - user space with pthread (mksquashfs)
+ * - user space without pthread (unsquashfs)
+ */
+
+struct sized_buf {
+	unsigned int	sz;
+	unsigned char	*buf;
+};
+
+enum {SQUN_PROB, SQUN_RESULT, SQUN_LAST};
+struct sqlzma_un {
+	int			un_lzma;
+	struct sized_buf	un_a[SQUN_LAST];
+	unsigned char		un_prob[31960]; /* unlzma 64KB */
+	z_stream		un_stream;
+#define un_cmbuf	un_stream.next_in
+#define un_cmlen	un_stream.avail_in
+#define un_resbuf	un_stream.next_out
+#define un_resroom	un_stream.avail_out
+#define un_reslen	un_stream.total_out
+};
+
+int sqlzma_init(struct sqlzma_un *un, int do_lzma, unsigned int res_sz);
+int sqlzma_un(struct sqlzma_un *un, struct sized_buf *src, struct sized_buf *dst);
+void sqlzma_fin(struct sqlzma_un *un);
+
+/* ---------------------------------------------------------------------- */
+
+#ifdef __cplusplus
+};
+#endif
+#endif
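The three functions declared above form the whole uncompression API. Below is a minimal kernel-side sketch of the intended call sequence; it is illustrative only, the do_lzma semantics and the use of un_reslen are assumptions read off the macros above, and the authoritative usage is lib/sqlzma-uncomp.c later in this patch.

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/sqlzma.h>

/* Decompress one block with the helpers above.  struct sqlzma_un embeds a
 * ~32KB probability array, so it must not live on the kernel stack. */
static int example_sqlzma(void *cbuf, unsigned int clen,
			  void *obuf, unsigned int res_sz)
{
	struct sqlzma_un *un;
	struct sized_buf src = { .sz = clen, .buf = cbuf };
	struct sized_buf dst = { .sz = res_sz, .buf = obuf };
	int err;

	un = vmalloc(sizeof(*un));
	if (!un)
		return -ENOMEM;

	/* do_lzma = 1 is assumed to mean "allow LZMA input"; zlib vs. LZMA
	 * is still detected per block from the first byte, see is_lzma(). */
	err = sqlzma_init(un, 1, res_sz);
	if (!err) {
		err = sqlzma_un(un, &src, &dst);
		/* on success the produced length is in un->un_reslen */
		sqlzma_fin(un);
	}
	vfree(un);
	return err;
}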
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/sqmagic.h	2010-12-29 19:30:08.751517111 +0100
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2006 Junjiro Okajima
+ * Copyright (C) 2006 Tomas Matejicek, slax.org
+ *
+ * LICENSE must follow the one in squashfs.
+ */
+
+/* $Id: sqmagic.h,v 1.2 2006/11/27 03:54:58 jro Exp $ */
+
+#ifndef __sqmagic_h__
+#define __sqmagic_h__
+
+/* see SQUASHFS_MAGIC in squashfs_fs.h */
+#define SQUASHFS_MAGIC_LZMA		0x71736873
+#define SQUASHFS_MAGIC_LZMA_SWAP	0x73687371
+
+#endif
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/squashfs_fs.h	2010-12-29 19:30:08.751517111 +0100
@@ -0,0 +1,934 @@
+#ifndef SQUASHFS_FS
+#define SQUASHFS_FS
+
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
+ * Phillip Lougher <phillip@lougher.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * squashfs_fs.h
+ */
+
+#ifndef CONFIG_SQUASHFS_2_0_COMPATIBILITY
+#define CONFIG_SQUASHFS_2_0_COMPATIBILITY
+#endif
+
+#ifdef	CONFIG_SQUASHFS_VMALLOC
+#define SQUASHFS_ALLOC(a)		vmalloc(a)
+#define SQUASHFS_FREE(a)		vfree(a)
+#else
+#define SQUASHFS_ALLOC(a)		kmalloc(a, GFP_KERNEL)
+#define SQUASHFS_FREE(a)		kfree(a)
+#endif
+#define SQUASHFS_CACHED_FRAGMENTS	CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE	
+#define SQUASHFS_MAJOR			3
+#define SQUASHFS_MINOR			0
+#define SQUASHFS_MAGIC			0x73717368
+#define SQUASHFS_MAGIC_SWAP		0x68737173
+#define SQUASHFS_START			0
+
+/* size of metadata (inode and directory) blocks */
+#define SQUASHFS_METADATA_SIZE		8192
+#define SQUASHFS_METADATA_LOG		13
+
+/* default size of data blocks */
+#define SQUASHFS_FILE_SIZE		65536
+#define SQUASHFS_FILE_LOG		16
+
+#define SQUASHFS_FILE_MAX_SIZE		65536
+
+/* Max number of uids and gids */
+#define SQUASHFS_UIDS			256
+#define SQUASHFS_GUIDS			255
+
+/* Max length of filename (not 255) */
+#define SQUASHFS_NAME_LEN		256
+
+#define SQUASHFS_INVALID		((long long) 0xffffffffffff)
+#define SQUASHFS_INVALID_FRAG		((unsigned int) 0xffffffff)
+#define SQUASHFS_INVALID_BLK		((long long) -1)
+#define SQUASHFS_USED_BLK		((long long) -2)
+
+/* Filesystem flags */
+#define SQUASHFS_NOI			0
+#define SQUASHFS_NOD			1
+#define SQUASHFS_CHECK			2
+#define SQUASHFS_NOF			3
+#define SQUASHFS_NO_FRAG		4
+#define SQUASHFS_ALWAYS_FRAG		5
+#define SQUASHFS_DUPLICATE		6
+#define SQUASHFS_EXPORT			7
+
+#define SQUASHFS_BIT(flag, bit)		((flag >> bit) & 1)
+
+#define SQUASHFS_UNCOMPRESSED_INODES(flags)	SQUASHFS_BIT(flags, \
+						SQUASHFS_NOI)
+
+#define SQUASHFS_UNCOMPRESSED_DATA(flags)	SQUASHFS_BIT(flags, \
+						SQUASHFS_NOD)
+
+#define SQUASHFS_UNCOMPRESSED_FRAGMENTS(flags)	SQUASHFS_BIT(flags, \
+						SQUASHFS_NOF)
+
+#define SQUASHFS_NO_FRAGMENTS(flags)		SQUASHFS_BIT(flags, \
+						SQUASHFS_NO_FRAG)
+
+#define SQUASHFS_ALWAYS_FRAGMENTS(flags)	SQUASHFS_BIT(flags, \
+						SQUASHFS_ALWAYS_FRAG)
+
+#define SQUASHFS_DUPLICATES(flags)		SQUASHFS_BIT(flags, \
+						SQUASHFS_DUPLICATE)
+
+#define SQUASHFS_EXPORTABLE(flags)		SQUASHFS_BIT(flags, \
+						SQUASHFS_EXPORT)
+
+#define SQUASHFS_CHECK_DATA(flags)		SQUASHFS_BIT(flags, \
+						SQUASHFS_CHECK)
+
+#define SQUASHFS_MKFLAGS(noi, nod, check_data, nof, no_frag, always_frag, \
+		duplicate_checking, exportable)	(noi | (nod << 1) | (check_data << 2) \
+		| (nof << 3) | (no_frag << 4) | (always_frag << 5) | \
+		(duplicate_checking << 6) | (exportable << 7))
+
+/* Max number of types and file types */
+#define SQUASHFS_DIR_TYPE		1
+#define SQUASHFS_FILE_TYPE		2
+#define SQUASHFS_SYMLINK_TYPE		3
+#define SQUASHFS_BLKDEV_TYPE		4
+#define SQUASHFS_CHRDEV_TYPE		5
+#define SQUASHFS_FIFO_TYPE		6
+#define SQUASHFS_SOCKET_TYPE		7
+#define SQUASHFS_LDIR_TYPE		8
+#define SQUASHFS_LREG_TYPE		9
+
+/* 1.0 filesystem type definitions */
+#define SQUASHFS_TYPES			5
+#define SQUASHFS_IPC_TYPE		0
+
+/* Flag whether a block is compressed or uncompressed; the bit is set if the
+ * block is uncompressed */
+#define SQUASHFS_COMPRESSED_BIT		(1 << 15)
+
+#define SQUASHFS_COMPRESSED_SIZE(B)	(((B) & ~SQUASHFS_COMPRESSED_BIT) ? \
+		(B) & ~SQUASHFS_COMPRESSED_BIT :  SQUASHFS_COMPRESSED_BIT)
+
+#define SQUASHFS_COMPRESSED(B)		(!((B) & SQUASHFS_COMPRESSED_BIT))
+
+#define SQUASHFS_COMPRESSED_BIT_BLOCK		(1 << 24)
+
+#define SQUASHFS_COMPRESSED_SIZE_BLOCK(B)	(((B) & \
+	~SQUASHFS_COMPRESSED_BIT_BLOCK) ? (B) & \
+	~SQUASHFS_COMPRESSED_BIT_BLOCK : SQUASHFS_COMPRESSED_BIT_BLOCK)
+
+#define SQUASHFS_COMPRESSED_BLOCK(B)	(!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
+
+/*
+ * Inode number ops.  Inodes consist of a compressed block number and an
+ * uncompressed offset within that block
+ */
+#define SQUASHFS_INODE_BLK(a)		((unsigned int) ((a) >> 16))
+
+#define SQUASHFS_INODE_OFFSET(a)	((unsigned int) ((a) & 0xffff))
+
+#define SQUASHFS_MKINODE(A, B)		((squashfs_inode_t)(((squashfs_inode_t) (A)\
+					<< 16) + (B)))
+
+/* Compute 32 bit VFS inode number from squashfs inode number */
+#define SQUASHFS_MK_VFS_INODE(a, b)	((unsigned int) (((a) << 8) + \
+					((b) >> 2) + 1))
+/* XXX */
+
+/* Translate between VFS mode and squashfs mode */
+#define SQUASHFS_MODE(a)		((a) & 0xfff)
+
+/* fragment and fragment table defines */
+#define SQUASHFS_FRAGMENT_BYTES(A)	((A) * sizeof(struct squashfs_fragment_entry))
+
+#define SQUASHFS_FRAGMENT_INDEX(A)	(SQUASHFS_FRAGMENT_BYTES(A) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_OFFSET(A)	(SQUASHFS_FRAGMENT_BYTES(A) % \
+						SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEXES(A)	((SQUASHFS_FRAGMENT_BYTES(A) + \
+					SQUASHFS_METADATA_SIZE - 1) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_BYTES(A)	(SQUASHFS_FRAGMENT_INDEXES(A) *\
+						sizeof(long long))
+
+/* inode lookup table defines */
+#define SQUASHFS_LOOKUP_BYTES(A)	((A) * sizeof(squashfs_inode_t))
+
+#define SQUASHFS_LOOKUP_BLOCK(A)		(SQUASHFS_LOOKUP_BYTES(A) / \
+						SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_OFFSET(A)		(SQUASHFS_LOOKUP_BYTES(A) % \
+						SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCKS(A)	((SQUASHFS_LOOKUP_BYTES(A) + \
+					SQUASHFS_METADATA_SIZE - 1) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_LOOKUP_BLOCK_BYTES(A)	(SQUASHFS_LOOKUP_BLOCKS(A) *\
+					sizeof(long long))
+
+/* cached data constants for filesystem */
+#define SQUASHFS_CACHED_BLKS		8
+
+#define SQUASHFS_MAX_FILE_SIZE_LOG	64
+
+#define SQUASHFS_MAX_FILE_SIZE		((long long) 1 << \
+					(SQUASHFS_MAX_FILE_SIZE_LOG - 2))
+
+#define SQUASHFS_MARKER_BYTE		0xff
+
+/* meta index cache */
+#define SQUASHFS_META_INDEXES	(SQUASHFS_METADATA_SIZE / sizeof(unsigned int))
+#define SQUASHFS_META_ENTRIES	31
+#define SQUASHFS_META_NUMBER	8
+#define SQUASHFS_SLOTS		4
+
+struct meta_entry {
+	long long		data_block;
+	unsigned int		index_block;
+	unsigned short		offset;
+	unsigned short		pad;
+};
+
+struct meta_index {
+	unsigned int		inode_number;
+	unsigned int		offset;
+	unsigned short		entries;
+	unsigned short		skip;
+	unsigned short		locked;
+	unsigned short		pad;
+	struct meta_entry	meta_entry[SQUASHFS_META_ENTRIES];
+};
+
+
+/*
+ * definitions for structures on disk
+ */
+
+typedef long long		squashfs_block_t;
+typedef long long		squashfs_inode_t;
+
+struct squashfs_super_block {
+	unsigned int		s_magic;
+	unsigned int		inodes;
+	unsigned int		bytes_used_2;
+	unsigned int		uid_start_2;
+	unsigned int		guid_start_2;
+	unsigned int		inode_table_start_2;
+	unsigned int		directory_table_start_2;
+	unsigned int		s_major:16;
+	unsigned int		s_minor:16;
+	unsigned int		block_size_1:16;
+	unsigned int		block_log:16;
+	unsigned int		flags:8;
+	unsigned int		no_uids:8;
+	unsigned int		no_guids:8;
+	unsigned int		mkfs_time /* time of filesystem creation */;
+	squashfs_inode_t	root_inode;
+	unsigned int		block_size;
+	unsigned int		fragments;
+	unsigned int		fragment_table_start_2;
+	long long		bytes_used;
+	long long		uid_start;
+	long long		guid_start;
+	long long		inode_table_start;
+	long long		directory_table_start;
+	long long		fragment_table_start;
+	long long		lookup_table_start;
+} __attribute__ ((packed));
+
+struct squashfs_dir_index {
+	unsigned int		index;
+	unsigned int		start_block;
+	unsigned char		size;
+	unsigned char		name[0];
+} __attribute__ ((packed));
+
+#define SQUASHFS_BASE_INODE_HEADER		\
+	unsigned int		inode_type:4;	\
+	unsigned int		mode:12;	\
+	unsigned int		uid:8;		\
+	unsigned int		guid:8;		\
+	unsigned int		mtime;		\
+	unsigned int 		inode_number;
+
+struct squashfs_base_inode_header {
+	SQUASHFS_BASE_INODE_HEADER;
+} __attribute__ ((packed));
+
+struct squashfs_ipc_inode_header {
+	SQUASHFS_BASE_INODE_HEADER;
+	unsigned int		nlink;
+} __attribute__ ((packed));
+
+struct squashfs_dev_inode_header {
+	SQUASHFS_BASE_INODE_HEADER;
+	unsigned int		nlink;
+	unsigned short		rdev;
+} __attribute__ ((packed));
+	
+struct squashfs_symlink_inode_header {
+	SQUASHFS_BASE_INODE_HEADER;
+	unsigned int		nlink;
+	unsigned short		symlink_size;
+	char			symlink[0];
+} __attribute__ ((packed));
+
+struct squashfs_reg_inode_header {
+	SQUASHFS_BASE_INODE_HEADER;
+	squashfs_block_t	start_block;
+	unsigned int		fragment;
+	unsigned int		offset;
+	unsigned int		file_size;
+	unsigned short		block_list[0];
+} __attribute__ ((packed));
+
+struct squashfs_lreg_inode_header {
+	SQUASHFS_BASE_INODE_HEADER;
+	unsigned int		nlink;
+	squashfs_block_t	start_block;
+	unsigned int		fragment;
+	unsigned int		offset;
+	long long		file_size;
+	unsigned short		block_list[0];
+} __attribute__ ((packed));
+
+struct squashfs_dir_inode_header {
+	SQUASHFS_BASE_INODE_HEADER;
+	unsigned int		nlink;
+	unsigned int		file_size:19;
+	unsigned int		offset:13;
+	unsigned int		start_block;
+	unsigned int		parent_inode;
+} __attribute__  ((packed));
+
+struct squashfs_ldir_inode_header {
+	SQUASHFS_BASE_INODE_HEADER;
+	unsigned int		nlink;
+	unsigned int		file_size:27;
+	unsigned int		offset:13;
+	unsigned int		start_block;
+	unsigned int		i_count:16;
+	unsigned int		parent_inode;
+	struct squashfs_dir_index	index[0];
+} __attribute__  ((packed));
+
+union squashfs_inode_header {
+	struct squashfs_base_inode_header	base;
+	struct squashfs_dev_inode_header	dev;
+	struct squashfs_symlink_inode_header	symlink;
+	struct squashfs_reg_inode_header	reg;
+	struct squashfs_lreg_inode_header	lreg;
+	struct squashfs_dir_inode_header	dir;
+	struct squashfs_ldir_inode_header	ldir;
+	struct squashfs_ipc_inode_header	ipc;
+};
+	
+struct squashfs_dir_entry {
+	unsigned int		offset:13;
+	unsigned int		type:3;
+	unsigned int		size:8;
+	int			inode_number:16;
+	char			name[0];
+} __attribute__ ((packed));
+
+struct squashfs_dir_header {
+	unsigned int		count:8;
+	unsigned int		start_block;
+	unsigned int		inode_number;
+} __attribute__ ((packed));
+
+struct squashfs_fragment_entry {
+	long long		start_block;
+	unsigned int		size;
+	unsigned int		pending;
+} __attribute__ ((packed));
+
+extern int squashfs_uncompress_block(void *d, int dstlen, void *s, int srclen);
+extern int squashfs_uncompress_init(void);
+extern int squashfs_uncompress_exit(void);
+
+/*
+ * macros to convert each packed bitfield structure from little endian to big
+ * endian and vice versa.  These are needed when creating or using a filesystem
+ * on a machine with different byte ordering to the target architecture.
+ *
+ */
+
+#define SQUASHFS_SWAP_START \
+	int bits;\
+	int b_pos;\
+	unsigned long long val;\
+	unsigned char *s;\
+	unsigned char *d;
+
+#define SQUASHFS_SWAP_SUPER_BLOCK(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_super_block));\
+	SQUASHFS_SWAP((s)->s_magic, d, 0, 32);\
+	SQUASHFS_SWAP((s)->inodes, d, 32, 32);\
+	SQUASHFS_SWAP((s)->bytes_used_2, d, 64, 32);\
+	SQUASHFS_SWAP((s)->uid_start_2, d, 96, 32);\
+	SQUASHFS_SWAP((s)->guid_start_2, d, 128, 32);\
+	SQUASHFS_SWAP((s)->inode_table_start_2, d, 160, 32);\
+	SQUASHFS_SWAP((s)->directory_table_start_2, d, 192, 32);\
+	SQUASHFS_SWAP((s)->s_major, d, 224, 16);\
+	SQUASHFS_SWAP((s)->s_minor, d, 240, 16);\
+	SQUASHFS_SWAP((s)->block_size_1, d, 256, 16);\
+	SQUASHFS_SWAP((s)->block_log, d, 272, 16);\
+	SQUASHFS_SWAP((s)->flags, d, 288, 8);\
+	SQUASHFS_SWAP((s)->no_uids, d, 296, 8);\
+	SQUASHFS_SWAP((s)->no_guids, d, 304, 8);\
+	SQUASHFS_SWAP((s)->mkfs_time, d, 312, 32);\
+	SQUASHFS_SWAP((s)->root_inode, d, 344, 64);\
+	SQUASHFS_SWAP((s)->block_size, d, 408, 32);\
+	SQUASHFS_SWAP((s)->fragments, d, 440, 32);\
+	SQUASHFS_SWAP((s)->fragment_table_start_2, d, 472, 32);\
+	SQUASHFS_SWAP((s)->bytes_used, d, 504, 64);\
+	SQUASHFS_SWAP((s)->uid_start, d, 568, 64);\
+	SQUASHFS_SWAP((s)->guid_start, d, 632, 64);\
+	SQUASHFS_SWAP((s)->inode_table_start, d, 696, 64);\
+	SQUASHFS_SWAP((s)->directory_table_start, d, 760, 64);\
+	SQUASHFS_SWAP((s)->fragment_table_start, d, 824, 64);\
+	SQUASHFS_SWAP((s)->lookup_table_start, d, 888, 64);\
+}
+
+#define SQUASHFS_SWAP_BASE_INODE_CORE(s, d, n)\
+	SQUASHFS_MEMSET(s, d, n);\
+	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
+	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
+	SQUASHFS_SWAP((s)->uid, d, 16, 8);\
+	SQUASHFS_SWAP((s)->guid, d, 24, 8);\
+	SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
+	SQUASHFS_SWAP((s)->inode_number, d, 64, 32);
+
+#define SQUASHFS_SWAP_BASE_INODE_HEADER(s, d, n) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, n)\
+}
+
+#define SQUASHFS_SWAP_IPC_INODE_HEADER(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
+			sizeof(struct squashfs_ipc_inode_header))\
+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+}
+
+#define SQUASHFS_SWAP_DEV_INODE_HEADER(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
+			sizeof(struct squashfs_dev_inode_header)); \
+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+	SQUASHFS_SWAP((s)->rdev, d, 128, 16);\
+}
+
+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
+			sizeof(struct squashfs_symlink_inode_header));\
+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+	SQUASHFS_SWAP((s)->symlink_size, d, 128, 16);\
+}
+
+#define SQUASHFS_SWAP_REG_INODE_HEADER(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
+			sizeof(struct squashfs_reg_inode_header));\
+	SQUASHFS_SWAP((s)->start_block, d, 96, 64);\
+	SQUASHFS_SWAP((s)->fragment, d, 160, 32);\
+	SQUASHFS_SWAP((s)->offset, d, 192, 32);\
+	SQUASHFS_SWAP((s)->file_size, d, 224, 32);\
+}
+
+#define SQUASHFS_SWAP_LREG_INODE_HEADER(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
+			sizeof(struct squashfs_lreg_inode_header));\
+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+	SQUASHFS_SWAP((s)->start_block, d, 128, 64);\
+	SQUASHFS_SWAP((s)->fragment, d, 192, 32);\
+	SQUASHFS_SWAP((s)->offset, d, 224, 32);\
+	SQUASHFS_SWAP((s)->file_size, d, 256, 64);\
+}
+
+#define SQUASHFS_SWAP_DIR_INODE_HEADER(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
+			sizeof(struct squashfs_dir_inode_header));\
+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+	SQUASHFS_SWAP((s)->file_size, d, 128, 19);\
+	SQUASHFS_SWAP((s)->offset, d, 147, 13);\
+	SQUASHFS_SWAP((s)->start_block, d, 160, 32);\
+	SQUASHFS_SWAP((s)->parent_inode, d, 192, 32);\
+}
+
+#define SQUASHFS_SWAP_LDIR_INODE_HEADER(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE(s, d, \
+			sizeof(struct squashfs_ldir_inode_header));\
+	SQUASHFS_SWAP((s)->nlink, d, 96, 32);\
+	SQUASHFS_SWAP((s)->file_size, d, 128, 27);\
+	SQUASHFS_SWAP((s)->offset, d, 155, 13);\
+	SQUASHFS_SWAP((s)->start_block, d, 168, 32);\
+	SQUASHFS_SWAP((s)->i_count, d, 200, 16);\
+	SQUASHFS_SWAP((s)->parent_inode, d, 216, 32);\
+}
+
+#define SQUASHFS_SWAP_DIR_INDEX(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index));\
+	SQUASHFS_SWAP((s)->index, d, 0, 32);\
+	SQUASHFS_SWAP((s)->start_block, d, 32, 32);\
+	SQUASHFS_SWAP((s)->size, d, 64, 8);\
+}
+
+#define SQUASHFS_SWAP_DIR_HEADER(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header));\
+	SQUASHFS_SWAP((s)->count, d, 0, 8);\
+	SQUASHFS_SWAP((s)->start_block, d, 8, 32);\
+	SQUASHFS_SWAP((s)->inode_number, d, 40, 32);\
+}
+
+#define SQUASHFS_SWAP_DIR_ENTRY(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry));\
+	SQUASHFS_SWAP((s)->offset, d, 0, 13);\
+	SQUASHFS_SWAP((s)->type, d, 13, 3);\
+	SQUASHFS_SWAP((s)->size, d, 16, 8);\
+	SQUASHFS_SWAP((s)->inode_number, d, 24, 16);\
+}
+
+#define SQUASHFS_SWAP_FRAGMENT_ENTRY(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry));\
+	SQUASHFS_SWAP((s)->start_block, d, 0, 64);\
+	SQUASHFS_SWAP((s)->size, d, 64, 32);\
+}
+
+#define SQUASHFS_SWAP_INODE_T(s, d) SQUASHFS_SWAP_LONG_LONGS(s, d, 1)
+
+#define SQUASHFS_SWAP_SHORTS(s, d, n) {\
+	int entry;\
+	int bit_position;\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, n * 2);\
+	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
+			16)\
+		SQUASHFS_SWAP(s[entry], d, bit_position, 16);\
+}
+
+#define SQUASHFS_SWAP_INTS(s, d, n) {\
+	int entry;\
+	int bit_position;\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, n * 4);\
+	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
+			32)\
+		SQUASHFS_SWAP(s[entry], d, bit_position, 32);\
+}
+
+#define SQUASHFS_SWAP_LONG_LONGS(s, d, n) {\
+	int entry;\
+	int bit_position;\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, n * 8);\
+	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
+			64)\
+		SQUASHFS_SWAP(s[entry], d, bit_position, 64);\
+}
+
+#define SQUASHFS_SWAP_DATA(s, d, n, bits) {\
+	int entry;\
+	int bit_position;\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, n * bits / 8);\
+	for(entry = 0, bit_position = 0; entry < n; entry++, bit_position += \
+			bits)\
+		SQUASHFS_SWAP(s[entry], d, bit_position, bits);\
+}
+
+#define SQUASHFS_SWAP_FRAGMENT_INDEXES(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
+#define SQUASHFS_SWAP_LOOKUP_BLOCKS(s, d, n) SQUASHFS_SWAP_LONG_LONGS(s, d, n)
+
+#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
+
+struct squashfs_base_inode_header_1 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:4; /* index into uid table */
+	unsigned int		guid:4; /* index into guid table */
+} __attribute__ ((packed));
+
+struct squashfs_ipc_inode_header_1 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:4; /* index into uid table */
+	unsigned int		guid:4; /* index into guid table */
+	unsigned int		type:4;
+	unsigned int		offset:4;
+} __attribute__ ((packed));
+
+struct squashfs_dev_inode_header_1 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:4; /* index into uid table */
+	unsigned int		guid:4; /* index into guid table */
+	unsigned short		rdev;
+} __attribute__ ((packed));
+	
+struct squashfs_symlink_inode_header_1 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:4; /* index into uid table */
+	unsigned int		guid:4; /* index into guid table */
+	unsigned short		symlink_size;
+	char			symlink[0];
+} __attribute__ ((packed));
+
+struct squashfs_reg_inode_header_1 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:4; /* index into uid table */
+	unsigned int		guid:4; /* index into guid table */
+	unsigned int		mtime;
+	unsigned int		start_block;
+	unsigned int		file_size:32;
+	unsigned short		block_list[0];
+} __attribute__ ((packed));
+
+struct squashfs_dir_inode_header_1 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:4; /* index into uid table */
+	unsigned int		guid:4; /* index into guid table */
+	unsigned int		file_size:19;
+	unsigned int		offset:13;
+	unsigned int		mtime;
+	unsigned int		start_block:24;
+} __attribute__  ((packed));
+
+#define SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n) \
+	SQUASHFS_MEMSET(s, d, n);\
+	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
+	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
+	SQUASHFS_SWAP((s)->uid, d, 16, 4);\
+	SQUASHFS_SWAP((s)->guid, d, 20, 4);
+
+#define SQUASHFS_SWAP_BASE_INODE_HEADER_1(s, d, n) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, n)\
+}
+
+#define SQUASHFS_SWAP_IPC_INODE_HEADER_1(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
+			sizeof(struct squashfs_ipc_inode_header_1));\
+	SQUASHFS_SWAP((s)->type, d, 24, 4);\
+	SQUASHFS_SWAP((s)->offset, d, 28, 4);\
+}
+
+#define SQUASHFS_SWAP_DEV_INODE_HEADER_1(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
+			sizeof(struct squashfs_dev_inode_header_1));\
+	SQUASHFS_SWAP((s)->rdev, d, 24, 16);\
+}
+
+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_1(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
+			sizeof(struct squashfs_symlink_inode_header_1));\
+	SQUASHFS_SWAP((s)->symlink_size, d, 24, 16);\
+}
+
+#define SQUASHFS_SWAP_REG_INODE_HEADER_1(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
+			sizeof(struct squashfs_reg_inode_header_1));\
+	SQUASHFS_SWAP((s)->mtime, d, 24, 32);\
+	SQUASHFS_SWAP((s)->start_block, d, 56, 32);\
+	SQUASHFS_SWAP((s)->file_size, d, 88, 32);\
+}
+
+#define SQUASHFS_SWAP_DIR_INODE_HEADER_1(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_1(s, d, \
+			sizeof(struct squashfs_dir_inode_header_1));\
+	SQUASHFS_SWAP((s)->file_size, d, 24, 19);\
+	SQUASHFS_SWAP((s)->offset, d, 43, 13);\
+	SQUASHFS_SWAP((s)->mtime, d, 56, 32);\
+	SQUASHFS_SWAP((s)->start_block, d, 88, 24);\
+}
+
+#endif
+
+#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
+
+struct squashfs_dir_index_2 {
+	unsigned int		index:27;
+	unsigned int		start_block:29;
+	unsigned char		size;
+	unsigned char		name[0];
+} __attribute__ ((packed));
+
+struct squashfs_base_inode_header_2 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:8; /* index into uid table */
+	unsigned int		guid:8; /* index into guid table */
+} __attribute__ ((packed));
+
+struct squashfs_ipc_inode_header_2 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:8; /* index into uid table */
+	unsigned int		guid:8; /* index into guid table */
+} __attribute__ ((packed));
+
+struct squashfs_dev_inode_header_2 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:8; /* index into uid table */
+	unsigned int		guid:8; /* index into guid table */
+	unsigned short		rdev;
+} __attribute__ ((packed));
+	
+struct squashfs_symlink_inode_header_2 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:8; /* index into uid table */
+	unsigned int		guid:8; /* index into guid table */
+	unsigned short		symlink_size;
+	char			symlink[0];
+} __attribute__ ((packed));
+
+struct squashfs_reg_inode_header_2 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:8; /* index into uid table */
+	unsigned int		guid:8; /* index into guid table */
+	unsigned int		mtime;
+	unsigned int		start_block;
+	unsigned int		fragment;
+	unsigned int		offset;
+	unsigned int		file_size:32;
+	unsigned short		block_list[0];
+} __attribute__ ((packed));
+
+struct squashfs_dir_inode_header_2 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:8; /* index into uid table */
+	unsigned int		guid:8; /* index into guid table */
+	unsigned int		file_size:19;
+	unsigned int		offset:13;
+	unsigned int		mtime;
+	unsigned int		start_block:24;
+} __attribute__  ((packed));
+
+struct squashfs_ldir_inode_header_2 {
+	unsigned int		inode_type:4;
+	unsigned int		mode:12; /* protection */
+	unsigned int		uid:8; /* index into uid table */
+	unsigned int		guid:8; /* index into guid table */
+	unsigned int		file_size:27;
+	unsigned int		offset:13;
+	unsigned int		mtime;
+	unsigned int		start_block:24;
+	unsigned int		i_count:16;
+	struct squashfs_dir_index_2	index[0];
+} __attribute__  ((packed));
+
+union squashfs_inode_header_2 {
+	struct squashfs_base_inode_header_2	base;
+	struct squashfs_dev_inode_header_2	dev;
+	struct squashfs_symlink_inode_header_2	symlink;
+	struct squashfs_reg_inode_header_2	reg;
+	struct squashfs_dir_inode_header_2	dir;
+	struct squashfs_ldir_inode_header_2	ldir;
+	struct squashfs_ipc_inode_header_2	ipc;
+};
+	
+struct squashfs_dir_header_2 {
+	unsigned int		count:8;
+	unsigned int		start_block:24;
+} __attribute__ ((packed));
+
+struct squashfs_dir_entry_2 {
+	unsigned int		offset:13;
+	unsigned int		type:3;
+	unsigned int		size:8;
+	char			name[0];
+} __attribute__ ((packed));
+
+struct squashfs_fragment_entry_2 {
+	unsigned int		start_block;
+	unsigned int		size;
+} __attribute__ ((packed));
+
+#define SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
+	SQUASHFS_MEMSET(s, d, n);\
+	SQUASHFS_SWAP((s)->inode_type, d, 0, 4);\
+	SQUASHFS_SWAP((s)->mode, d, 4, 12);\
+	SQUASHFS_SWAP((s)->uid, d, 16, 8);\
+	SQUASHFS_SWAP((s)->guid, d, 24, 8);\
+
+#define SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, n) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, n)\
+}
+
+#define SQUASHFS_SWAP_IPC_INODE_HEADER_2(s, d) \
+	SQUASHFS_SWAP_BASE_INODE_HEADER_2(s, d, sizeof(struct squashfs_ipc_inode_header_2))
+
+#define SQUASHFS_SWAP_DEV_INODE_HEADER_2(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
+			sizeof(struct squashfs_dev_inode_header_2)); \
+	SQUASHFS_SWAP((s)->rdev, d, 32, 16);\
+}
+
+#define SQUASHFS_SWAP_SYMLINK_INODE_HEADER_2(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
+			sizeof(struct squashfs_symlink_inode_header_2));\
+	SQUASHFS_SWAP((s)->symlink_size, d, 32, 16);\
+}
+
+#define SQUASHFS_SWAP_REG_INODE_HEADER_2(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
+			sizeof(struct squashfs_reg_inode_header_2));\
+	SQUASHFS_SWAP((s)->mtime, d, 32, 32);\
+	SQUASHFS_SWAP((s)->start_block, d, 64, 32);\
+	SQUASHFS_SWAP((s)->fragment, d, 96, 32);\
+	SQUASHFS_SWAP((s)->offset, d, 128, 32);\
+	SQUASHFS_SWAP((s)->file_size, d, 160, 32);\
+}
+
+#define SQUASHFS_SWAP_DIR_INODE_HEADER_2(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
+			sizeof(struct squashfs_dir_inode_header_2));\
+	SQUASHFS_SWAP((s)->file_size, d, 32, 19);\
+	SQUASHFS_SWAP((s)->offset, d, 51, 13);\
+	SQUASHFS_SWAP((s)->mtime, d, 64, 32);\
+	SQUASHFS_SWAP((s)->start_block, d, 96, 24);\
+}
+
+#define SQUASHFS_SWAP_LDIR_INODE_HEADER_2(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_SWAP_BASE_INODE_CORE_2(s, d, \
+			sizeof(struct squashfs_ldir_inode_header_2));\
+	SQUASHFS_SWAP((s)->file_size, d, 32, 27);\
+	SQUASHFS_SWAP((s)->offset, d, 59, 13);\
+	SQUASHFS_SWAP((s)->mtime, d, 72, 32);\
+	SQUASHFS_SWAP((s)->start_block, d, 104, 24);\
+	SQUASHFS_SWAP((s)->i_count, d, 128, 16);\
+}
+
+#define SQUASHFS_SWAP_DIR_INDEX_2(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_index_2));\
+	SQUASHFS_SWAP((s)->index, d, 0, 27);\
+	SQUASHFS_SWAP((s)->start_block, d, 27, 29);\
+	SQUASHFS_SWAP((s)->size, d, 56, 8);\
+}
+#define SQUASHFS_SWAP_DIR_HEADER_2(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_header_2));\
+	SQUASHFS_SWAP((s)->count, d, 0, 8);\
+	SQUASHFS_SWAP((s)->start_block, d, 8, 24);\
+}
+
+#define SQUASHFS_SWAP_DIR_ENTRY_2(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_dir_entry_2));\
+	SQUASHFS_SWAP((s)->offset, d, 0, 13);\
+	SQUASHFS_SWAP((s)->type, d, 13, 3);\
+	SQUASHFS_SWAP((s)->size, d, 16, 8);\
+}
+
+#define SQUASHFS_SWAP_FRAGMENT_ENTRY_2(s, d) {\
+	SQUASHFS_SWAP_START\
+	SQUASHFS_MEMSET(s, d, sizeof(struct squashfs_fragment_entry_2));\
+	SQUASHFS_SWAP((s)->start_block, d, 0, 32);\
+	SQUASHFS_SWAP((s)->size, d, 32, 32);\
+}
+
+#define SQUASHFS_SWAP_FRAGMENT_INDEXES_2(s, d, n) SQUASHFS_SWAP_INTS(s, d, n)
+
+/* fragment and fragment table defines */
+#define SQUASHFS_FRAGMENT_BYTES_2(A)	(A * sizeof(struct squashfs_fragment_entry_2))
+
+#define SQUASHFS_FRAGMENT_INDEX_2(A)	(SQUASHFS_FRAGMENT_BYTES_2(A) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_OFFSET_2(A)	(SQUASHFS_FRAGMENT_BYTES_2(A) % \
+						SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEXES_2(A)	((SQUASHFS_FRAGMENT_BYTES_2(A) + \
+					SQUASHFS_METADATA_SIZE - 1) / \
+					SQUASHFS_METADATA_SIZE)
+
+#define SQUASHFS_FRAGMENT_INDEX_BYTES_2(A)	(SQUASHFS_FRAGMENT_INDEXES_2(A) *\
+						sizeof(int))
+
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * macros used to swap each structure entry, taking into account
+ * bitfields and different bitfield placing conventions on differing
+ * architectures
+ */
+
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+	/* convert from little endian to big endian */
+#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, \
+		tbits, b_pos)
+#else
+	/* convert from big endian to little endian */ 
+#define SQUASHFS_SWAP(value, p, pos, tbits) _SQUASHFS_SWAP(value, p, pos, \
+		tbits, 64 - tbits - b_pos)
+#endif
+
+#define _SQUASHFS_SWAP(value, p, pos, tbits, SHIFT) {\
+	b_pos = pos % 8;\
+	val = 0;\
+	s = (unsigned char *)p + (pos / 8);\
+	d = ((unsigned char *) &val) + 7;\
+	for(bits = 0; bits < (tbits + b_pos); bits += 8) \
+		*d-- = *s++;\
+	value = (val >> (SHIFT))/* & ((1 << tbits) - 1)*/;\
+}
+
+#define SQUASHFS_MEMSET(s, d, n)	memset(s, 0, n);
+
+#endif
+#endif
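As a quick illustration of the inode addressing scheme defined above (metadata block position in the upper bits, a 16-bit offset into the uncompressed block in the low bits), the macros round-trip as in the sketch below; it is illustrative only and not part of the patch.

/* Pack and unpack a squashfs inode address with the macros above. */
static void squashfs_inode_macro_example(void)
{
	squashfs_inode_t ino;
	unsigned int blk, off;

	/* metadata block starting at byte 0x1234, inode at offset 0x42 */
	ino = SQUASHFS_MKINODE(0x1234, 0x42);
	blk = SQUASHFS_INODE_BLK(ino);		/* == 0x1234 */
	off = SQUASHFS_INODE_OFFSET(ino);	/* == 0x42 */
	(void)blk;
	(void)off;
}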
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/squashfs_fs_i.h	2010-12-29 19:30:08.751517111 +0100
@@ -0,0 +1,45 @@
+#ifndef SQUASHFS_FS_I
+#define SQUASHFS_FS_I
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
+ * Phillip Lougher <phillip@lougher.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * squashfs_fs_i.h
+ */
+
+struct squashfs_inode_info {
+	long long	start_block;
+	unsigned int	offset;
+	union {
+		struct {
+			long long	fragment_start_block;
+			unsigned int	fragment_size;
+			unsigned int	fragment_offset;
+			long long	block_list_start;
+		} s1;
+		struct {
+			long long	directory_index_start;
+			unsigned int	directory_index_offset;
+			unsigned int	directory_index_count;
+			unsigned int	parent_inode;
+		} s2;
+	} u;
+	struct inode	vfs_inode;
+};
+#endif
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/include/linux/squashfs_fs_sb.h	2010-12-29 19:30:08.751517111 +0100
@@ -0,0 +1,74 @@
+#ifndef SQUASHFS_FS_SB
+#define SQUASHFS_FS_SB
+/*
+ * Squashfs
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007
+ * Phillip Lougher <phillip@lougher.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * squashfs_fs_sb.h
+ */
+
+#include <linux/squashfs_fs.h>
+#include "sqlzma.h"
+
+struct squashfs_cache {
+	long long	block;
+	int		length;
+	long long	next_index;
+	char		*data;
+};
+
+struct squashfs_fragment_cache {
+	long long	block;
+	int		length;
+	unsigned int	locked;
+	char		*data;
+};
+
+struct squashfs_sb_info {
+	struct squashfs_super_block	sblk;
+	int			devblksize;
+	int			devblksize_log2;
+	int			swap;
+	struct squashfs_cache	*block_cache;
+	struct squashfs_fragment_cache	*fragment;
+	int			next_cache;
+	int			next_fragment;
+	int			next_meta_index;
+	unsigned int		*uid;
+	unsigned int		*guid;
+	long long		*fragment_index;
+	unsigned int		*fragment_index_2;
+	char			*read_page;
+	//struct mutex		read_data_mutex;
+	struct mutex		read_page_mutex;
+	struct mutex		block_cache_mutex;
+	struct mutex		fragment_mutex;
+	struct mutex		meta_index_mutex;
+	wait_queue_head_t	waitq;
+	wait_queue_head_t	fragment_wait_queue;
+	struct meta_index	*meta_index;
+	long long		*inode_lookup_table;
+	int			(*read_inode)(struct inode *i,  squashfs_inode_t \
+				inode);
+	long long		(*read_blocklist)(struct inode *inode, int \
+				index, int readahead_blks, char *block_list, \
+				unsigned short **block_p, unsigned int *bsize);
+	int			(*read_fragment_index_table)(struct super_block *s);
+};
+#endif
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/lib/builtin-fbxserial.c	2010-12-29 19:30:08.851442681 +0100
@@ -0,0 +1,131 @@
+/*
+ * builtin-fbxserial.c for linux-freebox
+ * Created by <nschichan@freebox.fr> on Thu Feb  1 19:12:08 2007
+ * Freebox SA
+ *
+ * Licence GPL: see COPYING for details.
+ */
+
+/*
+ * this file contains the fbxserialinfo related functions that need to
+ * be built into the kernel.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+
+#include <asm/io.h>
+
+#include <linux/fbxserial.h>
+#include <linux/fbxserialinfo.h>
+
+#define PFX "builtin-fbxserial: "
+
+static void __init
+fbxserialinfo_use_default(struct fbx_serial *serial)
+{
+	printk(KERN_WARNING PFX "warning: using default serial infos\n");
+	fbxserial_set_default(serial);
+}
+
+/*
+ * add a trailing 0 to the bundle string here.
+ */
+static void __init
+bundle_fixup(struct fbx_serial *serial)
+{
+	struct fbx_serial_extinfo *p;
+	int i;
+
+	for (i = 0; i < be32_to_cpu(serial->extinfo_count); i++) {
+
+		if (i >= EXTINFO_MAX_COUNT)
+			break;
+
+		p = &serial->extinfos[i];
+		if (be32_to_cpu(p->type) == EXTINFO_TYPE_EXTDEV &&
+		    be32_to_cpu(p->u.extdev.type) == EXTDEV_TYPE_BUNDLE) {
+			int size;
+
+			size = sizeof (p->u.extdev.serial);
+			p->u.extdev.serial[size - 1] = 0;
+		}
+	}
+}
+
+/*
+ * called from arch code early in the boot sequence.  This function
+ * returns 1 in case serial infos are invalid/unreadable and default
+ * values have been used.
+ */
+int __init
+fbxserialinfo_read(void *data, struct fbx_serial *out)
+{
+	uint32_t sum;
+
+	/*
+	 * get partial serial data from flash/whatever.
+	 */
+	memcpy(out, data, sizeof (*out));
+
+	/* check magic first */
+	if (be32_to_cpu(out->magic) != FBXSERIAL_MAGIC) {
+		printk(KERN_NOTICE PFX "invalid magic (%08x, expected %08x), "
+			"using defaults !\n", be32_to_cpu(out->magic),
+		       FBXSERIAL_MAGIC);
+		goto out_default;
+	}
+
+	/* fetch size for which we have to check CRC */
+	if (be32_to_cpu(out->len) > FBXSERIAL_MAX_SIZE) {
+		printk(KERN_NOTICE PFX "structure size too big (%d), "
+		       "using defaults !\n", be32_to_cpu(out->len));
+		goto out_default;
+	}
+
+	/* compute and check checksum */
+	sum = crc32(0, data + 4, be32_to_cpu(out->len) - 4);
+
+	if (be32_to_cpu(out->crc32) != sum) {
+		printk(KERN_NOTICE PFX "invalid checksum (%08x, "
+		       "expected %08x), using defaults !\n", sum,
+		       be32_to_cpu(out->crc32));
+		goto out_default;
+	}
+
+	printk(KERN_INFO PFX "Found valid serial infos !\n");
+	bundle_fixup(out);
+	return 0;
+
+ out_default:
+	fbxserialinfo_use_default(out);
+	bundle_fixup(out);
+	return 1;
+}
+
+void
+fbxserialinfo_get_random(unsigned char *data, unsigned int len)
+{
+	const struct fbx_serial *s;
+
+	s = arch_get_serial();
+
+	if (len > sizeof (s->random_data))
+		len = sizeof (s->random_data);
+
+	memcpy(data, s->random_data, len);
+}
+EXPORT_SYMBOL(fbxserialinfo_get_random);
+
+void
+fbxserialinfo_get_mac_addr(unsigned char *data)
+{
+	const struct fbx_serial *s;
+
+	s = arch_get_serial();
+	memcpy(data, s->mac_addr_base, MAC_ADDR_SIZE);
+}
+EXPORT_SYMBOL(fbxserialinfo_get_mac_addr);
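The two exported helpers above are the consumer-facing side of this file. A hedged sketch of how a platform driver might use them follows; the driver, its net_device handling and the assumption that MAC_ADDR_SIZE comes from <linux/fbxserial.h> are illustrative only and not part of this patch.

#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/fbxserial.h>
#include <linux/fbxserialinfo.h>

/* Seed a network device's MAC address from the board serial info. */
static void example_set_mac_from_serial(struct net_device *dev)
{
	unsigned char addr[MAC_ADDR_SIZE];

	fbxserialinfo_get_mac_addr(addr);
	memcpy(dev->dev_addr, addr, MAC_ADDR_SIZE);
}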
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/lib/LzmaDecode.c	2010-12-29 19:30:08.851442681 +0100
@@ -0,0 +1,584 @@
+/*
+  LzmaDecode.c
+  LZMA Decoder (optimized for Speed version)
+  
+  LZMA SDK 4.40 Copyright (c) 1999-2006 Igor Pavlov (2006-05-01)
+  http://www.7-zip.org/
+
+  LZMA SDK is licensed under two licenses:
+  1) GNU Lesser General Public License (GNU LGPL)
+  2) Common Public License (CPL)
+  It means that you can select one of these two licenses and 
+  follow rules of that license.
+
+  SPECIAL EXCEPTION:
+  Igor Pavlov, as the author of this Code, expressly permits you to 
+  statically or dynamically link your Code (or bind by name) to the 
+  interfaces of this file without subjecting your linked Code to the 
+  terms of the CPL or GNU LGPL. Any modifications or additions 
+  to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#include "LzmaDecode.h"
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+#define RC_READ_BYTE (*Buffer++)
+
+#define RC_INIT2 Code = 0; Range = 0xFFFFFFFF; \
+  { int i; for(i = 0; i < 5; i++) { RC_TEST; Code = (Code << 8) | RC_READ_BYTE; }}
+
+#ifdef _LZMA_IN_CB
+
+#define RC_TEST { if (Buffer == BufferLim) \
+  { SizeT size; int result = InCallback->Read(InCallback, &Buffer, &size); if (result != LZMA_RESULT_OK) return result; \
+  BufferLim = Buffer + size; if (size == 0) return LZMA_RESULT_DATA_ERROR; }}
+
+#define RC_INIT Buffer = BufferLim = 0; RC_INIT2
+
+#else
+
+#define RC_TEST { if (Buffer == BufferLim) return LZMA_RESULT_DATA_ERROR; }
+
+#define RC_INIT(buffer, bufferSize) Buffer = buffer; BufferLim = buffer + bufferSize; RC_INIT2
+ 
+#endif
+
+#define RC_NORMALIZE if (Range < kTopValue) { RC_TEST; Range <<= 8; Code = (Code << 8) | RC_READ_BYTE; }
+
+#define IfBit0(p) RC_NORMALIZE; bound = (Range >> kNumBitModelTotalBits) * *(p); if (Code < bound)
+#define UpdateBit0(p) Range = bound; *(p) += (kBitModelTotal - *(p)) >> kNumMoveBits;
+#define UpdateBit1(p) Range -= bound; Code -= bound; *(p) -= (*(p)) >> kNumMoveBits;
+
+#define RC_GET_BIT2(p, mi, A0, A1) IfBit0(p) \
+  { UpdateBit0(p); mi <<= 1; A0; } else \
+  { UpdateBit1(p); mi = (mi + mi) + 1; A1; } 
+  
+#define RC_GET_BIT(p, mi) RC_GET_BIT2(p, mi, ; , ;)               
+
+#define RangeDecoderBitTreeDecode(probs, numLevels, res) \
+  { int i = numLevels; res = 1; \
+  do { CProb *p = probs + res; RC_GET_BIT(p, res) } while(--i != 0); \
+  res -= (1 << numLevels); }
+
+
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define LenChoice 0
+#define LenChoice2 (LenChoice + 1)
+#define LenLow (LenChoice2 + 1)
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols) 
+
+
+#define kNumStates 12
+#define kNumLitStates 7
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+
+#define IsMatch 0
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define IsRep0Long (IsRepG2 + kNumStates)
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
+#define LenCoder (Align + kAlignTableSize)
+#define RepLenCoder (LenCoder + kNumLenProbs)
+#define Literal (RepLenCoder + kNumLenProbs)
+
+#if Literal != LZMA_BASE_SIZE
+StopCompilingDueBUG
+#endif
+
+int LzmaDecodeProperties(CLzmaProperties *propsRes, const unsigned char *propsData, int size)
+{
+  unsigned char prop0;
+  if (size < LZMA_PROPERTIES_SIZE)
+    return LZMA_RESULT_DATA_ERROR;
+  prop0 = propsData[0];
+  if (prop0 >= (9 * 5 * 5))
+    return LZMA_RESULT_DATA_ERROR;
+  {
+    for (propsRes->pb = 0; prop0 >= (9 * 5); propsRes->pb++, prop0 -= (9 * 5));
+    for (propsRes->lp = 0; prop0 >= 9; propsRes->lp++, prop0 -= 9);
+    propsRes->lc = prop0;
+    /*
+    unsigned char remainder = (unsigned char)(prop0 / 9);
+    propsRes->lc = prop0 % 9;
+    propsRes->pb = remainder / 5;
+    propsRes->lp = remainder % 5;
+    */
+  }
+
+  #ifdef _LZMA_OUT_READ
+  {
+    int i;
+    propsRes->DictionarySize = 0;
+    for (i = 0; i < 4; i++)
+      propsRes->DictionarySize += (UInt32)(propsData[1 + i]) << (i * 8);
+    if (propsRes->DictionarySize == 0)
+      propsRes->DictionarySize = 1;
+  }
+  #endif
+  return LZMA_RESULT_OK;
+}
+
+#define kLzmaStreamWasFinishedId (-1)
+
+int LzmaDecode(CLzmaDecoderState *vs,
+    #ifdef _LZMA_IN_CB
+    ILzmaInCallback *InCallback,
+    #else
+    const unsigned char *inStream, SizeT inSize, SizeT *inSizeProcessed,
+    #endif
+    unsigned char *outStream, SizeT outSize, SizeT *outSizeProcessed)
+{
+  CProb *p = vs->Probs;
+  SizeT nowPos = 0;
+  Byte previousByte = 0;
+  UInt32 posStateMask = (1 << (vs->Properties.pb)) - 1;
+  UInt32 literalPosMask = (1 << (vs->Properties.lp)) - 1;
+  int lc = vs->Properties.lc;
+
+  #ifdef _LZMA_OUT_READ
+  
+  UInt32 Range = vs->Range;
+  UInt32 Code = vs->Code;
+  #ifdef _LZMA_IN_CB
+  const Byte *Buffer = vs->Buffer;
+  const Byte *BufferLim = vs->BufferLim;
+  #else
+  const Byte *Buffer = inStream;
+  const Byte *BufferLim = inStream + inSize;
+  #endif
+  int state = vs->State;
+  UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3];
+  int len = vs->RemainLen;
+  UInt32 globalPos = vs->GlobalPos;
+  UInt32 distanceLimit = vs->DistanceLimit;
+
+  Byte *dictionary = vs->Dictionary;
+  UInt32 dictionarySize = vs->Properties.DictionarySize;
+  UInt32 dictionaryPos = vs->DictionaryPos;
+
+  Byte tempDictionary[4];
+
+  #ifndef _LZMA_IN_CB
+  *inSizeProcessed = 0;
+  #endif
+  *outSizeProcessed = 0;
+  if (len == kLzmaStreamWasFinishedId)
+    return LZMA_RESULT_OK;
+
+  if (dictionarySize == 0)
+  {
+    dictionary = tempDictionary;
+    dictionarySize = 1;
+    tempDictionary[0] = vs->TempDictionary[0];
+  }
+
+  if (len == kLzmaNeedInitId)
+  {
+    {
+      UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + vs->Properties.lp));
+      UInt32 i;
+      for (i = 0; i < numProbs; i++)
+        p[i] = kBitModelTotal >> 1; 
+      rep0 = rep1 = rep2 = rep3 = 1;
+      state = 0;
+      globalPos = 0;
+      distanceLimit = 0;
+      dictionaryPos = 0;
+      dictionary[dictionarySize - 1] = 0;
+      #ifdef _LZMA_IN_CB
+      RC_INIT;
+      #else
+      RC_INIT(inStream, inSize);
+      #endif
+    }
+    len = 0;
+  }
+  while(len != 0 && nowPos < outSize)
+  {
+    UInt32 pos = dictionaryPos - rep0;
+    if (pos >= dictionarySize)
+      pos += dictionarySize;
+    outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos];
+    if (++dictionaryPos == dictionarySize)
+      dictionaryPos = 0;
+    len--;
+  }
+  if (dictionaryPos == 0)
+    previousByte = dictionary[dictionarySize - 1];
+  else
+    previousByte = dictionary[dictionaryPos - 1];
+
+  #else /* if !_LZMA_OUT_READ */
+
+  int state = 0;
+  UInt32 rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
+  int len = 0;
+  const Byte *Buffer;
+  const Byte *BufferLim;
+  UInt32 Range;
+  UInt32 Code;
+
+  #ifndef _LZMA_IN_CB
+  *inSizeProcessed = 0;
+  #endif
+  *outSizeProcessed = 0;
+
+  {
+    UInt32 i;
+    UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + vs->Properties.lp));
+    for (i = 0; i < numProbs; i++)
+      p[i] = kBitModelTotal >> 1;
+  }
+  
+  #ifdef _LZMA_IN_CB
+  RC_INIT;
+  #else
+  RC_INIT(inStream, inSize);
+  #endif
+
+  #endif /* _LZMA_OUT_READ */
+
+  while(nowPos < outSize)
+  {
+    CProb *prob;
+    UInt32 bound;
+    int posState = (int)(
+        (nowPos 
+        #ifdef _LZMA_OUT_READ
+        + globalPos
+        #endif
+        )
+        & posStateMask);
+
+    prob = p + IsMatch + (state << kNumPosBitsMax) + posState;
+    IfBit0(prob)
+    {
+      int symbol = 1;
+      UpdateBit0(prob)
+      prob = p + Literal + (LZMA_LIT_SIZE * 
+        (((
+        (nowPos 
+        #ifdef _LZMA_OUT_READ
+        + globalPos
+        #endif
+        )
+        & literalPosMask) << lc) + (previousByte >> (8 - lc))));
+
+      if (state >= kNumLitStates)
+      {
+        int matchByte;
+        #ifdef _LZMA_OUT_READ
+        UInt32 pos = dictionaryPos - rep0;
+        if (pos >= dictionarySize)
+          pos += dictionarySize;
+        matchByte = dictionary[pos];
+        #else
+        matchByte = outStream[nowPos - rep0];
+        #endif
+        do
+        {
+          int bit;
+          CProb *probLit;
+          matchByte <<= 1;
+          bit = (matchByte & 0x100);
+          probLit = prob + 0x100 + bit + symbol;
+          RC_GET_BIT2(probLit, symbol, if (bit != 0) break, if (bit == 0) break)
+        }
+        while (symbol < 0x100);
+      }
+      while (symbol < 0x100)
+      {
+        CProb *probLit = prob + symbol;
+        RC_GET_BIT(probLit, symbol)
+      }
+      previousByte = (Byte)symbol;
+
+      outStream[nowPos++] = previousByte;
+      #ifdef _LZMA_OUT_READ
+      if (distanceLimit < dictionarySize)
+        distanceLimit++;
+
+      dictionary[dictionaryPos] = previousByte;
+      if (++dictionaryPos == dictionarySize)
+        dictionaryPos = 0;
+      #endif
+      if (state < 4) state = 0;
+      else if (state < 10) state -= 3;
+      else state -= 6;
+    }
+    else             
+    {
+      UpdateBit1(prob);
+      prob = p + IsRep + state;
+      IfBit0(prob)
+      {
+        UpdateBit0(prob);
+        rep3 = rep2;
+        rep2 = rep1;
+        rep1 = rep0;
+        state = state < kNumLitStates ? 0 : 3;
+        prob = p + LenCoder;
+      }
+      else
+      {
+        UpdateBit1(prob);
+        prob = p + IsRepG0 + state;
+        IfBit0(prob)
+        {
+          UpdateBit0(prob);
+          prob = p + IsRep0Long + (state << kNumPosBitsMax) + posState;
+          IfBit0(prob)
+          {
+            #ifdef _LZMA_OUT_READ
+            UInt32 pos;
+            #endif
+            UpdateBit0(prob);
+            
+            #ifdef _LZMA_OUT_READ
+            if (distanceLimit == 0)
+            #else
+            if (nowPos == 0)
+            #endif
+              return LZMA_RESULT_DATA_ERROR;
+            
+            state = state < kNumLitStates ? 9 : 11;
+            #ifdef _LZMA_OUT_READ
+            pos = dictionaryPos - rep0;
+            if (pos >= dictionarySize)
+              pos += dictionarySize;
+            previousByte = dictionary[pos];
+            dictionary[dictionaryPos] = previousByte;
+            if (++dictionaryPos == dictionarySize)
+              dictionaryPos = 0;
+            #else
+            previousByte = outStream[nowPos - rep0];
+            #endif
+            outStream[nowPos++] = previousByte;
+            #ifdef _LZMA_OUT_READ
+            if (distanceLimit < dictionarySize)
+              distanceLimit++;
+            #endif
+
+            continue;
+          }
+          else
+          {
+            UpdateBit1(prob);
+          }
+        }
+        else
+        {
+          UInt32 distance;
+          UpdateBit1(prob);
+          prob = p + IsRepG1 + state;
+          IfBit0(prob)
+          {
+            UpdateBit0(prob);
+            distance = rep1;
+          }
+          else 
+          {
+            UpdateBit1(prob);
+            prob = p + IsRepG2 + state;
+            IfBit0(prob)
+            {
+              UpdateBit0(prob);
+              distance = rep2;
+            }
+            else
+            {
+              UpdateBit1(prob);
+              distance = rep3;
+              rep3 = rep2;
+            }
+            rep2 = rep1;
+          }
+          rep1 = rep0;
+          rep0 = distance;
+        }
+        state = state < kNumLitStates ? 8 : 11;
+        prob = p + RepLenCoder;
+      }
+      {
+        int numBits, offset;
+        CProb *probLen = prob + LenChoice;
+        IfBit0(probLen)
+        {
+          UpdateBit0(probLen);
+          probLen = prob + LenLow + (posState << kLenNumLowBits);
+          offset = 0;
+          numBits = kLenNumLowBits;
+        }
+        else
+        {
+          UpdateBit1(probLen);
+          probLen = prob + LenChoice2;
+          IfBit0(probLen)
+          {
+            UpdateBit0(probLen);
+            probLen = prob + LenMid + (posState << kLenNumMidBits);
+            offset = kLenNumLowSymbols;
+            numBits = kLenNumMidBits;
+          }
+          else
+          {
+            UpdateBit1(probLen);
+            probLen = prob + LenHigh;
+            offset = kLenNumLowSymbols + kLenNumMidSymbols;
+            numBits = kLenNumHighBits;
+          }
+        }
+        RangeDecoderBitTreeDecode(probLen, numBits, len);
+        len += offset;
+      }
+
+      if (state < 4)
+      {
+        int posSlot;
+        state += kNumLitStates;
+        prob = p + PosSlot +
+            ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) << 
+            kNumPosSlotBits);
+        RangeDecoderBitTreeDecode(prob, kNumPosSlotBits, posSlot);
+        if (posSlot >= kStartPosModelIndex)
+        {
+          int numDirectBits = ((posSlot >> 1) - 1);
+          rep0 = (2 | ((UInt32)posSlot & 1));
+          if (posSlot < kEndPosModelIndex)
+          {
+            rep0 <<= numDirectBits;
+            prob = p + SpecPos + rep0 - posSlot - 1;
+          }
+          else
+          {
+            numDirectBits -= kNumAlignBits;
+            do
+            {
+              RC_NORMALIZE
+              Range >>= 1;
+              rep0 <<= 1;
+              if (Code >= Range)
+              {
+                Code -= Range;
+                rep0 |= 1;
+              }
+            }
+            while (--numDirectBits != 0);
+            prob = p + Align;
+            rep0 <<= kNumAlignBits;
+            numDirectBits = kNumAlignBits;
+          }
+          {
+            int i = 1;
+            int mi = 1;
+            do
+            {
+              CProb *prob3 = prob + mi;
+              RC_GET_BIT2(prob3, mi, ; , rep0 |= i);
+              i <<= 1;
+            }
+            while(--numDirectBits != 0);
+          }
+        }
+        else
+          rep0 = posSlot;
+        if (++rep0 == (UInt32)(0))
+        {
+          /* it's for stream version */
+          len = kLzmaStreamWasFinishedId;
+          break;
+        }
+      }
+
+      len += kMatchMinLen;
+      #ifdef _LZMA_OUT_READ
+      if (rep0 > distanceLimit) 
+      #else
+      if (rep0 > nowPos)
+      #endif
+        return LZMA_RESULT_DATA_ERROR;
+
+      #ifdef _LZMA_OUT_READ
+      if (dictionarySize - distanceLimit > (UInt32)len)
+        distanceLimit += len;
+      else
+        distanceLimit = dictionarySize;
+      #endif
+
+      do
+      {
+        #ifdef _LZMA_OUT_READ
+        UInt32 pos = dictionaryPos - rep0;
+        if (pos >= dictionarySize)
+          pos += dictionarySize;
+        previousByte = dictionary[pos];
+        dictionary[dictionaryPos] = previousByte;
+        if (++dictionaryPos == dictionarySize)
+          dictionaryPos = 0;
+        #else
+        previousByte = outStream[nowPos - rep0];
+        #endif
+        len--;
+        outStream[nowPos++] = previousByte;
+      }
+      while(len != 0 && nowPos < outSize);
+    }
+  }
+  RC_NORMALIZE;
+
+  #ifdef _LZMA_OUT_READ
+  vs->Range = Range;
+  vs->Code = Code;
+  vs->DictionaryPos = dictionaryPos;
+  vs->GlobalPos = globalPos + (UInt32)nowPos;
+  vs->DistanceLimit = distanceLimit;
+  vs->Reps[0] = rep0;
+  vs->Reps[1] = rep1;
+  vs->Reps[2] = rep2;
+  vs->Reps[3] = rep3;
+  vs->State = state;
+  vs->RemainLen = len;
+  vs->TempDictionary[0] = tempDictionary[0];
+  #endif
+
+  #ifdef _LZMA_IN_CB
+  vs->Buffer = Buffer;
+  vs->BufferLim = BufferLim;
+  #else
+  *inSizeProcessed = (SizeT)(Buffer - inStream);
+  #endif
+  *outSizeProcessed = nowPos;
+  return LZMA_RESULT_OK;
+}
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/lib/LzmaDecode.h	2010-12-29 19:30:08.851442681 +0100
@@ -0,0 +1,113 @@
+/* 
+  LzmaDecode.h
+  LZMA Decoder interface
+
+  LZMA SDK 4.40 Copyright (c) 1999-2006 Igor Pavlov (2006-05-01)
+  http://www.7-zip.org/
+
+  LZMA SDK is licensed under two licenses:
+  1) GNU Lesser General Public License (GNU LGPL)
+  2) Common Public License (CPL)
+  It means that you can select one of these two licenses and 
+  follow rules of that license.
+
+  SPECIAL EXCEPTION:
+  Igor Pavlov, as the author of this code, expressly permits you to 
+  statically or dynamically link your code (or bind by name) to the 
+  interfaces of this file without subjecting your linked code to the 
+  terms of the CPL or GNU LGPL. Any modifications or additions 
+  to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#ifndef __LZMADECODE_H
+#define __LZMADECODE_H
+
+#include "linux/LzmaTypes.h"
+
+/* #define _LZMA_IN_CB */
+/* Use callback for input data */
+
+/* #define _LZMA_OUT_READ */
+/* Use read function for output data */
+
+/* #define _LZMA_PROB32 */
+/* It can increase speed on some 32-bit CPUs, 
+   but memory usage will be doubled in that case */
+
+/* #define _LZMA_LOC_OPT */
+/* Enable local speed optimizations inside code */
+
+#ifdef _LZMA_PROB32
+#define CProb UInt32
+#else
+#define CProb UInt16
+#endif
+
+#define LZMA_RESULT_OK 0
+#define LZMA_RESULT_DATA_ERROR 1
+
+#ifdef _LZMA_IN_CB
+typedef struct _ILzmaInCallback
+{
+  int (*Read)(void *object, const unsigned char **buffer, SizeT *bufferSize);
+} ILzmaInCallback;
+#endif
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+#define LZMA_PROPERTIES_SIZE 5
+
+typedef struct _CLzmaProperties
+{
+  int lc;
+  int lp;
+  int pb;
+  #ifdef _LZMA_OUT_READ
+  UInt32 DictionarySize;
+  #endif
+}CLzmaProperties;
+
+int LzmaDecodeProperties(CLzmaProperties *propsRes, const unsigned char *propsData, int size);
+
+#define LzmaGetNumProbs(Properties) (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << ((Properties)->lc + (Properties)->lp)))
+
+#define kLzmaNeedInitId (-2)
+
+typedef struct _CLzmaDecoderState
+{
+  CLzmaProperties Properties;
+  CProb *Probs;
+
+  #ifdef _LZMA_IN_CB
+  const unsigned char *Buffer;
+  const unsigned char *BufferLim;
+  #endif
+
+  #ifdef _LZMA_OUT_READ
+  unsigned char *Dictionary;
+  UInt32 Range;
+  UInt32 Code;
+  UInt32 DictionaryPos;
+  UInt32 GlobalPos;
+  UInt32 DistanceLimit;
+  UInt32 Reps[4];
+  int State;
+  int RemainLen;
+  unsigned char TempDictionary[4];
+  #endif
+} CLzmaDecoderState;
+
+#ifdef _LZMA_OUT_READ
+#define LzmaDecoderInit(vs) { (vs)->RemainLen = kLzmaNeedInitId; }
+#endif
+
+int LzmaDecode(CLzmaDecoderState *vs,
+    #ifdef _LZMA_IN_CB
+    ILzmaInCallback *inCallback,
+    #else
+    const unsigned char *inStream, SizeT inSize, SizeT *inSizeProcessed,
+    #endif
+    unsigned char *outStream, SizeT outSize, SizeT *outSizeProcessed);
+
+#endif
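
The header above only declares the decoder entry points; the following is a minimal sketch of the intended call sequence in single-call mode (neither _LZMA_IN_CB nor _LZMA_OUT_READ defined). The function name and buffers are hypothetical, and it assumes the caller already knows the uncompressed size, as the real in-kernel caller LzmaUncompress() in lib/sqlzma-uncomp.c below does by reading the 8-byte field that follows the properties.

#include <linux/slab.h>
#include "linux/LzmaTypes.h"
#include "linux/LzmaDecode.h"

/* Hypothetical single-call use of the LzmaDecode interface declared above. */
static int decode_lzma_blob(const unsigned char *in, SizeT in_len,
			    unsigned char *out, SizeT out_len)
{
	CLzmaDecoderState state;
	SizeT in_done, out_done;
	int ret;

	/* the first LZMA_PROPERTIES_SIZE bytes encode lc/lp/pb */
	ret = LzmaDecodeProperties(&state.Properties, in, LZMA_PROPERTIES_SIZE);
	if (ret != LZMA_RESULT_OK)
		return ret;

	/* the probability model needs LzmaGetNumProbs() slots of CProb */
	state.Probs = kmalloc(LzmaGetNumProbs(&state.Properties) * sizeof(CProb),
			      GFP_KERNEL);
	if (!state.Probs)
		return LZMA_RESULT_DATA_ERROR;	/* no dedicated ENOMEM code in this API */

	ret = LzmaDecode(&state, in + LZMA_PROPERTIES_SIZE,
			 in_len - LZMA_PROPERTIES_SIZE, &in_done,
			 out, out_len, &out_done);
	kfree(state.Probs);
	return ret;
}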
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/lib/sqlzma-uncomp.c	2010-12-29 19:30:08.861442742 +0100
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2006, 2007 Junjiro Okajima
+ * Copyright (C) 2006, 2007 Tomas Matejicek, slax.org
+ *
+ * LICENSE follows the described one in lzma.txt.
+ */
+
+/* $Id: uncomp.c,v 1.29 2007/01/08 05:12:50 jro Exp $ */
+
+/* extract some parts from lzma443/C/7zip/Compress/LZMA_C/LzmaTest.c */
+
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <pthread.h>
+#define unlikely(x)		__builtin_expect(!!(x), 0)
+#define BUG_ON(x)		assert(!(x))
+/* sqlzma buffers are always larger than a page. true? */
+#define kmalloc(sz,gfp)		malloc(sz)
+#define kfree(p)		free(p)
+#define zlib_inflate(s, f)	inflate(s, f)
+#define zlib_inflateInit(s)	inflateInit(s)
+#define zlib_inflateReset(s)	inflateReset(s)
+#define zlib_inflateEnd(s)	inflateEnd(s)
+#else
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#ifndef WARN_ON_ONCE
+#define WARN_ON_ONCE(b)	WARN_ON(b)
+#endif
+#endif /* __KERNEL__ */
+
+#include "linux/sqlzma.h"
+#include "linux/LzmaDecode.h"
+
+static int LzmaUncompress(struct sqlzma_un *un)
+{
+	int err, i, ret;
+	size_t outSize, inProcessed, outProcessed, srclen;
+	/* the structure is about 24-80 bytes if int is 32-bit */
+	CLzmaDecoderState state;
+	unsigned char *dst, *src, a[8];
+	struct sized_buf *sbuf;
+
+	/* Decode LZMA properties and allocate memory */
+	err = -EINVAL;
+	src = un->un_cmbuf;
+	ret = LzmaDecodeProperties(&state.Properties, src, LZMA_PROPERTIES_SIZE);
+	src += LZMA_PROPERTIES_SIZE;
+	if (unlikely(ret != LZMA_RESULT_OK))
+		goto out;
+	i = LzmaGetNumProbs(&state.Properties);
+	if (unlikely(i <= 0))
+		i = 1;
+	i *= sizeof(CProb);
+	sbuf = un->un_a + SQUN_PROB;
+	if (unlikely(sbuf->sz < i)) {
+		if (sbuf->buf && sbuf->buf != un->un_prob)
+			kfree(sbuf->buf);
+#ifdef __KERNEL__
+		printk("%s:%d: %d --> %d\n", __func__, __LINE__, sbuf->sz, i);
+#else
+		printf("%d --> %d\n", sbuf->sz, i);
+#endif
+		err = -ENOMEM;
+		sbuf->sz = 0;
+		sbuf->buf = kmalloc(i, GFP_ATOMIC);
+		if (unlikely(!sbuf->buf))
+			goto out;
+		sbuf->sz = i;
+	}
+	state.Probs = (void*)sbuf->buf;
+
+	/* Read uncompressed size */
+	memcpy(a, src, sizeof(a));
+	src += sizeof(a);
+	outSize = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24);
+
+	err = -EINVAL;
+	dst = un->un_resbuf;
+	if (unlikely(!dst || outSize > un->un_reslen))
+		goto out;
+	un->un_reslen = outSize;
+	srclen = un->un_cmlen - (src - un->un_cmbuf);
+
+	/* Decompress */
+	err = LzmaDecode(&state, src, srclen, &inProcessed, dst, outSize,
+			 &outProcessed);
+	if (err)
+		err = -EINVAL;
+
+ out:
+#ifndef __KERNEL__
+	if (err)
+		fprintf(stderr, "err %d\n", err);
+#endif
+	return err;
+}
+
+int sqlzma_un(struct sqlzma_un *un, struct sized_buf *src,
+	      struct sized_buf *dst)
+{
+	int err, by_lzma = 0;
+	if (un->un_lzma && is_lzma(*src->buf)) {
+		by_lzma = 1;
+		un->un_cmbuf = src->buf;
+		un->un_cmlen = src->sz;
+		un->un_resbuf = dst->buf;
+		un->un_reslen = dst->sz;
+
+		/* this library is thread-safe */
+		err = LzmaUncompress(un);
+		goto out;
+	}
+
+	err = zlib_inflateReset(&un->un_stream);
+	if (unlikely(err != Z_OK))
+		goto out;
+	un->un_stream.next_in = src->buf;
+	un->un_stream.avail_in = src->sz;
+	un->un_stream.next_out = dst->buf;
+	un->un_stream.avail_out = dst->sz;
+	err = zlib_inflate(&un->un_stream, Z_FINISH);
+	if (err == Z_STREAM_END)
+		err = 0;
+
+ out:
+	if (err) {
+#ifdef __KERNEL__
+		WARN_ON_ONCE(1);
+#else
+		char a[64] = "ZLIB ";
+		if (by_lzma) {
+			strcpy(a, "LZMA ");
+#ifdef _REENTRANT
+			strerror_r(err, a + 5, sizeof(a) - 5);
+#else
+			strncat(a, strerror(err), sizeof(a) - 5);
+#endif
+		} else
+			strncat(a, zError(err), sizeof(a) - 5);
+		fprintf(stderr, "%s: %.*s\n", __func__, (int)sizeof(a), a);
+#endif
+	}
+	return err;
+}
+
+int sqlzma_init(struct sqlzma_un *un, int do_lzma, unsigned int res_sz)
+{
+	int err;
+
+	err = -ENOMEM;
+	un->un_lzma = do_lzma;
+	memset(un->un_a, 0, sizeof(un->un_a));
+	un->un_a[SQUN_PROB].buf = un->un_prob;
+	un->un_a[SQUN_PROB].sz = sizeof(un->un_prob);
+	if (res_sz) {
+		un->un_a[SQUN_RESULT].buf = kmalloc(res_sz, GFP_KERNEL);
+		if (unlikely(!un->un_a[SQUN_RESULT].buf))
+			return err;
+		un->un_a[SQUN_RESULT].sz = res_sz;
+	}
+
+	un->un_stream.next_in = NULL;
+	un->un_stream.avail_in = 0;
+#ifdef __KERNEL__
+	un->un_stream.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+	if (unlikely(!un->un_stream.workspace))
+		return err;
+#else
+	un->un_stream.opaque = NULL;
+	un->un_stream.zalloc = Z_NULL;
+	un->un_stream.zfree = Z_NULL;
+#endif
+	err = zlib_inflateInit(&un->un_stream);
+	if (unlikely(err == Z_MEM_ERROR))
+		return -ENOMEM;
+	BUG_ON(err);
+	return err;
+}
+
+void sqlzma_fin(struct sqlzma_un *un)
+{
+	int i;
+	for (i = 0; i < SQUN_LAST; i++)
+		if (un->un_a[i].buf && un->un_a[i].buf != un->un_prob)
+			kfree(un->un_a[i].buf);
+	BUG_ON(zlib_inflateEnd(&un->un_stream) != Z_OK);
+}
+
+#ifdef __KERNEL__
+EXPORT_SYMBOL(sqlzma_un);
+EXPORT_SYMBOL(sqlzma_init);
+EXPORT_SYMBOL(sqlzma_fin);
+
+#if 0
+static int __init sqlzma_init(void)
+{
+	return 0;
+}
+
+static void __exit sqlzma_exit(void)
+{
+}
+
+module_init(sqlzma_init);
+module_exit(sqlzma_exit);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Junjiro Okajima <hooanon05 at yahoo dot co dot jp>");
+MODULE_VERSION("$Id: uncomp.c,v 1.29 2007/01/08 05:12:50 jro Exp $");
+MODULE_DESCRIPTION("LZMA uncompress for squashfs. "
+		   "Some functions for squashfs to support LZMA and "
+		   "a tiny wrapper for LzmaDecode.c in LZMA SDK from www.7-zip.org.");
+#endif
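
The wrapper above reduces to a three-call API (sqlzma_init / sqlzma_un / sqlzma_fin). A minimal, hypothetical caller is sketched below; the function and buffer names are made up, and the struct sized_buf layout (buf/sz) is assumed from its use in sqlzma-uncomp.c.

#include <linux/slab.h>
#include "linux/sqlzma.h"

/* Hypothetical caller of the sqlzma wrapper above: decompress one block
 * that may be either zlib- or LZMA-compressed. */
static int decompress_block(unsigned char *cbuf, unsigned int clen,
			    unsigned char *obuf, unsigned int olen)
{
	struct sqlzma_un un;
	struct sized_buf src = { .buf = cbuf, .sz = clen };
	struct sized_buf dst = { .buf = obuf, .sz = olen };
	int err;

	/* second argument enables the LZMA path; 0 means no preallocated
	 * result buffer, since the destination buffer is supplied here */
	err = sqlzma_init(&un, 1, 0);
	if (err)
		return err;

	err = sqlzma_un(&un, &src, &dst);	/* 0 on success */
	sqlzma_fin(&un);
	return err;
}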
diff -Nruw linux-2.6.20.14-fbx/net/fbxatm./Kconfig linux-2.6.20.14-fbx/net/fbxatm/Kconfig
--- linux-2.6.20.14-fbx/net/fbxatm./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/net/fbxatm/Kconfig	2010-12-29 19:30:08.901441381 +0100
@@ -0,0 +1,7 @@
+config FBXATM
+	tristate "Freebox Asynchronous Transfer Mode (ATM)"
+
+config FBXATM_2684_RETX
+	bool "support freebox retx scheme"
+	depends on FBXATM
+	default n
diff -Nruw linux-2.6.20.14-fbx/net/fbxatm./Makefile linux-2.6.20.14-fbx/net/fbxatm/Makefile
--- linux-2.6.20.14-fbx/net/fbxatm./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.20.14-fbx/net/fbxatm/Makefile	2010-12-29 19:30:08.901441381 +0100
@@ -0,0 +1,10 @@
+obj-$(CONFIG_FBXATM) += fbxatm.o
+
+fbxatm-y := 	fbxatm_core.o	\
+		fbxatm_2684.o	\
+		fbxatm_dev.o	\
+		fbxatm_procfs.o	\
+		fbxatm_sysfs.o	\
+		crc10.o
+
+fbxatm-$(CONFIG_PPP) += fbxatm_pppoa.o
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/ip_ffn.c	2010-12-29 19:30:08.911437411 +0100
@@ -0,0 +1,424 @@
+/*
+ * IP fast forwarding and NAT
+ *
+ * Very restrictive code that only copes with linear, non-fragmented
+ * UDP and TCP packets that are routed and NATed with no other
+ * modification.
+ *
+ * Provide a fast path for established conntrack entries so that
+ * packets go out ASAP.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/jhash.h>
+#include <net/ip.h>
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+
+#define FFN_CACHE_SIZE		128
+#define MAX_FFN_ENTRY		512
+
+struct ffn_data {
+	u32 new_sip;
+	u32 new_dip;
+	u16 new_sport;
+	u16 new_dport;
+	struct dst_entry *dst;
+	struct ip_conntrack *ct;
+	enum ip_conntrack_info ctinfo;
+};
+
+struct ffn_lookup_entry {
+	u32 sip;
+	u32 dip;
+	u16 sport;
+	u16 dport;
+	u8 protocol;
+	struct list_head next;
+	struct ffn_data manip;
+	struct list_head all_next;
+	atomic_t users;
+};
+
+static struct list_head ffn_cache[FFN_CACHE_SIZE];
+static struct list_head ffn_all;
+static unsigned int ffn_entry_count;
+
+/*
+ * hash on five parameters
+ */
+static inline unsigned int ffn_hash(u32 sip, u32 dip, u16 sport, u16 dport,
+				    int is_tcp)
+{
+	return jhash_3words(sip, is_tcp ? dip : ~dip, sport | dport << 16, 0);
+}
+
+/*
+ * attempt to find entry with given value in cache
+ */
+static inline struct ffn_lookup_entry *__ip_ffn_find(u32 sip, u32 dip,
+						     u16 sport, u16 dport,
+						     u8 protocol,
+						     unsigned int hash)
+{
+	struct ffn_lookup_entry *tmp;
+
+	list_for_each_entry(tmp, &ffn_cache[hash % FFN_CACHE_SIZE], next) {
+
+		/* compare entry */
+		if (tmp->sip == sip && tmp->dip == dip &&
+		    tmp->sport == sport && tmp->dport == dport &&
+		    tmp->protocol == protocol)
+			return tmp;
+	}
+	return NULL;
+}
+
+static inline struct ffn_lookup_entry *ip_ffn_find(u32 sip, u32 dip,
+						   u16 sport, u16 dport,
+						   int is_tcp)
+{
+	struct ffn_lookup_entry *e;
+	unsigned int hash;
+	u8 protocol;
+
+	/* lookup entry in cache */
+	protocol = (is_tcp) ? IPPROTO_TCP : IPPROTO_UDP;
+	hash = ffn_hash(sip, dip, sport, dport, is_tcp);
+
+	local_bh_disable();
+	e = __ip_ffn_find(sip, dip, sport, dport, protocol, hash);
+	if (e)
+		atomic_inc(&e->users);
+	local_bh_enable();
+	return e;
+}
+
+/*
+ * helper to recompute checksum
+ */
+static inline u16 recheck2(u32 oldvalinv, u32 newval,
+			   u32 oldvalinv2, u32 newval2, u16 oldcheck)
+{
+	u32 diffs[] = { oldvalinv, newval, oldvalinv2, newval2 };
+
+	return csum_fold(csum_partial((char *)diffs, sizeof (diffs),
+				      oldcheck ^ 0xFFFF));
+}
+
+static inline u16 recheck4(u32 oldvalinv, u32 newval,
+			   u32 oldvalinv2, u32 newval2,
+			   u32 oldvalinv3, u32 newval3,
+			   u32 oldvalinv4, u32 newval4,
+			   u16 oldcheck)
+{
+	u32 diffs[] = { oldvalinv, newval, oldvalinv2, newval2,
+			oldvalinv3, newval3,
+			oldvalinv4, newval4 };
+
+	return csum_fold(csum_partial((char *)diffs, sizeof (diffs),
+				      oldcheck ^ 0xFFFF));
+}
+
+/*
+ * drop reference counter, and free when it reaches 0
+ */
+static inline void ffn_put_entry(struct ffn_lookup_entry *e)
+{
+	if (atomic_dec_and_test(&e->users)) {
+		dst_release(e->manip.dst);
+		kfree(e);
+	}
+}
+
+/*
+ * two hooks into netfilter code
+ */
+extern int external_tcp_packet(struct ip_conntrack *conntrack,
+			       const struct sk_buff *skb,
+			       enum ip_conntrack_info ctinfo);
+
+extern int external_udp_packet(struct ip_conntrack *conntrack,
+			       const struct sk_buff *skb,
+			       enum ip_conntrack_info ctinfo);
+
+/*
+ * skb->nh.iph must be set.
+ *
+ * returns 0 on a cache hit, 1 when no match is found and 2 if the
+ * packet is not a candidate for caching
+ */
+int ip_ffn_process(struct sk_buff *skb)
+{
+	struct ffn_lookup_entry *e;
+	struct iphdr *iph;
+	struct tcphdr *tcph = NULL;
+	struct udphdr *udph = NULL;
+	u8 proto;
+	int res;
+
+	iph = skb->nh.iph;
+
+	/* refuse fragmented IP packet, or packets with IP options */
+	if (iph->ihl > 5 || (iph->frag_off & htons(IP_MF | IP_OFFSET)))
+		return 2;
+
+	/* check encapsulated protocol is udp or tcp */
+	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+		return 2;
+
+	if (skb->nh.iph->ttl <= 1)
+		return 2;
+
+	proto = iph->protocol;
+	if (proto == IPPROTO_TCP) {
+
+		if (skb->len < (iph->ihl * 4) + sizeof (struct tcphdr))
+			return 2;
+		tcph = (struct tcphdr *)((unsigned char *)iph +
+					 (iph->ihl * 4));
+		e = ip_ffn_find(iph->saddr, iph->daddr, tcph->source,
+				tcph->dest, 1);
+	} else {
+
+		if (skb->len < (iph->ihl * 4) + sizeof (struct udphdr))
+			return 2;
+		udph = (struct udphdr *)((unsigned char *)iph +
+					 (iph->ihl * 4));
+		e = ip_ffn_find(iph->saddr, iph->daddr, udph->source,
+				udph->dest, 0);
+	}
+
+	if (e) {
+		u16 ipcheck;
+		u16 tcheck;
+		u32 tmp;
+
+		if (proto == IPPROTO_TCP) {
+			/* do sequence number checking and update
+			 * conntrack info */
+			res = external_tcp_packet(e->manip.ct, skb,
+						  e->manip.ctinfo);
+			tcheck = tcph->check;
+		} else {
+			res = external_udp_packet(e->manip.ct, skb,
+						  e->manip.ctinfo);
+			tcheck = udph->check;
+		}
+
+		if (unlikely(res != NF_ACCEPT)) {
+			dev_kfree_skb(skb);
+			ffn_put_entry(e);
+			return 0;
+		}
+
+		/* decrease ttl */
+		tmp = iph->check;
+		tmp += htons(0x0100);
+		ipcheck = tmp + (tmp >= 0xFFFF);
+		--iph->ttl;
+
+		/* change ip addresses & fix all checksums */
+		ipcheck = recheck2(~iph->saddr, e->manip.new_sip,
+				   ~iph->daddr, e->manip.new_dip, ipcheck);
+
+		/* fix ports & transport protocol checksum */
+		if (proto == IPPROTO_TCP) {
+			tcheck = recheck4(~iph->saddr, e->manip.new_sip,
+					  ~iph->daddr, e->manip.new_dip,
+					  tcph->source ^ 0xFFFF,
+					  e->manip.new_sport,
+					  tcph->dest ^ 0xFFFF,
+					  e->manip.new_dport,
+					  tcheck);
+			tcph->source = e->manip.new_sport;
+			tcph->dest = e->manip.new_dport;
+			tcph->check = tcheck;
+		} else {
+			tcheck = recheck4(~iph->saddr, e->manip.new_sip,
+					  ~iph->daddr, e->manip.new_dip,
+					  udph->source ^ 0xFFFF,
+					  e->manip.new_sport,
+					  udph->dest ^ 0xFFFF,
+					  e->manip.new_dport,
+					  tcheck);
+			udph->source = e->manip.new_sport;
+			udph->dest = e->manip.new_dport;
+			if (udph->check)
+				udph->check = tcheck ? tcheck : 0xffff;
+		}
+
+		iph->saddr = e->manip.new_sip;
+		iph->daddr = e->manip.new_dip;
+		iph->check = ipcheck;
+
+		/* forward skb */
+		skb->dst = dst_clone(e->manip.dst);
+		skb->ffn_state = 2;
+		dst_output(skb);
+		ffn_put_entry(e);
+		return 0;
+	}
+
+	/* track this skb for further addition */
+	skb->ffn_state = 1;
+	return 1;
+}
+
+/*
+ * check if skb is candidate for ffn, and if so add it to ffn cache
+ */
+void ip_ffn_add(struct sk_buff *skb)
+{
+	struct ip_conntrack *ct;
+	struct ip_conntrack_tuple *tuple, *rtuple;
+	enum ip_conntrack_info ctinfo;
+	struct ffn_lookup_entry *e, *old;
+	unsigned int hash;
+	int dir;
+	u8 proto;
+
+	if (!skb->nfct || skb->dst->output != ip_output)
+		return;
+
+	ct = ip_conntrack_get(skb, &ctinfo);
+	if ((ctinfo != IP_CT_ESTABLISHED) &&
+	    (ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY))
+		return;
+
+	if (ct->helper)
+		return;
+
+	dir = (ctinfo == IP_CT_ESTABLISHED) ?
+		IP_CT_DIR_ORIGINAL : IP_CT_DIR_REPLY;
+	tuple = &ct->tuplehash[dir].tuple;
+
+	if (tuple->dst.protonum != IPPROTO_TCP &&
+	    tuple->dst.protonum != IPPROTO_UDP)
+		return;
+
+	rtuple = &ct->tuplehash[1 - dir].tuple;
+
+	e = kmalloc(sizeof (*e), GFP_ATOMIC);
+	if (!e)
+		return;
+	e->sip = tuple->src.ip;
+	e->dip = tuple->dst.ip;
+	e->sport = tuple->src.u.all;
+	e->dport = tuple->dst.u.all;
+	e->protocol = tuple->dst.protonum;
+
+	e->manip.new_sip = rtuple->dst.ip;
+	e->manip.new_dip = rtuple->src.ip;
+	e->manip.new_sport = rtuple->dst.u.all;
+	e->manip.new_dport = rtuple->src.u.all;
+	e->manip.dst = skb->dst;
+	dst_hold(e->manip.dst);
+	e->manip.ct = ct;
+	e->manip.ctinfo = ctinfo;
+
+	hash = ffn_hash(e->sip, e->dip, e->sport, e->dport,
+			e->protocol == IPPROTO_TCP);
+	proto = (e->protocol == IPPROTO_TCP) ? IPPROTO_TCP : IPPROTO_UDP;
+
+	old = NULL;
+	local_bh_disable();
+
+	/* make sure it's not present */
+	if (__ip_ffn_find(e->sip, e->dip, e->sport, e->dport, proto, hash)) {
+		local_bh_enable();
+		dst_release(e->manip.dst);
+		kfree(e);
+		return;
+	}
+
+	/* remove first entry if we have too many */
+	if (ffn_entry_count > MAX_FFN_ENTRY) {
+		old = list_entry(ffn_all.next, struct ffn_lookup_entry,
+				 all_next);
+		list_del(&old->next);
+		list_del(&old->all_next);
+	} else
+		ffn_entry_count++;
+
+	/* add new entry */
+	list_add_tail(&e->next, &ffn_cache[hash % FFN_CACHE_SIZE]);
+	list_add_tail(&e->all_next, &ffn_all);
+	atomic_set(&e->users, 1);
+	local_bh_enable();
+
+	if (old)
+		ffn_put_entry(old);
+}
+
+/*
+ * netfilter callback when conntrack is about to be destroyed
+ */
+void ip_ffn_ct_destroy(struct ip_conntrack *ct)
+{
+	struct ip_conntrack_tuple *tuple;
+	struct ffn_lookup_entry *e;
+	int dir;
+
+	/* locate all entries that use this conntrack */
+	for (dir = 0; dir < 2; dir++) {
+		tuple = &ct->tuplehash[dir].tuple;
+
+		if (tuple->dst.protonum != IPPROTO_TCP &&
+		    tuple->dst.protonum != IPPROTO_UDP)
+			return;
+
+		e = ip_ffn_find(tuple->src.ip, tuple->dst.ip,
+				tuple->src.u.all, tuple->dst.u.all,
+				tuple->dst.protonum == IPPROTO_TCP);
+		if (!e)
+			continue;
+
+		local_bh_disable();
+		list_del(&e->next);
+		list_del(&e->all_next);
+		atomic_dec(&e->users);
+		ffn_entry_count--;
+		local_bh_enable();
+		ffn_put_entry(e);
+	}
+}
+
+/*
+ * initialize ffn cache data
+ */
+static inline void ip_ffn_init_cache(void)
+{
+	int i;
+
+	for (i = 0; i < FFN_CACHE_SIZE; i++)
+		INIT_LIST_HEAD(&ffn_cache[i]);
+	INIT_LIST_HEAD(&ffn_all);
+	ffn_entry_count = 0;
+}
+
+/*
+ * flush all ffn cache
+ */
+void ip_ffn_flush_all(void)
+{
+	struct ffn_lookup_entry *e, *tmp;
+
+	local_bh_disable();
+	list_for_each_entry_safe(e, tmp, &ffn_all, all_next)
+		ffn_put_entry(e);
+	ip_ffn_init_cache();
+	local_bh_enable();
+}
+
+/*
+ * initialize ffn
+ */
+void __init ip_ffn_init(void)
+{
+	printk("IP Fast Forward and NAT enabled\n");
+	ip_ffn_init_cache();
+}
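
ip_ffn.c above exposes ip_ffn_process() with the three return codes documented in its header comment, plus ip_ffn_add() for populating the cache from the slow path. A rough, hypothetical piece of receive-path glue (the real hook points are added elsewhere in this patch) would dispatch on those codes roughly as follows.

#include <linux/skbuff.h>

/* Hypothetical receive-path glue: try the FFN cache first, fall back to
 * the normal IP input path when the packet was not handled.  The extern
 * declarations stand in for whatever header the patch provides. */
extern int ip_ffn_process(struct sk_buff *skb);
extern void ip_ffn_add(struct sk_buff *skb);

static int rx_try_fast_path(struct sk_buff *skb,
			    int (*slow_path)(struct sk_buff *skb))
{
	switch (ip_ffn_process(skb)) {
	case 0:		/* cache hit: packet NATed and sent via dst_output() */
		return 0;
	case 1:		/* no entry yet: skb->ffn_state == 1, the slow path may
			 * call ip_ffn_add() once the conntrack is established */
	case 2:		/* not a candidate (fragment, IP options, non TCP/UDP) */
	default:
		return slow_path(skb);
	}
}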
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/iptable_tproxy.c	2010-12-29 19:30:08.921500487 +0100
@@ -0,0 +1,1860 @@
+/*
+ * Transparent proxy support for Linux/iptables
+ *
+ * Copyright (c) 2002-2004 BalaBit IT Ltd.
+ * Author: Balázs Scheidler, Krisztián Kovács
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include <linux/sysctl.h>
+#include <linux/vmalloc.h>
+#include <linux/net.h>
+#include <linux/slab.h>
+#include <linux/if.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/netdevice.h>
+#include <linux/time.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/sock.h>
+#include <asm/uaccess.h>
+#include <net/inet_timewait_sock.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_TPROXY.h>
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_nat.h>
+#include <linux/netfilter_ipv4/ip_nat_core.h>
+
+#include <linux/netfilter_ipv4/ip_tproxy.h>
+#include <linux/netfilter_ipv4/iptable_tproxy.h>
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
+DEFINE_RWLOCK(ip_tproxy_lock);
+
+#define TPROXY_VALID_HOOKS ((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_OUT))
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(args...)
+#endif
+
+#define TPROXY_MAJOR_VERSION 2
+#define TPROXY_MINOR_VERSION 0
+#define TPROXY_PATCH_VERSION 6
+
+#define TPROXY_FULL_VERSION ((TPROXY_MAJOR_VERSION << 24) | \
+			     (TPROXY_MINOR_VERSION << 16) | \
+			     TPROXY_PATCH_VERSION)
+
+#define MAJOR_VERSION(x) ((x >> 24) & 0xff)
+#define MINOR_VERSION(x) ((x >> 16) & 0xff)
+#define PATCH_VERSION(x) (x & 0xffff)
+
+/* simple and buggy, but enough for us */
+#define MIN(a,b) ((a < b) ? a : b)
+
+static struct
+{
+	struct ipt_replace repl;
+	struct ipt_standard entries[2];
+	struct ipt_error term;
+} initial_table
+= { { "tproxy", TPROXY_VALID_HOOKS, 3,
+      sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
+      { [NF_IP_PRE_ROUTING] 0,
+	[NF_IP_LOCAL_OUT] sizeof(struct ipt_standard) },
+      { [NF_IP_PRE_ROUTING] 0,
+	[NF_IP_LOCAL_OUT] sizeof(struct ipt_standard) },
+      0, NULL, { } },
+    {
+	    /* PRE_ROUTING */
+	    { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
+		0,
+		sizeof(struct ipt_entry),
+		sizeof(struct ipt_standard),
+		0, { 0, 0 }, { } },
+	      { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
+		-NF_ACCEPT - 1 } },
+	    /* LOCAL_OUT */
+	    { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
+		0,
+		sizeof(struct ipt_entry),
+		sizeof(struct ipt_standard),
+		0, { 0, 0 }, { } },
+	      { { { { IPT_ALIGN(sizeof(struct ipt_standard_target)), "" } }, { } },
+		-NF_ACCEPT - 1 } }
+    },
+    /* ERROR */
+    { { { { 0 }, { 0 }, { 0 }, { 0 }, "", "", { 0 }, { 0 }, 0, 0, 0 },
+	0,
+	sizeof(struct ipt_entry),
+	sizeof(struct ipt_error),
+	0, { 0, 0 }, { } },
+      { { { { IPT_ALIGN(sizeof(struct ipt_error_target)), IPT_ERROR_TARGET } },
+	  { } },
+	"ERROR"
+      }
+    }
+};
+
+static struct ipt_table tproxy_table = {
+	.name		= "tproxy",
+	.valid_hooks	= TPROXY_VALID_HOOKS,
+	.lock		= RW_LOCK_UNLOCKED,
+	.me		= THIS_MODULE,
+	.af		= AF_INET,
+};
+
+static void (*ip_conntrack_destroyed_old)(struct ip_conntrack *ct) = NULL;
+
+/* NAT entry setup flags */
+#define TN_BIDIR	1
+#define TN_STOREREF	2
+
+/* user settable flags */
+#define TF_NAT_ONCE	  0x00000001 /* this entry is applied only once */
+#define TF_LISTEN	  0x00000002 /* this entry is meant for listening */
+#define TF_CONNECT	  0x00000004 /* this entry is meant for connecting */
+#define TF_UNIDIR	  0x00000008 /* this entry is a listening UDP socket,
+					and only a unidirectional NAT is to be applied */
+
+/* state flags */
+#define TF_HASHED	  0x00010000 /* entry hashed in hashtable */
+#define TF_CONNECT_ONLY	  0x00020000 /* conflicting foreign address */
+#define TF_MARK_ONLY	  0x00040000 /* mark packets in this session as tproxy but don't apply translation */
+#define TF_NAT_APPLIED	  0x00080000 /* NAT already applied, ignore this entry during NAT search */
+#define TF_ORPHAN	  0x00100000 /* Parent (listening) socket was closed */
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+#define TF_NAT_RESERVED	  0x00200000 /* a NAT reservation was allocated for the sockref's foreign address */
+#define TF_NAT_PEER	  0x00400000 /* raddr was also specified at NAT reservation */
+#endif
+
+#define TF_STATE_MASK	  0xffff0000
+
+static int hashsize = 0;
+module_param(hashsize, uint, 0600);
+
+int ip_tproxy_htable_size = 127;
+struct list_head *ip_tproxy_bylocal;
+struct list_head *ip_tproxy_byforeign;
+kmem_cache_t *ip_tproxy_sockref_table;
+int ip_tproxy_htable_count = 0;
+struct ip_conntrack ip_tproxy_fake_ct;
+
+static u32
+ip_tproxy_hash_fn(u32 addr, u16 port, u8 proto)
+{
+	return ntohl(addr + (port<<8) + proto) % ip_tproxy_htable_size;
+}
+
+/* allocate memory and initialize a sockref structure */
+static struct ip_tproxy_sockref *
+ip_tproxy_sockref_new(void)
+{
+	struct ip_tproxy_sockref *sr;
+
+	sr = kmem_cache_zalloc(ip_tproxy_sockref_table, GFP_ATOMIC);
+	if (!sr)
+		return NULL;
+
+	atomic_set(&sr->references, 1);
+	sr->bylocal.sockref = sr;
+	sr->byforeign.sockref = sr;
+	sr->relatedlock = SPIN_LOCK_UNLOCKED;
+	INIT_LIST_HEAD(&sr->relatedct);
+	atomic_set(&sr->socket_count, 1);
+
+	return sr;
+}
+
+/* increase reference count for a sockref entry */
+static inline void
+ip_tproxy_sockref_ref(struct ip_tproxy_sockref *sr)
+{
+	atomic_inc(&sr->references);
+}
+
+/* decrease refcount for the entry, and free the structure if needed */
+static inline void
+ip_tproxy_sockref_unref(struct ip_tproxy_sockref *sr)
+{
+	if (atomic_dec_and_test(&sr->references)) {
+		kmem_cache_free(ip_tproxy_sockref_table, sr);
+	}
+}
+
+/* put a sockref entry in the hash tables */
+static void
+ip_tproxy_hash(struct ip_tproxy_sockref *sr)
+{
+	u32 fhash = ip_tproxy_hash_fn(sr->faddr, sr->fport, sr->proto);
+	u32 lhash = ip_tproxy_hash_fn(sr->laddr, sr->lport, sr->proto);
+
+	sr->flags |= TF_HASHED;
+	sr->tv_hashed = xtime;
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_hash(): hashing sockref, "
+	       "lhash=%d, fhash=%d, %p, %02x, %08x:%04x -> %08x:%04x\n",
+	       lhash, fhash, sr, sr->proto, sr->laddr,
+	       sr->lport, sr->faddr, sr->fport);
+
+	ip_tproxy_sockref_ref(sr);
+
+	list_add_tail(&sr->bylocal.list, &ip_tproxy_bylocal[lhash]);
+	list_add_tail(&sr->byforeign.list, &ip_tproxy_byforeign[fhash]);
+	ip_tproxy_htable_count++;
+}
+
+/* delete sockref from the hash tables */
+static void
+ip_tproxy_unhash(struct ip_tproxy_sockref *sr)
+{
+	DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_unhash(): unhashing sockref, "
+	       "%p, %02x, %08x:%04x -> %08x:%04x\n",
+	       sr, sr->proto, sr->laddr, sr->lport, sr->faddr, sr->fport);
+
+	if (sr->flags & TF_HASHED) {
+		list_del(&sr->bylocal.list);
+		list_del(&sr->byforeign.list);
+		sr->flags &= ~TF_HASHED;
+		ip_tproxy_sockref_unref(sr);
+		ip_tproxy_htable_count--;
+	}
+	else {
+		printk(KERN_WARNING "IP_TPROXY: unhashing a sockref which was "
+		       "not hashed before, %p, flags=%x\n",
+		       sr, sr->flags);
+	}
+}
+
+/* change the fport of the sockref to the specified value, and modify foreign hash
+ * accordingly (used when not specifying an exact foreign port, and NAT allocates a
+ * free port number for the sockref) */
+static void
+ip_tproxy_rehash_fport(struct ip_tproxy_sockref *sr, u16 fport)
+{
+	u32 fhash = ip_tproxy_hash_fn(sr->faddr, fport, sr->proto);
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_rehash_fport(): rehashing sockref, "
+	       "%p, %02x, %08x:%04x -> %08x:%04x, new fport %04x\n",
+	       sr, sr->proto, sr->laddr, sr->lport,
+	       sr->faddr, sr->fport, fport);
+
+	if (sr->flags & TF_HASHED) {
+		list_del(&sr->byforeign.list);
+		sr->fport = fport;
+		list_add_tail(&sr->byforeign.list, &ip_tproxy_byforeign[fhash]);
+	}
+}
+
+/* add a conntrack entry to the related list of the sockref */
+static void
+ip_tproxy_relatedct_add(struct ip_tproxy_sockref *sr, struct ip_conntrack *ct)
+{
+#ifdef IP_TPROXY_DEBUG
+	struct ip_conntrack *p;
+#endif
+
+	if (test_and_set_bit(IPS_TPROXY_RELATED_BIT, &ct->status)) {
+		/* this conntrack is already related to another sockref! */
+		return;
+	}
+
+	spin_lock_bh(&sr->relatedlock);
+
+#ifdef IP_TPROXY_DEBUG
+	/* check if it's already in the list */
+	list_for_each_entry(p, &sr->relatedct, tproxy.related) {
+		if (ct == p)
+			goto out;
+	}
+#endif
+
+	/* each related conntrack adds one to the reference count of the sockref */
+	ip_tproxy_sockref_ref(sr);
+	atomic_inc(&sr->related);
+	/* since we store a pointer to the conntrack, we should get a reference */
+	atomic_inc(&ct->ct_general.use);
+	list_add(&ct->tproxy.related, &sr->relatedct);
+
+#ifdef IP_TPROXY_DEBUG
+out:
+#endif
+	spin_unlock_bh(&sr->relatedlock);
+}
+
+/* called by conntrack when a connection is confirmed */
+static void
+ip_tproxy_confirmed(struct ip_conntrack *ct)
+{
+	struct ip_tproxy_sockref *sr = (struct ip_tproxy_sockref *)ct->tproxy.sockref;
+
+	/* check if it was marked by tproxy and not yet a related sockref */
+	if (test_bit(IPS_TPROXY_BIT, &ct->status) &&
+	    !test_bit(IPS_TPROXY_RELATED_BIT, &ct->status) &&
+	    sr) {
+		ct->tproxy.sockref = NULL;
+
+		/* put it on the sockref's related list */
+		if (sr->proto == IPPROTO_UDP)
+			ip_tproxy_relatedct_add(sr, ct);
+
+		/* drop reference to sockref */
+		ip_tproxy_sockref_unref(sr);
+	}
+}
+
+/* called by conntrack when a connection is destroyed */
+static void
+ip_tproxy_conntrack_destroyed(struct ip_conntrack *ct)
+{
+	/* check if it's not confirmed, but marked by tproxy */
+	if (!is_confirmed(ct) &&
+	    !test_bit(IPS_TPROXY_RELATED_BIT, &ct->status) &&
+	    test_and_clear_bit(IPS_TPROXY_BIT, &ct->status) &&
+	    ct->tproxy.sockref != NULL) {
+		/* drop reference */
+		ip_tproxy_sockref_unref((struct ip_tproxy_sockref *)ct->tproxy.sockref);
+		ct->tproxy.sockref = NULL;
+	}
+
+	if (ip_conntrack_destroyed_old)
+		ip_conntrack_destroyed_old(ct);
+}
+
+static int
+sockref_listen_cmp(const struct ip_tproxy_sockref *sr, const u32 raddr, const u16 rport,
+		   const struct ip_conntrack *ct)
+{
+	return (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip == sr->faddr) &&
+	       (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all == sr->fport) &&
+	       (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum == sr->proto) &&
+	       ((raddr == 0) || (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip == raddr)) &&
+	       ((rport == 0) || (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all == rport));
+}
+
+/* delete matching related connections from the sockref's list and delete them from
+ * the conntrack hash if requested */
+static void
+ip_tproxy_kill_related(struct ip_tproxy_sockref *sr, u32 raddr, u16 rport,
+		       int cmpfn(const struct ip_tproxy_sockref *, const u32 raddr,
+				 const u16 rport, const struct ip_conntrack *),
+		       int delete)
+{
+	struct ip_conntrack *ct, *p;
+
+	spin_lock_bh(&sr->relatedlock);
+
+	list_for_each_entry_safe(ct, p, &sr->relatedct, tproxy.related) {
+		/* if a compare function was given, don't delete unmatched entries */
+		if (cmpfn && !cmpfn(sr, raddr, rport, ct))
+			continue;
+
+		/* delete the conntrack entry from our related list, update related counter */
+		list_del(&ct->tproxy.related);
+		atomic_dec(&sr->related);
+
+#ifdef CONFIG_NETFILTER_DEBUG
+		/* clear IPS_TPROXY_RELATED flag from the conntrack */
+		if (!test_and_clear_bit(IPS_TPROXY_RELATED_BIT, &ct->status)) {
+			/* this is a bug: IPS_TPROXY_RELATED is not set for a conntrack? */
+			printk(KERN_WARNING "IP_TPROXY: IPS_TPROXY_RELATED not set "
+			       "for a related conntrack\n");
+		}
+#endif
+
+		/* should we delete the entry from the conntrack hash? */
+		if (delete && del_timer(&ct->timeout))
+			ct->timeout.function((unsigned long)ct);
+
+		/* unreference conntrack and sockref */
+		ip_conntrack_put(ct);
+		ip_tproxy_sockref_unref(sr);
+	}
+
+	spin_unlock_bh(&sr->relatedlock);
+}
+
+/* remove/kill related connections for the given sockref */
+static void
+ip_tproxy_kill_conntracks(struct ip_tproxy_sockref *sr,
+			  u32 raddr, u16 rport, int delete)
+{
+	if (sr->flags & TF_CONNECT) {
+		/* this is an established UDP "connection" or a CONNECT-ed
+		 * sockref, we delete all related connections from our list */
+		ip_tproxy_kill_related(sr, raddr, rport, NULL, delete);
+	} else if (sr->flags & TF_LISTEN) {
+		/* for listening sockrefs, we have to delete one specific
+		 * connection only, with both endpoints matching */
+		ip_tproxy_kill_related(sr, raddr, rport, sockref_listen_cmp, delete);
+	}
+}
+
+static void *ip_tproxy_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	/* we use seq_file->private pointer as an int */
+	unsigned int *bucket = (unsigned int *)&seq->private;
+
+	read_lock_bh(&ip_tproxy_lock);
+
+	if (*pos >= ip_tproxy_htable_size)
+		return NULL;
+
+	*bucket = *pos;
+	return bucket;
+}
+
+static void *ip_tproxy_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	unsigned int *bucket = (unsigned int *)v;
+
+	*pos = ++(*bucket);
+	if (*pos >= ip_tproxy_htable_size)
+		return NULL;
+
+	return bucket;
+}
+
+static void ip_tproxy_seq_stop(struct seq_file *seq, void *v)
+{
+	read_unlock_bh(&ip_tproxy_lock);
+}
+
+/* print information about a sockref, used by the procfs interface */
+static unsigned int
+ip_tproxy_print_sockref(const struct ip_tproxy_hash *h, struct seq_file *seq)
+{
+	struct ip_tproxy_sockref *sr = h->sockref;
+
+	IP_NF_ASSERT(sr);
+
+	return seq_printf(seq, "%05d %08x:%04x %08x:%04x %08x:%04x %08x %05u %06u %10ld:%06ld\n",
+			sr->proto, sr->faddr, sr->fport, sr->laddr,
+			sr->lport, sr->raddr, sr->rport, sr->flags,
+			atomic_read(&sr->related), atomic_read(&sr->socket_count),
+			sr->tv_hashed.tv_sec, sr->tv_hashed.tv_nsec) ? 1 : 0;
+}
+
+static int ip_tproxy_seq_show(struct seq_file *seq, void *v)
+{
+	unsigned int *bucket = (unsigned int *)v;
+	struct ip_tproxy_hash *h;
+
+	list_for_each_entry(h, &ip_tproxy_bylocal[*bucket], list)
+		if (ip_tproxy_print_sockref(h, seq))
+			return 1;
+
+	return 0;
+}
+
+static struct seq_operations ip_tproxy_seq_ops = {
+	.start = ip_tproxy_seq_start,
+	.next  = ip_tproxy_seq_next,
+	.stop  = ip_tproxy_seq_stop,
+	.show  = ip_tproxy_seq_show
+};
+
+static int ip_tproxy_proc_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ip_tproxy_seq_ops);
+}
+
+static struct file_operations ip_tproxy_file_ops = {
+	.owner	 = THIS_MODULE,
+	.open	 = ip_tproxy_proc_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = seq_release
+};
+
+/* lookup sockref based on the local address. refcount is not incremented on the returned sockref */
+struct ip_tproxy_sockref *
+ip_tproxy_sockref_find_local(u32 addr, u16 port, u8 proto, int fresh, u32 raddr, u16 rport)
+{
+	u32 hash = ip_tproxy_hash_fn(addr, port, proto);
+	struct ip_tproxy_hash *h;
+	struct ip_tproxy_sockref *sr, *best = NULL;
+
+	ASSERT_READ_LOCK(&ip_tproxy_bylocal[hash]);
+	DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_sockref_find_local(): "
+	       "entered, %08x:%04x\n", addr, port);
+
+	list_for_each_entry(h, &ip_tproxy_bylocal[hash], list) {
+		sr = h->sockref;
+
+		DEBUGP(KERN_DEBUG "IP_TPROXY: sockref_cmpfn_local(): sr: %08x:%04x "
+		       "(foreign: %08x:%04x remote: %08x:%04x), searched: "
+		       "%08x:%04x (remote: %08x:%04x)\n",
+		       sr->laddr, sr->lport, sr->faddr, sr->fport,
+		       sr->raddr, sr->rport, addr, port, raddr, rport);
+
+		if (sr->laddr == addr && sr->lport == port && sr->proto == proto) {
+			/* fresh means orphaned entries should be skipped */
+			if (fresh && (sr->flags & TF_ORPHAN))
+				continue;
+
+			if (raddr == 0 && rport == 0) {
+				/* not interested in remote address */
+				return sr;
+			}
+			else if (sr->raddr == raddr && sr->rport == rport) {
+				/* complete match */
+				return sr;
+			}
+			else if (sr->raddr == 0 && sr->rport == 0) {
+				/* unconnected sockref if complete match not found */
+				best = sr;
+			}
+		}
+	}
+
+	return best;
+}
+
+/* lookup sockref based on the foreign address. refcount is not incremented on the returned sockref */
+struct ip_tproxy_sockref *
+ip_tproxy_sockref_find_foreign(u32 addr, u16 port, u8 proto, u32 raddr, u16 rport)
+{
+	u32 hash = ip_tproxy_hash_fn(addr, port, proto);
+	struct ip_tproxy_hash *h;
+	struct ip_tproxy_sockref *sr, *best = NULL;
+
+	ASSERT_READ_LOCK(&ip_tproxy_byforeign[hash]);
+	DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_sockref_find_foreign(): "
+	       "entered, %08x:%04x\n", addr, port);
+
+	list_for_each_entry(h, &ip_tproxy_byforeign[hash], list) {
+		sr = h->sockref;
+
+		DEBUGP(KERN_DEBUG "IP_TPROXY: sockref_cmpfn_foreign(): sr: %08x:%04x "
+		       "(remote: %08x:%04x), searched: %08x:%04x (remote: %08x:%04x)\n",
+		       sr->faddr, sr->fport, sr->raddr, sr->rport, addr, port, raddr, rport);
+
+		if (sr->faddr == addr && sr->fport == port && sr->proto == proto) {
+			if (raddr == 0 && rport == 0) {
+				/* not interested in remote address */
+				return sr;
+			}
+			else if (sr->raddr == raddr && sr->rport == rport) {
+				/* complete match */
+				return sr;
+			}
+			else if (sr->raddr == 0 && sr->rport == 0) {
+				/* unconnected sockref if complete match not found */
+				best = sr;
+			}
+		}
+	}
+
+	return best;
+}
+
+/* delete all sockrefs currently in the hash tables
+ * FIXME: we might have a race here. If our hook is running while the module
+ * is unloading, bad things might happen. */
+static void
+ip_tproxy_sockref_table_free(void)
+{
+	int i;
+	struct ip_tproxy_hash *h, *p;
+
+	for (i = 0; i < ip_tproxy_htable_size; i++) {
+		list_for_each_entry_safe(h, p, &ip_tproxy_bylocal[i], list) {
+			list_del(&h->list);
+			ip_tproxy_kill_conntracks(h->sockref, 0, 0, 0);
+			ip_tproxy_sockref_unref(h->sockref);
+		}
+		ip_tproxy_byforeign[i].prev = ip_tproxy_byforeign[i].next = &ip_tproxy_byforeign[i];
+	}
+}
+
+/* setup a bidirectional NAT mapping for the given connection, using the values specified by
+ * the assigned sockref */
+static int
+ip_tproxy_setup_nat_bidir(struct ip_conntrack *ct, int hooknum, struct ip_tproxy_sockref *sr, unsigned int flags)
+{
+	struct ip_nat_range range;
+	u32 newip = 0;
+	u16 newport = 0;
+	int res;
+
+	if (is_confirmed(ct) || ip_nat_initialized(ct, HOOK2MANIP(hooknum))) {
+		return NF_ACCEPT;
+	}
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_setup_nat(): adding nat "
+	       "entry hooknum=%d %08x:%04x -> %08x:%04x\n", hooknum, sr->laddr,
+	       sr->lport, sr->faddr, sr->fport);
+
+	range.flags = IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_BYPASS_HELPERS;
+
+	if (hooknum == NF_IP_POST_ROUTING) {
+		/* in POSTROUTING we perform an SNAT to the foreign address */
+		newip = sr->faddr;
+		newport = sr->fport;
+	}
+	else if (hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_LOCAL_OUT) {
+		/* in PREROUTING and LOCAL_OUT we perform a DNAT to our socket address */
+
+		newip = sr->laddr;
+		newport = sr->lport;
+	}
+
+	range.min_ip = range.max_ip = newip;
+
+	/* if port number was specified */
+	if (newport != 0) {
+		if (sr->proto == IPPROTO_TCP) {
+			range.min.tcp.port = range.max.tcp.port = newport;
+			range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+		}
+		else if (sr->proto == IPPROTO_UDP) {
+			range.min.udp.port = range.max.udp.port = newport;
+			range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+		}
+#ifdef CONFIG_IP_NF_NAT_NRES
+		if (sr->flags & TF_NAT_RESERVED)
+			range.flags |= IP_NAT_RANGE_USE_RESERVED;
+#endif
+	}
+
+	res = ip_nat_setup_info(ct, &range, hooknum);
+
+	if (res != NF_ACCEPT) {
+		printk(KERN_WARNING "IP_TPROXY: error applying NAT mapping, "
+		       "hooknum=%d %08x:%04x -> %08x:%04x\n",
+		       hooknum, sr->laddr, sr->lport, newip, newport);
+	} else {
+		/* we store a reference to the sockref in the conntrack */
+		if (!test_and_set_bit(IPS_TPROXY_BIT, &ct->status)) {
+			if (flags & TN_STOREREF) {
+				ip_tproxy_sockref_ref(sr);
+				ct->tproxy.sockref = sr;
+			}
+		}
+
+		if ((newport == 0) && (ct->status & IPS_SRC_NAT) && (sr->flags & TF_HASHED)) {
+			u16 fport = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all;
+			write_lock_bh(&ip_tproxy_lock);
+			/* FIXME: necessary and correct? */
+			ip_tproxy_rehash_fport(sr, fport);
+			write_unlock_bh(&ip_tproxy_lock);
+		}
+	}
+
+	return res;
+}
+
+/* redirect incoming packet to the appropriate local port (UDP specific) */
+static int
+ip_tproxy_setup_nat_unidir(struct sk_buff **pskb, int hooknum, struct ip_tproxy_sockref *sr)
+{
+	enum ip_nat_manip_type manip_type;
+	struct sk_buff *skb = *pskb;
+	struct ip_conntrack_tuple tuple;
+
+	/* free the original conntrack entry, and assign the fake one */
+	nf_conntrack_put(skb->nfct);
+	skb->nfct = &ip_tproxy_fake_ct.ct_general;
+	skb->nfctinfo = IP_CT_NEW;
+	nf_conntrack_get(skb->nfct);
+
+	/* fill tuple structure with new information */
+	tuple.dst.protonum = skb->nh.iph->protocol;
+
+	if (hooknum == NF_IP_POST_ROUTING) {
+		/* in POSTROUTING we perform an SNAT to the foreign address */
+		tuple.src.ip = sr->faddr;
+		tuple.src.u.udp.port = sr->fport;
+		manip_type = IP_NAT_MANIP_SRC;
+	} else if (hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_LOCAL_OUT) {
+		/* in PREROUTING and LOCAL_OUT we perform a DNAT to our socket address */
+		tuple.dst.ip = sr->laddr;
+		tuple.dst.u.udp.port = sr->lport;
+		manip_type = IP_NAT_MANIP_DST;
+	} else
+		return NF_DROP;
+
+	/* manipulate packet "by hand" */
+	if (unlikely(!ip_nat_manip_pkt(skb->nh.iph->protocol, pskb, 0, &tuple, manip_type))) {
+		DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_setup_nat_unidir(): "
+		       "failed to rewrite packet header\n");
+		return NF_DROP;
+	}
+
+	return NF_ACCEPT;
+}
+
+/* setup NAT for the packet */
+int
+ip_tproxy_setup_nat(struct sk_buff **pskb, int hooknum, struct ip_tproxy_sockref *sr, unsigned int flags)
+{
+	if (sr->proto == IPPROTO_TCP || (flags & TN_BIDIR)) {
+		struct ip_conntrack *ct;
+		enum ip_conntrack_info ctinfo;
+
+		ct = ip_conntrack_get(*pskb, &ctinfo);
+		return ip_tproxy_setup_nat_bidir(ct, hooknum, sr, flags);
+	} else if (sr->proto == IPPROTO_UDP)
+		return ip_tproxy_setup_nat_unidir(pskb, hooknum, sr);
+
+	return NF_DROP;
+}
+EXPORT_SYMBOL_GPL(ip_tproxy_setup_nat);
+
+/* This is a gross hack */
+static void
+ip_tproxy_save_orig_addrs(struct sk_buff *skb)
+{
+	struct iphdr *iph = skb->nh.iph;
+	u16 *tports, _tports[2];
+
+	tports = skb_header_pointer(skb, iph->ihl * 4, sizeof(_tports), &_tports);
+	if (tports != NULL) {
+		IPCB(skb)->orig_srcaddr = iph->saddr;
+		IPCB(skb)->orig_srcport = tports[0];
+		IPCB(skb)->orig_dstaddr = iph->daddr;
+		IPCB(skb)->orig_dstport = tports[1];
+	}
+}
+
+/* tproxy Netfilter hook */
+static unsigned int
+ip_tproxy_fn(unsigned int hooknum,
+	     struct sk_buff **pskb,
+	     const struct net_device *in,
+	     const struct net_device *out,
+	     int (*okfn)(struct sk_buff *))
+{
+	struct ip_conntrack *ct;
+	enum ip_conntrack_info ctinfo;
+	unsigned int verdict = NF_ACCEPT;
+
+	ct = ip_conntrack_get(*pskb, &ctinfo);
+
+	if (ct && ctinfo == IP_CT_NEW) {
+		struct iphdr *iph = (*pskb)->nh.iph;
+		u16 *tports, _tports[2];
+		struct ip_tproxy_sockref *sr = NULL;
+
+		tports = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl * 4,
+					    sizeof(_tports), &_tports);
+		if (tports == NULL) {
+			DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_fn(): "
+			       "failed to copy protocol header\n");
+			return NF_DROP;
+		}
+
+		DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_fn(): new connection, "
+		       "hook=%d, %08x:%04x -> %08x:%04x\n",
+		       hooknum, iph->saddr, tports[0], iph->daddr, tports[1]);
+
+		ip_tproxy_save_orig_addrs(*pskb);
+		read_lock_bh(&ip_tproxy_lock);
+		if (hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_LOCAL_OUT) {
+
+			/*
+			 * We either received a connection from the network (PREROUTING case)
+			 * or a local process generated one (LOCAL_OUT case).
+			 *
+			 * In either case we check whether a proxy is bound to the
+			 * destination of this connection.
+			 *
+			 * As a special case we check in LOCAL_OUT whether the
+			 * connection was initiated by a local proxy, and if it
+			 * was we mark the connection as such and skip the
+			 * tproxy table.
+			 */
+
+			/* destination address is interesting */
+
+			sr = ip_tproxy_sockref_find_foreign(iph->daddr, tports[1],
+						iph->protocol, iph->saddr, tports[0]);
+
+			if (sr && sr->flags & TF_ORPHAN) {
+				/* This sockref is orphaned, the listening socket is already unassigned,
+				 * so it should not be used for setting up NAT for a new connection. */
+				sr = NULL;
+			}
+
+			if (sr && (sr->flags & (TF_LISTEN|TF_MARK_ONLY)) == 0) {
+				DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_fn(PREROUTING), "
+				       "entry found but flags = 0\n");
+				sr = NULL;
+			}
+
+			if (hooknum == NF_IP_LOCAL_OUT &&
+			    !sr &&
+			    (sr = ip_tproxy_sockref_find_local(iph->saddr, tports[0],
+							       iph->protocol, 1, iph->daddr,
+							       tports[1]))) {
+				DEBUGP("IP_TPROXY: tproxy initiated session in local "
+				       "output, sr->flags=%04x\n", sr->flags);
+				if ((sr->flags & TF_MARK_ONLY) == 0)
+					sr = NULL;
+			}
+		}
+		else if (hooknum == NF_IP_POST_ROUTING) {
+
+			/*
+			 * We detected a new connection just leaving this box, so
+			 * we now have a chance to add a translation changing
+			 * the source address of all packets. We want to do this
+			 * if the connection was initiated by a transparent proxy
+			 * which registered another address to rewrite the source into.
+			 *
+			 * A proxy registered an entry if find_local returns non-NULL.
+			 */
+
+			/* source address is interesting */
+
+			sr = ip_tproxy_sockref_find_local(iph->saddr, tports[0], iph->protocol,
+					1, iph->daddr, tports[1]);
+			if (sr && (sr->flags & (TF_CONNECT|TF_MARK_ONLY)) == 0) {
+				DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_fn(POSTROUTING), "
+				       "entry found but flags = 0\n");
+				sr = NULL;
+			}
+		}
+		else {
+			printk(KERN_WARNING "IP_TPROXY: hook function called at hooks other "
+			       "than NF_IP_PRE_ROUTING, NF_IP_POST_ROUTING or "
+			       "NF_IP_LOCAL_OUT, hooknum=%d\n", hooknum);
+			verdict = NF_DROP;
+		}
+
+		/*
+		 * sockref will not be freed, as the hash is read locked here
+		 * and by the time we unlock it we own a reference
+		 */
+
+		if (sr) {
+			if (sr->flags & TF_MARK_ONLY) {
+				/*
+				 * A MARK_ONLY entry indicates that although the proxy
+				 * doesn't want any address rewrite to be performed
+				 * it registered its connection as one originating
+				 * from a transparent proxy, so -m tproxy matches it.
+				 *
+				 * It is a convenience feature, so administrators
+				 * can simply let tproxied traffic through their filter
+				 * table.
+				 */
+				DEBUGP(KERN_DEBUG "IP_TPROXY: mark only entry...\n");
+
+				if (!test_and_set_bit(IPS_TPROXY_BIT, &ct->status))
+					ct->tproxy.sockref = NULL;
+
+				sr = NULL;
+			}
+			else {
+				/* we'll have a reference to the sockref after releasing the lock */
+				ip_tproxy_sockref_ref(sr);
+			}
+		}
+		read_unlock_bh(&ip_tproxy_lock);
+
+		DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_fn(): sockref looked up, sr=%p\n", sr);
+		if (sr) {
+
+			/* a sockref was found; it is a real translation, as
+			 * MARK_ONLY was handled above, so we apply the
+			 * necessary NAT function
+			 */
+
+			/* apply NAT mapping */
+			unsigned int dirflag = !(sr->flags & TF_UNIDIR) ? TN_BIDIR : 0;
+			if (ip_tproxy_setup_nat(pskb, hooknum, sr, dirflag | TN_STOREREF) == NF_ACCEPT) {
+				/* FIXME: hmm. there might be races involved
+				 * with TF_NAT_APPLIED, as another processor
+				 * might be processing the same sockref.
+				 */
+				sr->flags |= TF_NAT_APPLIED;
+			} else {
+				/* Applying the NAT mapping failed, we should drop the packet */
+				verdict = NF_DROP;
+			}
+
+			/* drop reference */
+			ip_tproxy_sockref_unref(sr);
+		} /* if (sr) */
+		else if (!test_bit(IPS_TPROXY_BIT, &ct->status) &&
+			 (hooknum == NF_IP_PRE_ROUTING || hooknum == NF_IP_LOCAL_OUT)) {
+
+			/* there was no matching sockref, so we consult the
+			 * TPROXY table
+			 */
+
+			verdict = ipt_do_table(pskb, hooknum, in, out, &tproxy_table);
+		}
+	}
+
+	return verdict;
+}
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+static inline struct ip_nat_reserved *
+ip_tproxy_nat_reserve(const u32 faddr, const u16 fport, int proto, const u32 raddr, const u16 rport)
+{
+	struct ip_conntrack_manip m = {.ip = faddr, .u = {.all = fport}};
+	struct ip_conntrack_manip p = {.ip = raddr, .u = {.all = rport}};
+	struct ip_nat_reserved *res;
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_nat_reserve proto %u foreign "
+	       "%u.%u.%u.%u:%u peer %u.%u.%u.%u:%u\n",
+	       proto, NIPQUAD(faddr), ntohs(fport), NIPQUAD(raddr), ntohs(rport));
+
+	write_lock_bh(&ip_nat_lock);
+	res = __ip_nat_reserved_new_hash(&m, proto, (raddr && rport) ? &p : NULL);
+	write_unlock_bh(&ip_nat_lock);
+
+	return res;
+}
+
+static void
+ip_tproxy_nat_reserved_free(struct ip_tproxy_sockref *sr)
+{
+	struct ip_nat_reserved *res;
+	struct ip_conntrack_manip m = {.ip = sr->faddr, .u = {.all = sr->fport}};
+	struct ip_conntrack_manip p = {.ip = sr->raddr, .u = {.all = sr->rport}};
+
+	/* free NAT reservation */
+	if (sr->flags & TF_NAT_RESERVED) {
+		write_lock_bh(&ip_nat_lock);
+		if (sr->flags & TF_NAT_PEER)
+			res = __ip_nat_reserved_unhash(&m, sr->proto, &p);
+		else
+			res = __ip_nat_reserved_unhash(&m, sr->proto, NULL);
+		write_unlock_bh(&ip_nat_lock);
+
+		if (res) {
+			sr->flags &= ~(TF_NAT_RESERVED | TF_NAT_PEER);
+			__ip_nat_reserved_free(res);
+		}
+	}
+}
+#endif
+
+/* This routine dynamically allocates a foreign port if the proxy requests this
+ * by setting fport to 0. We try to use the same algorithm the local stack
+ * uses to allocate a port. The routine itself is only used when we need to
+ * allocate a foreign port _before_ sending the first packet, standard connect
+ * sockets get their foreign port allocated by the NAT subsystem. */
+static inline int
+ip_tproxy_sockref_uniq(struct ip_tproxy_sockref *sr)
+{
+	int min, max, rover, left;
+	static int ip_tproxy_port_rover = 0;
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_sockref_uniq\n");
+	min = sysctl_local_port_range[0];
+	max = sysctl_local_port_range[1];
+	rover = ip_tproxy_port_rover;
+	left = (max - min) + 1;
+	do {
+		rover++;
+		if (rover < min || rover > max)
+			rover = min;
+		if (ip_tproxy_sockref_find_foreign(sr->faddr, htons(rover),
+						   sr->proto, sr->raddr,
+						   sr->rport) == NULL) {
+#ifdef CONFIG_IP_NF_NAT_NRES
+			/* unique entry found, try to reserve in NAT */
+			if (ip_tproxy_nat_reserve(sr->faddr, htons(rover), sr->proto, sr->raddr, sr->rport))
+#endif
+				break;
+		}
+	} while (--left > 0);
+
+	if (left == 0) {
+		printk(KERN_WARNING "IP_TPROXY: out of free foreign ports, "
+		       "increase local_port_range\n");
+		return 0;
+	} else if (rover == 0) {
+		printk(KERN_WARNING "IP_TPROXY: hm?? ip_tproxy_sockref_uniq, "
+		       "left != 0 && rover == 0\n");
+	} else {
+		/* succeeded */
+		DEBUGP(KERN_DEBUG "IP_TPROXY: ip_tproxy_sockref_uniq, "
+		       "allocated port=%d\n", rover);
+
+		ip_tproxy_port_rover = rover;
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+		sr->flags |= TF_NAT_RESERVED;
+		if (sr->raddr && sr->rport)
+			sr->flags |= TF_NAT_PEER;
+#endif
+		ip_tproxy_rehash_fport(sr, htons(rover));
+	}
+
+	return rover;
+}
+
+static int
+ip_tproxy_setsockopt_version(struct sock *sk, int proto, struct in_tproxy *itp)
+{
+	int res = 0;
+	u_int32_t ver = itp->v.version;
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_VERSION\n");
+
+	if ((MAJOR_VERSION(ver) != TPROXY_MAJOR_VERSION) ||
+	    (MINOR_VERSION(ver) > TPROXY_MINOR_VERSION))
+		res = -EINVAL;
+
+	return res;
+}
+
+static int
+ip_tproxy_setsockopt_assign(struct sock *sk, int proto, struct in_tproxy *itp)
+{
+	int foreign_matches, res = 0;
+	struct ip_tproxy_sockref *sr, *tsr = NULL;
+	struct inet_sock *inet = inet_sk(sk);
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_ASSIGN %08x:%04x\n",
+	       inet->rcv_saddr, inet->sport);
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if ((sk->sk_socket == NULL) ||
+	    (sk->sk_socket->state != SS_UNCONNECTED)) {
+		DEBUGP(KERN_DEBUG "IP_TPROXY: socket is not SS_UNCONNECTED "
+		       "during assign\n");
+		return -EINVAL;
+	}
+
+	if (!inet->rcv_saddr || !inet->sport)
+		return -EINVAL;
+
+	read_lock_bh(&ip_tproxy_lock);
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: count=%d\n", ip_tproxy_htable_count);
+
+	/* check if this socket was already assigned a sockref */
+	sr = ip_tproxy_sockref_find_local(inet->rcv_saddr, inet->sport, proto, 0, 0, 0);
+
+	/* NOTE: this is a HACK, and trusts the userspace app.
+	   We allow assigning multiple sockrefs to a single
+	   local addr:port pair _iff_ the foreign address is
+	   0.0.0.0:0 to allow UDP sessions to be bound to
+	   the same socket while keeping the 'mark as
+	   tproxy' packet mechanism.
+
+	   Maybe we should assign sockrefs to the struct sock *
+	   address instead.
+	*/
+	if (sr) {
+		if (itp->v.addr.faddr.s_addr || itp->v.addr.fport) {
+			printk("IP_TPROXY: socket already assigned, reuse=%d, "
+			       "%08x:%04x, sr->faddr=%08x:%04x, flags=%x, "
+			       "sr->tv_hashed=%ld:%ld\n", sk->sk_reuse,
+			       inet->rcv_saddr, inet->sport, sr->faddr, sr->fport,
+			       sr->flags, sr->tv_hashed.tv_sec, sr->tv_hashed.tv_nsec);
+			res = -EEXIST;
+			goto read_unlk;
+		} else {
+			DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_ASSIGN local address "
+			       "already taken, sharing this sockref\n");
+
+			/* increase socket count of sockref, indicating that it is
+			 * shared between multiple sockets */
+			atomic_inc(&sr->socket_count);
+			goto read_unlk;
+		}
+	}
+
+	/* check if the foreign address specified has already been taken.
+	 * if it has, the socket can only be used for connecting, provided
+	 * sk->sk_reuse is true, otherwise fail */
+
+	if (itp->v.addr.faddr.s_addr && itp->v.addr.fport != 0 &&
+	    (tsr = ip_tproxy_sockref_find_foreign(itp->v.addr.faddr.s_addr, itp->v.addr.fport, proto, 0, 0))) {
+		if (!sk->sk_reuse) {
+			res = -EADDRINUSE;
+			goto read_unlk;
+		}
+		foreign_matches = 1;
+	} else {
+		foreign_matches = 0;
+	}
+
+	/* we performed all checks, now allocate and fill a new
+	 * sockref */
+
+	sr = ip_tproxy_sockref_new();
+	if (!sr) {
+		printk(KERN_WARNING "IP_TPROXY: drained? cannot allocate sockref\n");
+		res = -ENOBUFS;
+		goto read_unlk;
+	}
+	sr->flags = 0;
+	sr->proto = proto;
+	sr->faddr = itp->v.addr.faddr.s_addr;
+	sr->fport = itp->v.addr.fport;
+	sr->laddr = inet->rcv_saddr;
+	sr->lport = inet->sport;
+	sr->assigned_to = sk;
+
+	if (itp->v.addr.faddr.s_addr == 0) {
+		/* we store the local address as foreign as well
+		 * for mark only connections, so find_foreign
+		 * finds this entry as well */
+
+		sr->flags |= TF_MARK_ONLY;
+		sr->faddr = sr->laddr;
+		sr->fport = sr->lport;
+	}
+	else if (foreign_matches) {
+		/* sk->sk_reuse was true */
+		/* if the existing sockref is mark only, or has its remote
+		 * endpoint specified, we have a chance not to clash with it,
+		 * otherwise this sockref will be connect-only */
+
+		if ((tsr->flags & TF_MARK_ONLY) || (tsr->raddr != 0 && tsr->rport != 0)) {
+			DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_ASSIGN omitting "
+			       "CONNECT_ONLY, other sockref is mark-only or connected\n");
+		} else {
+			sr->flags |= TF_CONNECT_ONLY;
+			DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_ASSIGN setting "
+			       "sr %p CONNECT_ONLY\n", sr);
+		}
+	}
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_ASSIGN sr %p faddr:fport "
+	       "%08x:%04x flags %08x\n", sr, sr->faddr, sr->fport, sr->flags);
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+	/* If SO_REUSE is not set and foreign port was specified, we should
+	 * allocate a NAT reservation right now. This mode is used by range
+	 * binds, so being pessimistic at NAT reservation clash checks causes
+	 * the caller to proceed to the next port and try again. */
+	if (itp->v.addr.faddr.s_addr && itp->v.addr.fport &&
+	    !foreign_matches && !sk->sk_reuse) {
+		/* we should register a NAT reservation */
+		if (ip_tproxy_nat_reserve(sr->faddr, sr->fport, proto, 0, 0)) {
+			sr->flags |= TF_NAT_RESERVED;
+			sr->flags &= ~TF_NAT_PEER;
+		} else {
+			/* failed to register NAT reservation, bail out */
+			DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_ASSIGN cannot "
+			       "register NAT reservation %08x:%04x\n",
+			       sr->faddr, sr->fport);
+
+			res = -EINVAL;
+			ip_tproxy_sockref_unref(sr);
+			goto read_unlk;
+		}
+	}
+#endif
+
+	read_unlock_bh(&ip_tproxy_lock);
+	write_lock_bh(&ip_tproxy_lock);
+	/* here we should check if we've won the race: if a sockref is in the
+	 * local hash by the time we acquired the write lock, we've lost */
+	if (!(tsr = ip_tproxy_sockref_find_local(inet->rcv_saddr,
+						 inet->sport, proto, 0, 0, 0)))
+		ip_tproxy_hash(sr);
+	write_unlock_bh(&ip_tproxy_lock);
+
+	if (tsr) {
+		/* we've lost the race */
+		res = -EINVAL;
+	}
+
+	/* the hashtable stores a reference, if hashing succeeded */
+	ip_tproxy_sockref_unref(sr);
+
+	return res;
+
+ read_unlk:
+	read_unlock_bh(&ip_tproxy_lock);
+	return res;
+}
+
+static int
+ip_tproxy_setsockopt_unassign(struct sock *sk, int proto, struct in_tproxy *itp)
+{
+	int res = 0, unhash = 0;
+	u32 saddr, daddr;
+	u16 sport, dport;
+	struct ip_tproxy_sockref *sr;
+
+	/* connected and time-wait sockets have to be handled differently */
+	if (sk->sk_state == TCP_TIME_WAIT) {
+		struct inet_timewait_sock *tw = inet_twsk(sk);
+		saddr = tw->tw_rcv_saddr;
+		daddr = tw->tw_daddr;
+		sport = tw->tw_sport;
+		dport = tw->tw_dport;
+	} else {
+		struct inet_sock *inet = inet_sk(sk);
+		saddr = inet->rcv_saddr;
+		daddr = inet->daddr;
+		sport = inet->sport;
+		dport = inet->dport;
+	}
+
+	/* break the connection between this socket and
+	 * a foreign address. This is implicitly performed
+	 * when the socket is closed */
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_UNASSIGN %08x:%04x\n", saddr, sport);
+
+	write_lock_bh(&ip_tproxy_lock);
+	sr = ip_tproxy_sockref_find_local(saddr, sport, proto, 0, daddr, dport);
+
+	if (!sr) {
+		DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_UNASSIGN not unhashing socket, "
+		       "%08x:%04x, proto=%d\n",
+		       saddr, sport, proto);
+		res = -ENOENT;
+		goto write_unlk;
+	}
+
+	/* Delete appropriate related connections and set 'unhash' if
+	 * we have to unhash the sockref. */
+
+	/* Handle mark-only sockrefs separately: mark-only sockrefs don't have
+	 * related conntrack entries, so there is no need to bother with
+	 * deleting the correct one from the related list. However, mark-only
+	 * entries can be shared, which means that more than one socket is
+	 * bound to the same local address and they all use the same sockref
+	 * to have matching connections marked. Because of this, we may unhash
+	 * the sockref only when no sockets are left. */
+	if (sr->flags & TF_MARK_ONLY) {
+		DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_UNASSIGN unassigning "
+		       "mark-only sockref %08x:%04x\n", saddr, sport);
+		if (atomic_dec_and_test(&sr->socket_count)) {
+			/* this was the last socket using this sockref */
+			unhash = 1;
+		}
+	} else switch (proto) {
+	case IPPROTO_TCP:
+		if ((sr->flags & TF_LISTEN)) {
+			if (sr->assigned_to != sk) {
+				/* unassigning the socket of a connection
+				 * established to a listening socket */
+				DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_UNASSIGN unassigning "
+				       "TCP listen related %08x:%04x -> %08x:%04x\n",
+				       daddr, dport, saddr, sport);
+			} else {
+				/* unassigning a listening socket; don't destroy it, just mark it invalid */
+				DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_UNASSIGN unassigning "
+				       "TCP listen socket %08x:%04x\n", saddr, sport);
+				sr->flags |= TF_ORPHAN;
+				sr->assigned_to = NULL;
+			}
+
+			/* we have to unhash if there are no more related
+			 * connections and the listening socket is closed as
+			 * well */
+			if (!atomic_read(&sr->related) && !sr->assigned_to)
+				unhash = 1;
+
+		} else if (sr->flags & TF_CONNECT) {
+			/* unassigning a connect socket */
+			DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_UNASSIGN unassigning "
+			       "TCP connect %08x:%04x\n", saddr, sport);
+			unhash = 1;
+		}
+		break;
+
+	case IPPROTO_UDP:
+		DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_UNASSIGN unassigning UDP "
+		       "%08x:%04x\n", saddr, sport);
+		ip_tproxy_kill_conntracks(sr, 0, 0, 1);
+		unhash = 1;
+		break;
+	}
+
+	/* unhash sockref if we don't need it anymore */
+	if (unhash) {
+#ifdef CONFIG_IP_NF_NAT_NRES
+		ip_tproxy_nat_reserved_free(sr);
+#endif
+		ip_tproxy_unhash(sr);
+	}
+
+ write_unlk:
+	write_unlock_bh(&ip_tproxy_lock);
+
+	return res;
+}
+
+static int
+ip_tproxy_setsockopt_flags(struct sock *sk, int proto, struct in_tproxy *itp)
+{
+	int res = 0;
+	struct ip_tproxy_sockref *sr;
+	u_int32_t flags = itp->v.flags;
+	struct inet_sock *inet = inet_sk(sk);
+
+	/* specify translation flags for this socket */
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS %08x:%04x\n",
+	       inet->rcv_saddr, inet->sport);
+
+	/* we don't check CAP_NET_ADMIN here; it was checked when this entry was hashed */
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS flags to set %08x\n",
+	       flags);
+
+	/* FIXME: since read locks cannot be upgraded, we need a write lock if
+	 * foreign port allocation will be needed... */
+	write_lock_bh(&ip_tproxy_lock);
+	sr = ip_tproxy_sockref_find_local(inet->rcv_saddr, inet->sport, proto,
+					  0, inet->daddr, inet->dport);
+	if (!sr) {
+		res = -ENOENT;
+		goto write_unlk;
+	}
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS sr %p flags %08x\n", sr, sr->flags);
+
+	/* Don't do anything in case of MARK_ONLY sockrefs */
+	if (sr->flags & TF_MARK_ONLY) {
+		DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS sr %p mark only, "
+		       "doing nothing\n", sr);
+		goto write_unlk;
+	}
+
+	/* clear user-settable flags */
+	sr->flags &= TF_STATE_MASK;
+
+	/* set TF_CONNECT/TF_LISTEN if needed */
+	switch (flags & (ITP_CONNECT | ITP_LISTEN | ITP_ESTABLISHED)) {
+	case ITP_CONNECT:
+		sr->flags |= TF_CONNECT;
+		ip_tproxy_kill_conntracks(sr, 0, 0, 1);
+		break;
+	case ITP_LISTEN:
+		if (sr->flags & TF_CONNECT_ONLY) {
+			DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS sr %p: "
+					   "trying to set ITP_LISTEN on a connect only sockref\n",
+					   sr);
+			res = -EINVAL;
+			break;
+		}
+		sr->flags |= TF_LISTEN;
+		ip_tproxy_kill_conntracks(sr, 0, 0, 1);
+		break;
+	case ITP_ESTABLISHED:
+		DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS: establishing sr %p "
+		       "raddr:rport %08x:%04x daddr:dport %08x:%04x\n",
+			sr, sr->raddr, sr->rport, inet->daddr, inet->dport);
+
+		if (sr->raddr == 0 || sr->rport == 0) {
+			DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS sr %p: "
+			       "trying to set ITP_ESTABLISHED on a non-connected sockref\n",
+			       sr);
+			res = -EINVAL;
+			break;
+		}
+		sr->flags |= TF_LISTEN | TF_CONNECT;
+		ip_tproxy_kill_conntracks(sr, 0, 0, 1);
+		break;
+	default:
+		DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS sr %p: "
+				  "invalid combination of flags %x\n", sr, flags);
+		/* FIXME: indicate error, if no CONNECT/LISTEN/ESTABLISHED was given? */
+		break;
+	}
+
+	/* Set TF_NAT_ONCE and TF_UNIDIR if needed */
+	sr->flags |= (flags & ITP_ONCE ? TF_NAT_ONCE : 0) |
+		     (flags & ITP_UNIDIR ? TF_UNIDIR : 0);
+
+#ifdef CONFIG_IP_NF_NAT_NRES
+	/* reserve NAT mappings if raddr is specified and sk->sk_reuse is set */
+	if (flags & (ITP_CONNECT | ITP_ESTABLISHED) &&
+	    sr->faddr && sr->fport && sr->raddr && sr->rport && sk->sk_reuse) {
+		if (ip_tproxy_nat_reserve(sr->faddr, sr->fport, proto, sr->raddr, sr->rport)) {
+			sr->flags |= (TF_NAT_RESERVED | TF_NAT_PEER);
+		} else {
+			DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS sr %p: "
+					  "failed to register NAT reservation\n", sr);
+			res = -EINVAL;
+			goto write_unlk;
+		}
+	}
+#endif
+
+ write_unlk:
+	write_unlock_bh(&ip_tproxy_lock);
+
+	return res;
+}
+
+static int
+ip_tproxy_setsockopt_alloc(struct sock *sk, int proto, struct in_tproxy *itp)
+{
+	int res = 0;
+	struct ip_tproxy_sockref *sr;
+	struct inet_sock *inet = inet_sk(sk);
+
+	/* we'd like to force allocation of a unique foreign address, if one's not specified */
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_ALLOC %08x:%04x\n",
+	       inet->rcv_saddr, inet->sport);
+
+	write_lock_bh(&ip_tproxy_lock);
+	sr = ip_tproxy_sockref_find_local(inet->rcv_saddr, inet->sport, proto, 0, inet->daddr, inet->dport);
+	if (!sr) {
+		res = -ENOENT;
+		goto write_unlk;
+	}
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_ALLOC sr %p, current foreign "
+	       "%08x:%04x\n", sr, sr->faddr, sr->fport);
+
+	if (sr->flags & TF_MARK_ONLY) {
+		DEBUGP("IP_TPROXY: IP_TPROXY_ALLOC sr %p mark only, "
+		       "doing nothing\n", sr);
+		goto write_unlk;
+	}
+
+	if (sr->faddr && sr->fport) {
+		/* foreign port already assigned */
+		res = -EINVAL;
+		goto write_unlk;
+	}
+
+	if (ip_tproxy_sockref_uniq(sr) == 0) {
+		/* allocating a foreign port failed */
+		DEBUGP(KERN_DEBUG "IP_TPROXY: failed to allocate foreign port "
+		       "for listening sockref\n");
+		res = -EFAULT;
+		goto write_unlk;
+	}
+
+ write_unlk:
+	write_unlock_bh(&ip_tproxy_lock);
+
+	return res;
+}
+
+static int
+ip_tproxy_setsockopt_connect(struct sock *sk, int proto, struct in_tproxy *itp)
+{
+	int res = 0;
+	struct ip_tproxy_sockref *sr;
+	struct inet_sock *inet = inet_sk(sk);
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_CONNECT %08x:%04x\n",
+	       inet->rcv_saddr, inet->sport);
+
+	/* Look up in the local sockref hash */
+	read_lock_bh(&ip_tproxy_lock);
+	sr = ip_tproxy_sockref_find_local(inet->rcv_saddr, inet->sport, proto,
+					  0, inet->daddr, inet->dport);
+	if (!sr) {
+		res = -ENOENT;
+		goto read_unlk;
+	}
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_CONNECT sr %p, current "
+	       "raddr:rport %08x:%04x\n", sr, sr->raddr, sr->rport);
+
+	if (sr->flags & TF_MARK_ONLY) {
+		DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_CONNECT sr %p "
+		       "mark only\n", sr);
+		goto read_unlk;
+	}
+
+	/* store remote address */
+	if (itp->v.addr.faddr.s_addr && itp->v.addr.fport) {
+		sr->raddr = itp->v.addr.faddr.s_addr;
+		sr->rport = itp->v.addr.fport;
+		DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_CONNECT sr %p, "
+		       "new raddr:rport %08x:%04x\n", sr, sr->raddr, sr->rport);
+	}
+
+ read_unlk:
+	read_unlock_bh(&ip_tproxy_lock);
+
+	return res;
+}
+
+static int
+ip_tproxy_setsockopt(struct sock *sk, int optval, void __user *user, unsigned int len)
+{
+	int proto;
+	int res = 0;
+	unsigned int mlen;
+	struct in_tproxy itp;
+
+	/* get protocol number of the socket */
+	proto = sk->sk_protocol;
+	if ((proto != IPPROTO_UDP) && (proto != IPPROTO_TCP))
+		return -EINVAL;
+
+	if (len < sizeof(itp.op) + sizeof(itp.v.version))
+		return -EINVAL;
+
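+	/* copy at most sizeof(itp) bytes from userspace; shorter request
+	 * structures are accepted as long as they carry at least the op and
+	 * version fields checked above */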
+	mlen = MIN(sizeof(itp), len);
+
+	if (copy_from_user(&itp, user, mlen))
+		return -EFAULT;
+
+	switch (itp.op) {
+		case TPROXY_VERSION:
+			res = ip_tproxy_setsockopt_version(sk, proto, &itp);
+			break;
+		case TPROXY_ASSIGN:
+			res = ip_tproxy_setsockopt_assign(sk, proto, &itp);
+			break;
+		case TPROXY_UNASSIGN:
+			res = ip_tproxy_setsockopt_unassign(sk, proto, &itp);
+			break;
+		case TPROXY_FLAGS:
+			res = ip_tproxy_setsockopt_flags(sk, proto, &itp);
+			break;
+		case TPROXY_ALLOC:
+			res = ip_tproxy_setsockopt_alloc(sk, proto, &itp);
+			break;
+		case TPROXY_CONNECT:
+			res = ip_tproxy_setsockopt_connect(sk, proto, &itp);
+			break;
+		default:
+			res = -ENOPROTOOPT;
+			break;
+	}
+
+	return res;
+}
+
+static int
+ip_tproxy_getsockopt_version(struct sock *sk, int proto, struct in_tproxy *itp)
+{
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_VERSION\n");
+
+	itp->v.version = TPROXY_FULL_VERSION;
+
+	return 0;
+}
+
+static int
+ip_tproxy_getsockopt_query(struct sock *sk, int proto, struct in_tproxy *itp)
+{
+	int res = 0;
+	struct ip_tproxy_sockref *sr;
+	struct inet_sock *inet = inet_sk(sk);
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_QUERY %08x:%04x\n",
+	       inet->rcv_saddr, inet->sport);
+
+	read_lock_bh(&ip_tproxy_lock);
+
+	sr = ip_tproxy_sockref_find_local(inet->rcv_saddr, inet->sport, proto,
+					  0, inet->daddr, inet->dport);
+	if (sr) {
+		itp->v.addr.faddr.s_addr = sr->faddr;
+		itp->v.addr.fport = sr->fport;
+		DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_QUERY found sr %p "
+		       "faddr:fport %08x:%04x\n", sr, sr->faddr, sr->fport);
+	} else
+		res = -ENOENT;
+
+	read_unlock_bh(&ip_tproxy_lock);
+
+	return res;
+}
+
+static int
+ip_tproxy_getsockopt_flags(struct sock *sk, int proto, struct in_tproxy *itp)
+{
+	int res = 0;
+	u_int32_t flags;
+	struct ip_tproxy_sockref *sr;
+	struct inet_sock *inet = inet_sk(sk);
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS get %08x:%04x\n",
+	       inet->rcv_saddr, inet->sport);
+
+	read_lock_bh(&ip_tproxy_lock);
+
+	sr = ip_tproxy_sockref_find_local(inet->rcv_saddr, inet->sport, proto,
+					  0, inet->daddr, inet->dport);
+	if (!sr) {
+		res = -ENOENT;
+		goto read_unlk;
+	}
+
+	flags = 0;
+	if ((sr->flags & (TF_CONNECT+TF_LISTEN)) == (TF_CONNECT+TF_LISTEN))
+		flags |= ITP_ESTABLISHED;
+	else if (sr->flags & TF_CONNECT)
+		flags |= ITP_CONNECT;
+	else if (sr->flags & TF_LISTEN)
+		flags |= ITP_LISTEN;
+
+	if (sr->flags & TF_UNIDIR)
+		flags |= ITP_UNIDIR;
+	if (sr->flags & TF_NAT_ONCE)
+		flags |= ITP_ONCE;
+	if (sr->flags & TF_MARK_ONLY)
+		flags |= ITP_MARK;
+	if (sr->flags & TF_NAT_APPLIED)
+		flags |= ITP_APPLIED;
+
+	DEBUGP(KERN_DEBUG "IP_TPROXY: IP_TPROXY_FLAGS found sr %p faddr:fport "
+	       "%08x:%04x flags %08x\n", sr, sr->faddr, sr->fport, sr->flags);
+
+	itp->v.flags = flags;
+
+ read_unlk:
+	read_unlock_bh(&ip_tproxy_lock);
+
+	return res;
+}
+
+static int
+ip_tproxy_getsockopt(struct sock *sk, int optval, void __user *user, int *len)
+{
+	int proto;
+	int res = 0;
+	unsigned int mlen;
+	struct in_tproxy itp;
+
+	proto = sk->sk_protocol;
+	if ((proto != IPPROTO_UDP) && (proto != IPPROTO_TCP))
+		return -EINVAL;
+
+	if (*len < sizeof(itp.op) + sizeof(itp.v.version))
+		return -EINVAL;
+
+	mlen = MIN(sizeof(itp), *len);
+
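+	/* the getsockopt interface is used in both directions: first read the
+	 * request (op and arguments) from userspace, then copy the possibly
+	 * updated structure back at the end */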
+	if (copy_from_user(&itp, user, mlen))
+		return -EFAULT;
+
+	switch (itp.op) {
+		case TPROXY_VERSION:
+			res = ip_tproxy_getsockopt_version(sk, proto, &itp);
+			break;
+		case TPROXY_QUERY:
+			res = ip_tproxy_getsockopt_query(sk, proto, &itp);
+			break;
+		case TPROXY_FLAGS:
+			res = ip_tproxy_getsockopt_flags(sk, proto, &itp);
+			break;
+		default:
+			res = -ENOPROTOOPT;
+			break;
+	}
+
+	/* copy data to userspace */
+	/* FIXME: we do this even when res != 0, is this a problem? */
+	if (copy_to_user(user, &itp, mlen))
+		res = -EFAULT;
+
+	return res;
+}
+
+/* callback function: called when a socket gets unhashed by the UDP or TCP stack */
+static void
+ip_tproxy_close(struct sock *sk, int proto)
+{
+	if (proto)
+		ip_tproxy_setsockopt_unassign(sk, proto, NULL);
+}
+
+/* fake timeout function needed by the fake conntrack entry; in theory it
+ * never runs */
+static void
+ip_tproxy_fake_timeout(unsigned long null_ptr)
+{
+	printk("IP_TPROXY: Fake timeout called!");
+}
+
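+/* all three hooks run at priority -130, i.e. after conntrack (-200) and
+ * mangle (-150) but before the generic NAT hooks (-100), so tproxy gets to
+ * set up its translations first */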
+static struct nf_hook_ops ip_tproxy_pre_ops = {
+	.hook		= ip_tproxy_fn,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_PRE_ROUTING,
+	.priority	= -130
+};
+
+static struct nf_hook_ops ip_tproxy_post_ops = {
+	.hook		= ip_tproxy_fn,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_POST_ROUTING,
+	.priority	= -130
+};
+
+static struct nf_hook_ops ip_tproxy_local_out_ops = {
+	.hook		= ip_tproxy_fn,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_LOCAL_OUT,
+	.priority	= -130
+};
+
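+/* a single socket option number, IP_TPROXY, is registered for both
+ * setsockopt and getsockopt */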
+static struct nf_sockopt_ops ip_tproxy_sockopts = {
+	.pf		= PF_INET,
+	.set_optmin	= IP_TPROXY,
+	.set_optmax	= IP_TPROXY + 1,
+	.set		= ip_tproxy_setsockopt,
+	.get_optmin	= IP_TPROXY,
+	.get_optmax	= IP_TPROXY + 1,
+	.get		= ip_tproxy_getsockopt,
+};
+
+/* init or cleanup the tproxy module */
+static int
+init_or_cleanup(int startup)
+{
+	int ret = 0;
+	int i;
+	struct proc_dir_entry *proc;
+
+	if (!startup) {
+		goto clean_all;
+	}
+
+	/* we depend on the NAT hooks being operational */
+	need_nat_hooks();
+
+	/* use our own fake conntrack entry, which indicates that the packet
+	   was tproxied; this lets us use the same -m tproxy match in our
+	   filter rules.  The original idea of using a fake conntrack entry
+	   to avoid conntracking is by Jozsef Kadlecsik */
+
+	atomic_set(&ip_tproxy_fake_ct.ct_general.use, 1);
+	ip_tproxy_fake_ct.timeout.function = ip_tproxy_fake_timeout;
+	ip_tproxy_fake_ct.status |= IPS_CONFIRMED | IPS_TPROXY | IPS_NAT_DONE_MASK;
+
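+	/* slab cache for struct ip_tproxy_sockref allocations */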
+	ip_tproxy_sockref_table = kmem_cache_create("ip_tproxy", sizeof(struct ip_tproxy_sockref), 0,
+						    SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+	if (!ip_tproxy_sockref_table) {
+		ret = -ENOMEM;
+		goto clean_nothing;
+	}
+
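+	/* size of the sockref hash: use the hashsize parameter when given,
+	 * otherwise default to 127 buckets */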
+	if (hashsize)
+		ip_tproxy_htable_size = hashsize;
+	else
+		ip_tproxy_htable_size = 127;
+
+	ip_tproxy_bylocal = (struct list_head *) vmalloc(sizeof(struct list_head) *
+							 ip_tproxy_htable_size * 2);
+	if (!ip_tproxy_bylocal) {
+		ret = -ENOMEM;
+		goto clean_sockref_table;
+	}
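+	/* both hash tables live in the same vmalloc'd block: bylocal occupies
+	 * the first half, byforeign the second */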
+	ip_tproxy_byforeign = (struct list_head *) ip_tproxy_bylocal + ip_tproxy_htable_size;
+
+	for (i = 0; i < ip_tproxy_htable_size; i++) {
+		INIT_LIST_HEAD(&ip_tproxy_bylocal[i]);
+		INIT_LIST_HEAD(&ip_tproxy_byforeign[i]);
+	}
+
+	proc = proc_net_create("tproxy", 0, NULL);
+	if (!proc) {
+		ret = -ENOMEM;
+		goto clean_sockref_hash;
+	}
+	proc->proc_fops = &ip_tproxy_file_ops;
+
+	ret = ipt_register_table(&tproxy_table, &initial_table.repl);
+	if (ret < 0) {
+		printk("IP_TPROXY: can't register tproxy table.\n");
+		goto clean_proc;
+	}
+
+	ret = nf_register_hook(&ip_tproxy_local_out_ops);
+	if (ret < 0) {
+		printk("IP_TPROXY: can't register local out hook.\n");
+		goto clean_table;
+	}
+
+	ret = nf_register_hook(&ip_tproxy_post_ops);
+	if (ret < 0) {
+		printk("IP_TPROXY: can't register postrouting hook.\n");
+		goto clean_loops;
+	}
+
+	ret = nf_register_hook(&ip_tproxy_pre_ops);
+	if (ret < 0) {
+		printk("IP_TPROXY: can't register prerouting hook.\n");
+		goto clean_postops;
+	}
+
+	nf_register_sockopt(&ip_tproxy_sockopts);
+
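+	/* get notified when TCP/UDP sockets are unhashed so that the
+	 * corresponding sockrefs can be torn down */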
+	ip_tproxy_udp_unhashed = ip_tproxy_close;
+	ip_tproxy_tcp_unhashed = ip_tproxy_close;
+
+	/* initialize confirm and destroy callbacks */
+	ip_conntrack_confirmed = ip_tproxy_confirmed;
+	ip_conntrack_destroyed_old = ip_conntrack_destroyed;
+	ip_conntrack_destroyed = ip_tproxy_conntrack_destroyed;
+
+	printk("IP_TPROXY: Transparent proxy support initialized 2.0.6\n"
+	       "IP_TPROXY: Copyright (c) 2002-2006 BalaBit IT Ltd.\n");
+	return ret;
+
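+	/* cleanup path: module unload enters here at clean_all, while error
+	 * paths during init jump to the label that undoes what has already
+	 * been set up */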
+ clean_all:
+	ip_conntrack_destroyed = ip_conntrack_destroyed_old;
+	ip_conntrack_confirmed = NULL;
+
+	ip_tproxy_udp_unhashed = NULL;
+	ip_tproxy_tcp_unhashed = NULL;
+
+	nf_unregister_sockopt(&ip_tproxy_sockopts);
+
+	nf_unregister_hook(&ip_tproxy_pre_ops);
+
+ clean_postops:
+	nf_unregister_hook(&ip_tproxy_post_ops);
+
+ clean_loops:
+	nf_unregister_hook(&ip_tproxy_local_out_ops);
+
+ clean_table:
+	ipt_unregister_table(&tproxy_table);
+
+ clean_proc:
+	proc_net_remove("tproxy");
+
+ clean_sockref_hash:
+	ip_tproxy_sockref_table_free();
+	vfree(ip_tproxy_bylocal);
+
+ clean_sockref_table:
+	kmem_cache_destroy(ip_tproxy_sockref_table);
+
+ clean_nothing:
+	return ret;
+}
+
+static int __init init(void)
+{
+	return init_or_cleanup(1);
+}
+
+static void __exit fini(void)
+{
+	init_or_cleanup(0);
+}
+
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Balázs Scheidler <bazsi@balabit.hu>");
+MODULE_DESCRIPTION("Netfilter transparent proxy core module.");
--- /dev/null	2011-06-03 14:51:38.633053002 +0200
+++ linux-2.6.20.14-fbx/net/ipv4/netfilter/ipt_TPROXY.c	2010-12-29 19:30:08.921500487 +0100
@@ -0,0 +1,136 @@
+/*
+ * Transparent proxy support for Linux/iptables
+ *
+ * Copyright (c) 2002-2004 BalaBit IT Ltd.
+ * Author: Balázs Scheidler
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/inetdevice.h>
+#include <net/checksum.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_TPROXY.h>
+#include <linux/netfilter_ipv4/iptable_tproxy.h>
+
+/* determine the IP address of the interface the packet came in on */
+static u32
+determine_local_ip(struct sk_buff *skb, int hooknum)
+{
+	struct in_device *indev;
+	u32 ip;
+
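+	/* locally generated packets are simply redirected to 127.0.0.1 */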
+	if (hooknum == NF_IP_LOCAL_OUT)
+		return htonl(0x7f000001);
+
+	indev = in_dev_get(skb->dev);
+
+	if (!indev) {
+		printk(KERN_WARNING "IP_TPROXY: No IP protocol on incoming "
+		       "interface during redirect, dropping packet.\n");
+		return 0;
+	}
+	if (!indev->ifa_list) {
+		printk(KERN_WARNING "IP_TPROXY: No IP address on incoming "
+		       "interface during redirect, dropping packet.\n");
+		in_dev_put(indev);
+		return 0;
+	}
+
+	ip = indev->ifa_list->ifa_local;
+	in_dev_put(indev);
+
+	return ip;
+}
+
+static unsigned int
+target(struct sk_buff **pskb,
+       const struct net_device *in,
+       const struct net_device *out,
+       unsigned int hooknum,
+       const struct xt_target *target,
+       const void *targinfo)
+{
+	const struct ipt_tproxy_target_info *info = targinfo;
+	struct ip_tproxy_sockref sr;
+	u16 *ports, _ports[2];
+	struct iphdr *iph = (*pskb)->nh.iph;
+
+	/* TCP/UDP only */
+	if ((iph->protocol != IPPROTO_TCP) &&
+	    (iph->protocol != IPPROTO_UDP))
+		return NF_ACCEPT;
+
+	/* get ports */
+	ports = skb_header_pointer(*pskb, iph->ihl * 4,
+				   sizeof(_ports), &_ports);
+	if (ports == NULL)
+		return NF_DROP;
+
+	/* set up fake sockref */
+	memset(&sr, 0, sizeof(sr));
+	sr.proto = iph->protocol;
+
+	/* use the original address/port if none is specified in the rule */
+	if (info->laddr)
+		sr.laddr = info->laddr;
+	else
+		sr.laddr = determine_local_ip(*pskb, hooknum);
+
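+	/* likewise for the port: use the rule's lport if given, otherwise
+	 * keep the packet's original destination port (ports[1]) */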
+	if (info->lport)
+		sr.lport = info->lport;
+	else
+		sr.lport = ports[1];
+
+	/* set up mapping */
+	return ip_tproxy_setup_nat(pskb, hooknum, &sr, 0);
+}
+
+static int
+checkentry(const char *tablename,
+	   const void *e,
+	   const struct xt_target *target,
+           void *targinfo,
+           unsigned int hook_mask)
+{
+	if (strcmp(tablename, "tproxy") != 0) {
+		printk(KERN_WARNING "TPROXY: can only be called from \"tproxy\" table, not \"%s\"\n", tablename);
+		return 0;
+	}
+
+	return 1;
+}
+
+static struct ipt_target ipt_tproxy_reg = {
+	.name		= "TPROXY",
+	.target		= target,
+	.targetsize	= sizeof(struct ipt_tproxy_target_info),
+	.checkentry	= checkentry,
+	.me		= THIS_MODULE,
+};
+
+static int __init init(void)
+{
+	if (ipt_register_target(&ipt_tproxy_reg))
+		return -EINVAL;
+
+	return 0;
+}
+
+static void __exit fini(void)
+{
+	ipt_unregister_target(&ipt_tproxy_reg);
+}
+
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Balázs Scheidler <bazsi@balabit.hu>");
+MODULE_DESCRIPTION("Netfilter transparent proxy TPROXY target module.");
