[OpenWrt-Devel] [PATCH 2/4] lantiq: Add support for linux 4.4

Martin Blumenstingl martin.blumenstingl at googlemail.com
Wed Dec 30 19:10:46 EST 2015


The following patches were dropped because they are already applied
upstream:
- 0038-MIPS-lantiq-fpi-on-ar9.patch
- 0039-MIPS-lantiq-initialize-usb-on-boot.patch
- 0042-USB-DWC2-big-endian-support.patch
- 0043-gpio-stp-xway-fix-phy-mask.patch
- 0050-MIPS-lantiq-add-clk_round_rate.patch

All other patches were simply refreshed, except the following two:
- 0001-MIPS-lantiq-add-pcie-driver.patch
  The changes to arch/mips/lantiq/xway/sysctrl.c (which disabled some
  PMU gates for the vrx200 / VR9 SoCs) were removed, since the
  upstream kernel now disables unused PMU gates automatically (since
  95135bfa7ead1becc2879230f72583dde2b71a0c
  "MIPS: Lantiq: Deactivate most of the devices by default").
- 0101-mtd-split.patch
  __mtd_add_partition() is not required anymore because r47747 removed
  it from the generic patches. This was possible because the upstream
  kernel no longer checks for overlapping partitions (since
  3a434f66e6dab645d74a59c95651cdbe16497a50
  "mtd: part: Remove partition overlap checks").
---
Linux 4.4 was briefly tested on an ARX100 based and a VRX200 based SoC
(special thanks to Mathias Kresin). Both seem to boot fine, and (V)DSL,
wifi and ethernet appear to be working.
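
For anyone who wants to reproduce the test, here is a rough sketch of the
usual OpenWrt build flow (assuming the kernel version switch from the
remaining patches of this series is applied; the exact subtarget and
profile names depend on your board):

  make menuconfig      # select "Lantiq" as the target system plus your subtarget/profile
  make -j$(nproc) V=s  # build the images, then flash and boot one on the device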

 target/linux/lantiq/config-4.4                     |  168 +
 .../0001-MIPS-lantiq-add-pcie-driver.patch         | 5521 ++++++++++++++++++++
 .../0002-MIPS-lantiq-dtb-image-hack.patch          |   31 +
 .../0004-MIPS-lantiq-add-atm-hack.patch            |  500 ++
 .../0007-MIPS-lantiq-add-basic-tffs-driver.patch   |  111 +
 .../0008-MIPS-lantiq-backport-old-timer-code.patch | 1028 ++++
 .../0012-pinctrl-lantiq-fix-up-pinmux.patch        |   78 +
 ...0013-MTD-lantiq-xway-fix-invalid-operator.patch |   24 +
 ...xway-the-latched-command-should-be-persis.patch |   44 +
 .../0015-MTD-lantiq-xway-remove-endless-loop.patch |   41 +
 ...xway-add-missing-write_buf-and-read_buf-t.patch |   55 +
 .../0017-MTD-xway-fix-nand-locking.patch           |   89 +
 .../0018-MTD-nand-lots-of-xrx200-fixes.patch       |  125 +
 ...MTD-lantiq-handle-NO_XIP-on-cfi0001-flash.patch |   25 +
 ...MTD-m25p80-allow-loading-mtd-name-from-OF.patch |   44 +
 ...023-NET-PHY-adds-driver-for-lantiq-PHY11G.patch |  537 ++
 ...024-NET-lantiq-adds-PHY11G-firmware-blobs.patch |  364 ++
 .../0025-NET-MIPS-lantiq-adds-xrx200-net.patch     | 3340 ++++++++++++
 .../patches-4.4/0026-NET-multi-phy-support.patch   |   53 +
 .../0028-NET-lantiq-various-etop-fixes.patch       |  907 ++++
 .../0030-GPIO-add-named-gpio-exports.patch         |  166 +
 ...2C-MIPS-lantiq-add-FALC-ON-i2c-bus-master.patch | 1034 ++++
 .../0032-USB-fix-roothub-for-IFXHCD.patch          |   31 +
 .../0033-SPI-MIPS-lantiq-adds-spi-xway.patch       | 1049 ++++
 ...ompile-when-reset-RESET_CONTROLLER-is-not.patch |   45 +
 ...-lantiq-wifi-and-ethernet-eeprom-handling.patch |  630 +++
 .../0036-owrt-generic-dtb-image-hack.patch         |   32 +
 .../0040-USB-DWC2-enable-usb-power-gpio.patch      |   35 +
 .../patches-4.4/0041-USB-DWC2-add-ltq-params.patch |   46 +
 .../linux/lantiq/patches-4.4/0101-mtd-split.patch  |  173 +
 .../patches-4.4/0150-lantiq-pinctrl-xway.patch     |   15 +
 .../0151-lantiq-ifxmips_pcie-use-of.patch          |   51 +
 .../0160-owrt-lantiq-multiple-flash.patch          |  217 +
 ...D-cfi-cmdset-0001-disable-buffered-writes.patch |   11 +
 34 files changed, 16620 insertions(+)
 create mode 100644 target/linux/lantiq/config-4.4
 create mode 100644 target/linux/lantiq/patches-4.4/0001-MIPS-lantiq-add-pcie-driver.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0002-MIPS-lantiq-dtb-image-hack.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0004-MIPS-lantiq-add-atm-hack.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0007-MIPS-lantiq-add-basic-tffs-driver.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0008-MIPS-lantiq-backport-old-timer-code.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0012-pinctrl-lantiq-fix-up-pinmux.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0013-MTD-lantiq-xway-fix-invalid-operator.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0014-MTD-lantiq-xway-the-latched-command-should-be-persis.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0015-MTD-lantiq-xway-remove-endless-loop.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0016-MTD-lantiq-xway-add-missing-write_buf-and-read_buf-t.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0017-MTD-xway-fix-nand-locking.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0018-MTD-nand-lots-of-xrx200-fixes.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0020-MTD-lantiq-handle-NO_XIP-on-cfi0001-flash.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0022-MTD-m25p80-allow-loading-mtd-name-from-OF.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0023-NET-PHY-adds-driver-for-lantiq-PHY11G.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0024-NET-lantiq-adds-PHY11G-firmware-blobs.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0025-NET-MIPS-lantiq-adds-xrx200-net.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0026-NET-multi-phy-support.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0028-NET-lantiq-various-etop-fixes.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0030-GPIO-add-named-gpio-exports.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0031-I2C-MIPS-lantiq-add-FALC-ON-i2c-bus-master.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0032-USB-fix-roothub-for-IFXHCD.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0033-SPI-MIPS-lantiq-adds-spi-xway.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0034-reset-Fix-compile-when-reset-RESET_CONTROLLER-is-not.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0035-owrt-lantiq-wifi-and-ethernet-eeprom-handling.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0036-owrt-generic-dtb-image-hack.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0040-USB-DWC2-enable-usb-power-gpio.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0041-USB-DWC2-add-ltq-params.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0101-mtd-split.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0150-lantiq-pinctrl-xway.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0151-lantiq-ifxmips_pcie-use-of.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0160-owrt-lantiq-multiple-flash.patch
 create mode 100644 target/linux/lantiq/patches-4.4/0300-MTD-cfi-cmdset-0001-disable-buffered-writes.patch

diff --git a/target/linux/lantiq/config-4.4 b/target/linux/lantiq/config-4.4
new file mode 100644
index 0000000..2bc12fa
--- /dev/null
+++ b/target/linux/lantiq/config-4.4
@@ -0,0 +1,168 @@
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_ARCH_SUPPORTS_MSI=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_CEVT_R4K=y
+CONFIG_CLKDEV_LOOKUP=y
+CONFIG_CPU_BIG_ENDIAN=y
+CONFIG_CPU_GENERIC_DUMP_TLB=y
+CONFIG_CPU_HAS_PREFETCH=y
+CONFIG_CPU_HAS_SYNC=y
+CONFIG_CPU_MIPS32=y
+# CONFIG_CPU_MIPS32_R1 is not set
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPSR2=y
+CONFIG_CPU_R4K_CACHE_TLB=y
+CONFIG_CPU_R4K_FPU=y
+CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_CPU_SUPPORTS_HIGHMEM=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CSRC_R4K=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DMA_NONCOHERENT=y
+CONFIG_DTC=y
+CONFIG_DT_EASY50712=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_ETHERNET_PACKET_MANGLE=y
+CONFIG_GENERIC_ATOMIC64=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_IO=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_MM_LANTIQ=y
+CONFIG_GPIO_STP_XWAY=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_HARDWARE_WATCHPOINTS=y
+CONFIG_HAS_DMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+CONFIG_HAVE_IDE=y
+CONFIG_HAVE_KVM=y
+CONFIG_HAVE_MACH_CLKDEV=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
+CONFIG_HAVE_NET_DSA=y
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_HW_HAS_PCI=y
+CONFIG_HW_RANDOM=y
+CONFIG_HZ=250
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_IRQ_CPU=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_LANTIQ=y
+CONFIG_LANTIQ_ETOP=y
+# CONFIG_LANTIQ_PHY is not set
+CONFIG_LANTIQ_WDT=y
+# CONFIG_LANTIQ_XRX200 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_MDIO_BOARDINFO=y
+CONFIG_MIPS=y
+# CONFIG_MIPS_CMDLINE_DTB_EXTEND is not set
+# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set
+CONFIG_MIPS_CMDLINE_FROM_DTB=y
+# CONFIG_MIPS_ELF_APPENDED_DTB is not set
+# CONFIG_MIPS_HUGE_TLB_SUPPORT is not set
+CONFIG_MIPS_L1_CACHE_SHIFT=5
+# CONFIG_MIPS_MACHINE is not set
+CONFIG_MIPS_MT_DISABLED=y
+# CONFIG_MIPS_MT_SMP is not set
+# CONFIG_MIPS_MT_SMTC is not set
+CONFIG_MIPS_NO_APPENDED_DTB=y
+# CONFIG_MIPS_RAW_APPENDED_DTB is not set
+# CONFIG_MIPS_VPE_LOADER is not set
+CONFIG_MODULES_USE_ELF_REL=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_LANTIQ=y
+CONFIG_MTD_M25P80=y
+# CONFIG_MTD_NAND_XWAY is not set
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTD_SPLIT_FIRMWARE=y
+CONFIG_MTD_UIMAGE_SPLIT=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_NEED_PER_CPU_KM=y
+CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_DEVICE=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_MDIO=y
+CONFIG_OF_MTD=y
+CONFIG_OF_NET=y
+CONFIG_OF_PCI=y
+CONFIG_OF_PCI_IRQ=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_PCI=y
+# CONFIG_PCIE_LANTIQ is not set
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_LANTIQ=y
+CONFIG_PERCPU_RWSEM=y
+CONFIG_PERF_USE_VMALLOC=y
+CONFIG_PHYLIB=y
+CONFIG_PINCTRL=y
+# CONFIG_PINCTRL_AMD is not set
+CONFIG_PINCTRL_LANTIQ=y
+# CONFIG_PINCTRL_SINGLE is not set
+CONFIG_PINCTRL_XWAY=y
+# CONFIG_PREEMPT_RCU is not set
+CONFIG_PROC_DEVICETREE=y
+CONFIG_PSB6970_PHY=y
+CONFIG_RTL8366RB_PHY=y
+CONFIG_RTL8366_SMI=y
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SERIAL_8250 is not set
+CONFIG_SERIAL_LANTIQ=y
+# CONFIG_SOC_AMAZON_SE is not set
+# CONFIG_SOC_FALCON is not set
+CONFIG_SOC_TYPE_XWAY=y
+CONFIG_SOC_XWAY=y
+CONFIG_SWAP_IO_SPACE=y
+CONFIG_SWCONFIG=y
+CONFIG_SYS_HAS_CPU_MIPS32_R1=y
+CONFIG_SYS_HAS_CPU_MIPS32_R2=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
+CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
+CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
+CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+CONFIG_SYS_SUPPORTS_MULTITHREADING=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_UIDGID_CONVERTED=y
+CONFIG_USB_ARCH_HAS_XHCI=y
+CONFIG_USE_OF=y
+CONFIG_ZONE_DMA_FLAG=0
diff --git a/target/linux/lantiq/patches-4.4/0001-MIPS-lantiq-add-pcie-driver.patch b/target/linux/lantiq/patches-4.4/0001-MIPS-lantiq-add-pcie-driver.patch
new file mode 100644
index 0000000..1a4d863
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0001-MIPS-lantiq-add-pcie-driver.patch
@@ -0,0 +1,5521 @@
+From 6f933347d0b4ed02d9534f5fa07f7b99f13eeaa1 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Thu, 7 Aug 2014 18:12:28 +0200
+Subject: [PATCH 01/36] MIPS: lantiq: add pcie driver
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ arch/mips/lantiq/Kconfig           |   10 +
+ arch/mips/lantiq/xway/sysctrl.c    |    2 +
+ arch/mips/pci/Makefile             |    2 +
+ arch/mips/pci/fixup-lantiq-pcie.c  |   82 +++
+ arch/mips/pci/fixup-lantiq.c       |    5 +-
+ arch/mips/pci/ifxmips_pci_common.h |   57 ++
+ arch/mips/pci/ifxmips_pcie.c       | 1099 ++++++++++++++++++++++++++++++
+ arch/mips/pci/ifxmips_pcie.h       |  135 ++++
+ arch/mips/pci/ifxmips_pcie_ar10.h  |  290 ++++++++
+ arch/mips/pci/ifxmips_pcie_msi.c   |  392 +++++++++++
+ arch/mips/pci/ifxmips_pcie_phy.c   |  478 +++++++++++++
+ arch/mips/pci/ifxmips_pcie_pm.c    |  176 +++++
+ arch/mips/pci/ifxmips_pcie_pm.h    |   36 +
+ arch/mips/pci/ifxmips_pcie_reg.h   | 1001 +++++++++++++++++++++++++++
+ arch/mips/pci/ifxmips_pcie_vr9.h   |  271 ++++++++
+ arch/mips/pci/pci.c                |   25 +
+ arch/mips/pci/pcie-lantiq.h        | 1305 ++++++++++++++++++++++++++++++++++++
+ drivers/pci/pcie/aer/Kconfig       |    2 +-
+ include/linux/pci.h                |    2 +
+ include/linux/pci_ids.h            |    6 +
+ 20 files changed, 5374 insertions(+), 2 deletions(-)
+ create mode 100644 arch/mips/pci/fixup-lantiq-pcie.c
+ create mode 100644 arch/mips/pci/ifxmips_pci_common.h
+ create mode 100644 arch/mips/pci/ifxmips_pcie.c
+ create mode 100644 arch/mips/pci/ifxmips_pcie.h
+ create mode 100644 arch/mips/pci/ifxmips_pcie_ar10.h
+ create mode 100644 arch/mips/pci/ifxmips_pcie_msi.c
+ create mode 100644 arch/mips/pci/ifxmips_pcie_phy.c
+ create mode 100644 arch/mips/pci/ifxmips_pcie_pm.c
+ create mode 100644 arch/mips/pci/ifxmips_pcie_pm.h
+ create mode 100644 arch/mips/pci/ifxmips_pcie_reg.h
+ create mode 100644 arch/mips/pci/ifxmips_pcie_vr9.h
+ create mode 100644 arch/mips/pci/pcie-lantiq.h
+
+--- a/arch/mips/lantiq/Kconfig
++++ b/arch/mips/lantiq/Kconfig
+@@ -17,6 +17,7 @@ config SOC_XWAY
+ 	bool "XWAY"
+ 	select SOC_TYPE_XWAY
+ 	select HW_HAS_PCI
++	select ARCH_SUPPORTS_MSI
+ 
+ config SOC_FALCON
+ 	bool "FALCON"
+@@ -37,6 +38,15 @@ config PCI_LANTIQ
+ 	bool "PCI Support"
+ 	depends on SOC_XWAY && PCI
+ 
++config PCIE_LANTIQ
++	bool "PCIE Support"
++	depends on SOC_XWAY && PCI
++
++config PCIE_LANTIQ_MSI
++	bool
++	depends on PCIE_LANTIQ && PCI_MSI
++	default y
++
+ config XRX200_PHY_FW
+ 	bool "XRX200 PHY firmware loader"
+ 	depends on SOC_XWAY
+--- a/arch/mips/pci/Makefile
++++ b/arch/mips/pci/Makefile
+@@ -45,6 +45,8 @@ obj-$(CONFIG_LANTIQ)		+= fixup-lantiq.o
+ obj-$(CONFIG_PCI_LANTIQ)	+= pci-lantiq.o ops-lantiq.o
+ obj-$(CONFIG_SOC_RT288X)	+= pci-rt2880.o
+ obj-$(CONFIG_SOC_RT3883)	+= pci-rt3883.o
++obj-$(CONFIG_PCIE_LANTIQ)	+= ifxmips_pcie_phy.o ifxmips_pcie.o fixup-lantiq-pcie.o
++obj-$(CONFIG_PCIE_LANTIQ_MSI)	+= pcie-lantiq-msi.o
+ obj-$(CONFIG_TANBAC_TB0219)	+= fixup-tb0219.o
+ obj-$(CONFIG_TANBAC_TB0226)	+= fixup-tb0226.o
+ obj-$(CONFIG_TANBAC_TB0287)	+= fixup-tb0287.o
+--- /dev/null
++++ b/arch/mips/pci/fixup-lantiq-pcie.c
+@@ -0,0 +1,74 @@
++/******************************************************************************
++**
++** FILE NAME    : ifxmips_fixup_pcie.c
++** PROJECT      : IFX UEIP for VRX200
++** MODULES      : PCIe 
++**
++** DATE         : 02 Mar 2009
++** AUTHOR       : Lei Chuanhua
++** DESCRIPTION  : PCIe Root Complex Driver
++** COPYRIGHT    :       Copyright (c) 2009
++**                      Infineon Technologies AG
++**                      Am Campeon 1-12, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++** HISTORY
++** $Version $Date        $Author         $Comment
++** 0.0.1    17 Mar,2009  Lei Chuanhua    Initial version
++*******************************************************************************/
++/*!
++ \file ifxmips_fixup_pcie.c
++ \ingroup IFX_PCIE  
++ \brief PCIe Fixup functions source file
++*/
++#include <linux/pci.h>
++#include <linux/pci_regs.h>
++#include <linux/pci_ids.h>
++
++#include <lantiq_soc.h>
++
++#include "pcie-lantiq.h"
++
++static void
++ifx_pcie_fixup_resource(struct pci_dev *dev)
++{
++    u32 reg;
++
++    IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s dev %s: enter\n", __func__, pci_name(dev));
++
++    printk("%s: fixup host controller %s (%04x:%04x)\n", 
++        __func__, pci_name(dev), dev->vendor, dev->device); 
++
++   /* Setup COMMAND register */
++    reg = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER /* | 
++          PCI_COMMAND_INTX_DISABLE */| PCI_COMMAND_SERR;
++    pci_write_config_word(dev, PCI_COMMAND, reg);
++    IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s dev %s: exit\n", __func__, pci_name(dev));
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INFINEON, PCI_DEVICE_ID_INFINEON_PCIE, ifx_pcie_fixup_resource);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LANTIQ, PCI_VENDOR_ID_LANTIQ, ifx_pcie_fixup_resource);
++
++static void
++ifx_pcie_rc_class_early_fixup(struct pci_dev *dev)
++{
++    IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s dev %s: enter\n", __func__, pci_name(dev));
++
++    if (dev->devfn == PCI_DEVFN(0, 0) &&
++        (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
++
++        dev->class = (PCI_CLASS_BRIDGE_PCI << 8) | (dev->class & 0xff);
++
++        printk(KERN_INFO "%s: fixed pcie host bridge to pci-pci bridge\n", __func__);
++    }
++    IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s dev %s: exit\n", __func__, pci_name(dev));
++    mdelay(10);
++}
++
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INFINEON, PCI_DEVICE_ID_INFINEON_PCIE,
++     ifx_pcie_rc_class_early_fixup);
++
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LANTIQ, PCI_DEVICE_ID_LANTIQ_PCIE,
++     ifx_pcie_rc_class_early_fixup);
+--- a/arch/mips/pci/fixup-lantiq.c
++++ b/arch/mips/pci/fixup-lantiq.c
+@@ -8,12 +8,18 @@
+ 
+ #include <linux/of_irq.h>
+ #include <linux/of_pci.h>
++#include "ifxmips_pci_common.h"
+ 
+ int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
+ int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;
+ 
+ int pcibios_plat_dev_init(struct pci_dev *dev)
+ {
++#ifdef CONFIG_PCIE_LANTIQ
++	if (pci_find_capability(dev, PCI_CAP_ID_EXP))
++		ifx_pcie_bios_plat_dev_init(dev);
++#endif
++
+ 	if (ltq_pci_plat_arch_init)
+ 		return ltq_pci_plat_arch_init(dev);
+ 
+@@ -25,5 +31,10 @@ int pcibios_plat_dev_init(struct pci_dev
+ 
+ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ {
++#ifdef CONFIG_PCIE_LANTIQ
++	if (pci_find_capability(dev, PCI_CAP_ID_EXP))
++		return ifx_pcie_bios_map_irq(dev, slot, pin);
++#endif
++
+ 	return of_irq_parse_and_map_pci(dev, slot, pin);
+ }
+--- /dev/null
++++ b/arch/mips/pci/ifxmips_pci_common.h
+@@ -0,0 +1,57 @@
++/******************************************************************************
++**
++** FILE NAME    : ifxmips_pci_common.h
++** PROJECT      : IFX UEIP
++** MODULES      : PCI subsystem
++**
++** DATE         : 30 June 2009
++** AUTHOR       : Lei Chuanhua
++** DESCRIPTION  : PCIe Root Complex Driver
++** COPYRIGHT    :       Copyright (c) 2009
++**                      Infineon Technologies AG
++**                      Am Campeon 1-12, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++** HISTORY
++** $Version $Date        $Author         $Comment
++** 0.0.1    30 June,2009  Lei Chuanhua    Initial version
++*******************************************************************************/
++
++#ifndef IFXMIPS_PCI_COMMON_H
++#define IFXMIPS_PCI_COMMON_H
++#include <linux/version.h>
++/*!
++ \defgroup IFX_PCI_COM  IFX PCI/PCIe common parts for OS integration  
++ \brief  PCI/PCIe common parts
++*/
++
++/*!
++ \defgroup IFX_PCI_COM_OS OS APIs
++ \ingroup IFX_PCI_COM
++ \brief PCI/PCIe bus driver OS interface functions
++*/
++/*!
++  \file ifxmips_pci_common.h
++  \ingroup IFX_PCI_COM
++  \brief PCI/PCIe bus driver common OS header file
++*/
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++#define IFX_PCI_CONST
++#else
++#define IFX_PCI_CONST const
++#endif
++#ifdef CONFIG_IFX_PCI
++extern int ifx_pci_bios_map_irq(IFX_PCI_CONST struct pci_dev *dev, u8 slot, u8 pin);
++extern int ifx_pci_bios_plat_dev_init(struct pci_dev *dev);
++#endif /* CONFIG_IFX_PCI */
++
++#ifdef CONFIG_PCIE_LANTIQ
++extern int ifx_pcie_bios_map_irq(IFX_PCI_CONST struct pci_dev *dev, u8 slot, u8 pin);
++extern int ifx_pcie_bios_plat_dev_init(struct pci_dev *dev);
++#endif
++
++#endif /* IFXMIPS_PCI_COMMON_H */
++
+--- /dev/null
++++ b/arch/mips/pci/ifxmips_pcie.c
+@@ -0,0 +1,1092 @@
++/*
++ *  This program is free software; you can redistribute it and/or modify it
++ *  under the terms of the GNU General Public License version 2 as published
++ *  by the Free Software Foundation.
++ *
++ *  Copyright (C) 2009 Lei Chuanhua <chuanhua.lei at infineon.com>
++ *  Copyright (C) 2013 John Crispin <blogic at openwrt.org>
++ */
++
++#include <linux/types.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/mm.h>
++#include <asm/paccess.h>
++#include <linux/pci.h>
++#include <linux/pci_regs.h>
++#include <linux/module.h>
++
++#include "ifxmips_pcie.h"
++#include "ifxmips_pcie_reg.h"
++
++/* Enable 32bit io due to its mem mapped io nature */
++#define IFX_PCIE_ERROR_INT
++#define IFX_PCIE_IO_32BIT
++
++#define IFX_PCIE_IR                     (INT_NUM_IM4_IRL0 + 25)
++#define IFX_PCIE_INTA                   (INT_NUM_IM4_IRL0 + 8)
++#define IFX_PCIE_INTB                   (INT_NUM_IM4_IRL0 + 9)
++#define IFX_PCIE_INTC                   (INT_NUM_IM4_IRL0 + 10)
++#define IFX_PCIE_INTD                   (INT_NUM_IM4_IRL0 + 11)
++#define MS(_v, _f)  (((_v) & (_f)) >> _f##_S)
++#define SM(_v, _f)  (((_v) << _f##_S) & (_f))
++#define IFX_REG_SET_BIT(_f, _r) \
++	IFX_REG_W32((IFX_REG_R32((_r)) &~ (_f)) | (_f), (_r))
++
++#define IFX_PCIE_LTSSM_ENABLE_TIMEOUT 10
++
++static DEFINE_SPINLOCK(ifx_pcie_lock);
++
++u32 g_pcie_debug_flag = PCIE_MSG_ANY & (~PCIE_MSG_CFG);
++
++static ifx_pcie_irq_t pcie_irqs[IFX_PCIE_CORE_NR] = {
++    {
++        .ir_irq = {
++            .irq  = IFX_PCIE_IR,
++            .name = "ifx_pcie_rc0",
++        },
++
++        .legacy_irq = {
++            {
++                .irq_bit = PCIE_IRN_INTA,
++                .irq     = IFX_PCIE_INTA,
++            },
++            {
++                .irq_bit = PCIE_IRN_INTB,
++                .irq     = IFX_PCIE_INTB,
++            },
++            {
++                .irq_bit = PCIE_IRN_INTC,
++                .irq     = IFX_PCIE_INTC,
++            },
++            {
++                .irq_bit = PCIE_IRN_INTD,
++                .irq     = IFX_PCIE_INTD,
++            },
++        },
++    },
++
++};
++
++void ifx_pcie_debug(const char *fmt, ...)
++{
++	static char buf[256] = {0};      /* XXX */
++	va_list ap;
++
++	va_start(ap, fmt);
++	vsnprintf(buf, sizeof(buf), fmt, ap);
++	va_end(ap);
++
++	printk("%s", buf);
++}
++
++
++static inline int pcie_ltssm_enable(int pcie_port)
++{
++	int i;
++
++	/* Enable LTSSM */
++	IFX_REG_W32(PCIE_RC_CCR_LTSSM_ENABLE, PCIE_RC_CCR(pcie_port));
++
++	/* Wait for the link to come up */
++	for (i = 0; i < IFX_PCIE_LTSSM_ENABLE_TIMEOUT; i++) {
++		if (!(IFX_REG_R32(PCIE_LCTLSTS(pcie_port)) & PCIE_LCTLSTS_RETRAIN_PENDING))
++			return 0;
++		udelay(10);
++	}
++
++	printk("%s link timeout!!!!!\n", __func__);
++	return -1;
++}
++
++static inline void pcie_status_register_clear(int pcie_port)
++{
++	IFX_REG_W32(0, PCIE_RC_DR(pcie_port));
++	IFX_REG_W32(0, PCIE_PCICMDSTS(pcie_port));
++	IFX_REG_W32(0, PCIE_DCTLSTS(pcie_port));
++	IFX_REG_W32(0, PCIE_LCTLSTS(pcie_port));
++	IFX_REG_W32(0, PCIE_SLCTLSTS(pcie_port));
++	IFX_REG_W32(0, PCIE_RSTS(pcie_port));
++	IFX_REG_W32(0, PCIE_UES_R(pcie_port));
++	IFX_REG_W32(0, PCIE_UEMR(pcie_port));
++	IFX_REG_W32(0, PCIE_UESR(pcie_port));
++	IFX_REG_W32(0, PCIE_CESR(pcie_port));
++	IFX_REG_W32(0, PCIE_CEMR(pcie_port));
++	IFX_REG_W32(0, PCIE_RESR(pcie_port));
++	IFX_REG_W32(0, PCIE_PVCCRSR(pcie_port));
++	IFX_REG_W32(0, PCIE_VC0_RSR0(pcie_port));
++	IFX_REG_W32(0, PCIE_TPFCS(pcie_port));
++	IFX_REG_W32(0, PCIE_TNPFCS(pcie_port));
++	IFX_REG_W32(0, PCIE_TCFCS(pcie_port));
++	IFX_REG_W32(0, PCIE_QSR(pcie_port));
++	IFX_REG_W32(0, PCIE_IOBLSECS(pcie_port));
++}
++
++static inline int ifx_pcie_link_up(int pcie_port)
++{
++    return (IFX_REG_R32(PCIE_PHY_SR(pcie_port)) & PCIE_PHY_SR_PHY_LINK_UP) ? 1 : 0;
++}
++
++
++static inline void pcie_mem_io_setup(int pcie_port)
++{
++    u32 reg;
++    /*
++     * BAR[0:1] readonly register 
++     * RC contains only minimal BARs for packets mapped to this device 
++     * Mem/IO filters define a range of memory occupied by memory mapped IO devices that
++     * reside on the downstream side of the bridge.
++     */
++    reg = SM((PCIE_MEM_PHY_PORT_TO_END(pcie_port) >> 20), PCIE_MBML_MEM_LIMIT_ADDR)
++        | SM((PCIE_MEM_PHY_PORT_TO_BASE(pcie_port) >> 20), PCIE_MBML_MEM_BASE_ADDR);
++
++    IFX_REG_W32(reg, PCIE_MBML(pcie_port));
++
++
++#ifdef IFX_PCIE_PREFETCH_MEM_64BIT
++    reg = SM((PCIE_MEM_PHY_PORT_TO_END(pcie_port) >> 20), PCIE_PMBL_END_ADDR)
++        | SM((PCIE_MEM_PHY_PORT_TO_BASE(pcie_port) >> 20), PCIE_PMBL_UPPER_12BIT)
++        | PCIE_PMBL_64BIT_ADDR;
++    IFX_REG_W32(reg, PCIE_PMBL(pcie_port));
++
++    /* Must configure upper 32bit */
++    IFX_REG_W32(0, PCIE_PMBU32(pcie_port));
++    IFX_REG_W32(0, PCIE_PMLU32(pcie_port));
++#else
++    /* PCIe_PBML, same as MBML */
++    IFX_REG_W32(IFX_REG_R32(PCIE_MBML(pcie_port)), PCIE_PMBL(pcie_port));
++#endif 
++
++    /* IO Address Range */
++    reg = SM((PCIE_IO_PHY_PORT_TO_END(pcie_port) >> 12), PCIE_IOBLSECS_IO_LIMIT_ADDR)
++        | SM((PCIE_IO_PHY_PORT_TO_BASE(pcie_port) >> 12), PCIE_IOBLSECS_IO_BASE_ADDR);
++#ifdef IFX_PCIE_IO_32BIT    
++    reg |= PCIE_IOBLSECS_32BIT_IO_ADDR;
++#endif /* IFX_PCIE_IO_32BIT */
++    IFX_REG_W32(reg, PCIE_IOBLSECS(pcie_port));
++
++#ifdef IFX_PCIE_IO_32BIT
++    reg = SM((PCIE_IO_PHY_PORT_TO_END(pcie_port) >> 16), PCIE_IO_BANDL_UPPER_16BIT_IO_LIMIT)
++        | SM((PCIE_IO_PHY_PORT_TO_BASE(pcie_port) >> 16), PCIE_IO_BANDL_UPPER_16BIT_IO_BASE);
++    IFX_REG_W32(reg, PCIE_IO_BANDL(pcie_port));
++
++#endif /* IFX_PCIE_IO_32BIT */
++}
++
++static inline void
++pcie_device_setup(int pcie_port)
++{
++    u32 reg;
++
++    /* Device capability register, set up Maximum payload size */
++    reg = IFX_REG_R32(PCIE_DCAP(pcie_port));
++    reg |= PCIE_DCAP_ROLE_BASE_ERR_REPORT;
++    reg |= SM(PCIE_MAX_PAYLOAD_128, PCIE_DCAP_MAX_PAYLOAD_SIZE);
++
++    /* Only available for EP */
++    reg &= ~(PCIE_DCAP_EP_L0S_LATENCY | PCIE_DCAP_EP_L1_LATENCY);
++    IFX_REG_W32(reg, PCIE_DCAP(pcie_port));
++
++    /* Device control and status register */
++    /* Set Maximum Read Request size for the device as a Requestor */
++    reg = IFX_REG_R32(PCIE_DCTLSTS(pcie_port));
++
++    /* 
++     * Request size can be larger than the MPS used, but the completions returned 
++     * for the read will be bounded by the MPS size.
++     * In our system, the max request size depends on the AHB burst size, which is 64 bytes,
++     * but we set it to 128 as the minimum value.
++     */
++    reg |= SM(PCIE_MAX_PAYLOAD_128, PCIE_DCTLSTS_MAX_READ_SIZE)
++            | SM(PCIE_MAX_PAYLOAD_128, PCIE_DCTLSTS_MAX_PAYLOAD_SIZE);
++
++    /* Enable relaxed ordering, no snoop, and all kinds of errors */
++    reg |= PCIE_DCTLSTS_RELAXED_ORDERING_EN | PCIE_DCTLSTS_ERR_EN | PCIE_DCTLSTS_NO_SNOOP_EN;
++
++    IFX_REG_W32(reg, PCIE_DCTLSTS(pcie_port));
++}
++
++static inline void
++pcie_link_setup(int pcie_port)
++{
++    u32 reg;
++
++    /*
++     * XXX, Link capability register, bit 18 for EP CLKREQ# dynamic clock management for L1, L2/3 CPM 
++     * L0s is reported during link training via TS1 order set by N_FTS
++     */
++    reg = IFX_REG_R32(PCIE_LCAP(pcie_port));
++    reg &= ~PCIE_LCAP_L0S_EIXT_LATENCY;
++    reg |= SM(3, PCIE_LCAP_L0S_EIXT_LATENCY);
++    IFX_REG_W32(reg, PCIE_LCAP(pcie_port));
++
++    /* Link control and status register */
++    reg = IFX_REG_R32(PCIE_LCTLSTS(pcie_port));
++
++    /* Link Enable, ASPM enabled  */
++    reg &= ~PCIE_LCTLSTS_LINK_DISABLE;
++
++#ifdef CONFIG_PCIEASPM
++    /*  
++     * We use the same physical reference clock that the platform provides on the connector.
++     * This paves the way for ASPM to calculate the new exit latency.
++     */
++    reg |= PCIE_LCTLSTS_SLOT_CLK_CFG;
++    reg |= PCIE_LCTLSTS_COM_CLK_CFG;
++    /*
++     * We should disable ASPM by default unless we have dedicated power management support.
++     * Enabling ASPM can cause system hangs/instability and performance degradation.
++     */
++    reg |= PCIE_LCTLSTS_ASPM_ENABLE;
++#else
++    reg &= ~PCIE_LCTLSTS_ASPM_ENABLE;
++#endif /* CONFIG_PCIEASPM */
++
++    /* 
++     * The maximum size of any completion with data packet is bounded by the MPS setting 
++     * in  device control register 
++     */
++
++    /* RCB may cause multiple split transactions, two options available, we use 64 byte RCB */
++    reg &= ~ PCIE_LCTLSTS_RCB128;
++
++    IFX_REG_W32(reg, PCIE_LCTLSTS(pcie_port));
++}
++
++static inline void pcie_error_setup(int pcie_port)
++{
++	u32 reg;
++
++	/* 
++	* Forward ERR_COR, ERR_NONFATAL, ERR_FATAL to the backbone 
++	* Poisoned write TLPs and completions indicating poisoned TLPs will set the PCIe_PCICMDSTS.MDPE 
++	*/
++	reg = IFX_REG_R32(PCIE_INTRBCTRL(pcie_port));
++	reg |= PCIE_INTRBCTRL_SERR_ENABLE | PCIE_INTRBCTRL_PARITY_ERR_RESP_ENABLE;
++
++	IFX_REG_W32(reg, PCIE_INTRBCTRL(pcie_port));
++
++	/* Uncorrectable Error Mask Register, Unmask <enable> all bits in PCIE_UESR */
++	reg = IFX_REG_R32(PCIE_UEMR(pcie_port));
++	reg &= ~PCIE_ALL_UNCORRECTABLE_ERR;
++	IFX_REG_W32(reg, PCIE_UEMR(pcie_port));
++
++	/* Uncorrectable Error Severity Register, ALL errors are FATAL */
++	IFX_REG_W32(PCIE_ALL_UNCORRECTABLE_ERR, PCIE_UESR(pcie_port));
++
++	/* Correctable Error Mask Register, unmask <enable> all bits */
++	reg = IFX_REG_R32(PCIE_CEMR(pcie_port));
++	reg &= ~PCIE_CORRECTABLE_ERR;
++	IFX_REG_W32(reg, PCIE_CEMR(pcie_port));
++
++	/* Advanced Error Capabilities and Control Register */
++	reg = IFX_REG_R32(PCIE_AECCR(pcie_port));
++	reg |= PCIE_AECCR_ECRC_CHECK_EN | PCIE_AECCR_ECRC_GEN_EN;
++	IFX_REG_W32(reg, PCIE_AECCR(pcie_port));
++
++	/* Root Error Command Register, Report all types of errors */
++	reg = IFX_REG_R32(PCIE_RECR(pcie_port));
++	reg |= PCIE_RECR_ERR_REPORT_EN;
++	IFX_REG_W32(reg, PCIE_RECR(pcie_port));
++
++	/* Clear the Root status register */
++	reg = IFX_REG_R32(PCIE_RESR(pcie_port));
++	IFX_REG_W32(reg, PCIE_RESR(pcie_port));
++}
++
++static inline void pcie_port_logic_setup(int pcie_port)
++{
++	u32 reg;
++
++	/* FTS number, default 12, increase to 63, may increase time from/to L0s to L0  */
++	reg = IFX_REG_R32(PCIE_AFR(pcie_port));
++	reg &= ~(PCIE_AFR_FTS_NUM | PCIE_AFR_COM_FTS_NUM);
++	reg |= SM(PCIE_AFR_FTS_NUM_DEFAULT, PCIE_AFR_FTS_NUM)
++		| SM(PCIE_AFR_FTS_NUM_DEFAULT, PCIE_AFR_COM_FTS_NUM);
++	/* L0s and L1 entry latency */
++	reg &= ~(PCIE_AFR_L0S_ENTRY_LATENCY | PCIE_AFR_L1_ENTRY_LATENCY);
++	reg |= SM(PCIE_AFR_L0S_ENTRY_LATENCY_DEFAULT, PCIE_AFR_L0S_ENTRY_LATENCY)
++		| SM(PCIE_AFR_L1_ENTRY_LATENCY_DEFAULT, PCIE_AFR_L1_ENTRY_LATENCY);
++	IFX_REG_W32(reg, PCIE_AFR(pcie_port));
++
++
++	/* Port Link Control Register */
++	reg = IFX_REG_R32(PCIE_PLCR(pcie_port));
++	reg |= PCIE_PLCR_DLL_LINK_EN;  /* Enable the DLL link */
++	IFX_REG_W32(reg, PCIE_PLCR(pcie_port));
++
++	/* Lane Skew Register */
++	reg = IFX_REG_R32(PCIE_LSR(pcie_port));
++	/* Enable ACK/NACK and FC */
++	reg &= ~(PCIE_LSR_ACKNAK_DISABLE | PCIE_LSR_FC_DISABLE);
++	IFX_REG_W32(reg, PCIE_LSR(pcie_port));
++
++	/* Symbol Timer Register and Filter Mask Register 1 */
++	reg = IFX_REG_R32(PCIE_STRFMR(pcie_port));
++
++	/* Default SKP interval is very accurate already, 5us */
++	/* Enable IO/CFG transaction */
++	reg |= PCIE_STRFMR_RX_CFG_TRANS_ENABLE | PCIE_STRFMR_RX_IO_TRANS_ENABLE;
++	/* Disable FC WDT */
++	reg &= ~PCIE_STRFMR_FC_WDT_DISABLE;
++	IFX_REG_W32(reg, PCIE_STRFMR(pcie_port));
++
++	/* Filter Mask Register 2 */
++	reg = IFX_REG_R32(PCIE_FMR2(pcie_port));
++	reg |= PCIE_FMR2_VENDOR_MSG1_PASSED_TO_TRGT1 | PCIE_FMR2_VENDOR_MSG0_PASSED_TO_TRGT1;
++	IFX_REG_W32(reg, PCIE_FMR2(pcie_port));
++
++	/* VC0 Completion Receive Queue Control Register */
++	reg = IFX_REG_R32(PCIE_VC0_CRQCR(pcie_port));
++	reg &= ~PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE;
++	reg |= SM(PCIE_VC0_TLP_QUEUE_MODE_BYPASS, PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE);
++	IFX_REG_W32(reg, PCIE_VC0_CRQCR(pcie_port));
++}
++
++static inline void pcie_rc_cfg_reg_setup(int pcie_port)
++{
++	u32 reg;
++
++	/* Disable LTSSM */
++	IFX_REG_W32(0, PCIE_RC_CCR(pcie_port)); /* Disable LTSSM */
++
++	pcie_mem_io_setup(pcie_port);
++
++	/* XXX, MSI stuff should only apply to EP */
++	/* MSI Capability: Only enable 32-bit addresses */
++	reg = IFX_REG_R32(PCIE_MCAPR(pcie_port));
++	reg &= ~PCIE_MCAPR_ADDR64_CAP;
++
++	reg |= PCIE_MCAPR_MSI_ENABLE;
++
++	/* Disable multiple message */
++	reg &= ~(PCIE_MCAPR_MULTI_MSG_CAP | PCIE_MCAPR_MULTI_MSG_ENABLE);
++	IFX_REG_W32(reg, PCIE_MCAPR(pcie_port));
++
++
++	/* Enable PME, Soft reset enabled */
++	reg = IFX_REG_R32(PCIE_PM_CSR(pcie_port));
++	reg |= PCIE_PM_CSR_PME_ENABLE | PCIE_PM_CSR_SW_RST;
++	IFX_REG_W32(reg, PCIE_PM_CSR(pcie_port));
++
++	/* setup the bus */
++	reg = SM(0, PCIE_BNR_PRIMARY_BUS_NUM) | SM(1, PCIE_PNR_SECONDARY_BUS_NUM) | SM(0xFF, PCIE_PNR_SUB_BUS_NUM);
++	IFX_REG_W32(reg, PCIE_BNR(pcie_port));
++
++
++	pcie_device_setup(pcie_port);
++	pcie_link_setup(pcie_port);
++	pcie_error_setup(pcie_port);
++
++	/* Root control and capabilities register */
++	reg = IFX_REG_R32(PCIE_RCTLCAP(pcie_port));
++	reg |= PCIE_RCTLCAP_SERR_ENABLE | PCIE_RCTLCAP_PME_INT_EN;
++	IFX_REG_W32(reg, PCIE_RCTLCAP(pcie_port));
++
++	/* Port VC Capability Register 2 */
++	reg = IFX_REG_R32(PCIE_PVC2(pcie_port));
++	reg &= ~PCIE_PVC2_VC_ARB_WRR;
++	reg |= PCIE_PVC2_VC_ARB_16P_FIXED_WRR;
++	IFX_REG_W32(reg, PCIE_PVC2(pcie_port));
++
++	/* VC0 Resource Capability Register */
++	reg = IFX_REG_R32(PCIE_VC0_RC(pcie_port));
++	reg &= ~PCIE_VC0_RC_REJECT_SNOOP;
++	IFX_REG_W32(reg, PCIE_VC0_RC(pcie_port));
++
++	pcie_port_logic_setup(pcie_port);
++}
++
++static int ifx_pcie_wait_phy_link_up(int pcie_port)
++{
++#define IFX_PCIE_PHY_LINK_UP_TIMEOUT  1000 /* XXX, tunable */
++    int i;
++
++    /* Wait for PHY link is up */
++    for (i = 0; i < IFX_PCIE_PHY_LINK_UP_TIMEOUT; i++) {
++        if (ifx_pcie_link_up(pcie_port)) {
++            break;
++        }
++        udelay(100);
++    }
++    if (i >= IFX_PCIE_PHY_LINK_UP_TIMEOUT) {
++        printk(KERN_ERR "%s timeout\n", __func__);
++        return -1;
++    }
++
++    /* Check data link up or not */
++    if (!(IFX_REG_R32(PCIE_RC_DR(pcie_port)) & PCIE_RC_DR_DLL_UP)) {
++        printk(KERN_ERR "%s DLL link is still down\n", __func__);
++        return -1;
++    }
++
++    /* Check Data link active or not */
++    if (!(IFX_REG_R32(PCIE_LCTLSTS(pcie_port)) & PCIE_LCTLSTS_DLL_ACTIVE)) {
++        printk(KERN_ERR "%s DLL is not active\n", __func__);
++        return -1;
++    }
++    return 0;
++}
++
++static inline int pcie_app_loigc_setup(int pcie_port)
++{
++	/* suppress AHB bus errors */
++	IFX_REG_W32(PCIE_AHB_CTRL_BUS_ERROR_SUPPRESS, PCIE_AHB_CTRL(pcie_port));
++
++	/* Pull PCIe EP out of reset */
++	pcie_device_rst_deassert(pcie_port);
++
++	/* Start LTSSM training between RC and EP */
++	pcie_ltssm_enable(pcie_port);
++
++	/* Check PHY status after enabling LTSSM */
++	if (ifx_pcie_wait_phy_link_up(pcie_port) != 0)
++		return -1;
++
++	return 0;
++}
++
++/*
++ * The numbers below are directly from the PCIe spec table 3-4/5. 
++ */
++static inline void pcie_replay_time_update(int pcie_port)
++{
++	u32 reg;
++	int nlw;
++	int rtl;
++
++	reg = IFX_REG_R32(PCIE_LCTLSTS(pcie_port));
++
++	nlw = MS(reg, PCIE_LCTLSTS_NEGOTIATED_LINK_WIDTH);
++	switch (nlw) {
++	case PCIE_MAX_LENGTH_WIDTH_X1:
++		rtl = 1677;
++		break;
++	case PCIE_MAX_LENGTH_WIDTH_X2:
++		rtl = 867;
++		break;
++	case PCIE_MAX_LENGTH_WIDTH_X4:
++		rtl = 462;
++		break;
++	case PCIE_MAX_LENGTH_WIDTH_X8:
++		rtl = 258;
++		break;
++	default:
++		rtl = 1677;
++		break;
++	}
++	reg = IFX_REG_R32(PCIE_ALTRT(pcie_port));
++	reg &= ~PCIE_ALTRT_REPLAY_TIME_LIMIT;
++	reg |= SM(rtl, PCIE_ALTRT_REPLAY_TIME_LIMIT);
++	IFX_REG_W32(reg, PCIE_ALTRT(pcie_port));
++}
++
++/*
++ * Table 359 Enhanced Configuration Address Mapping1)
++ * 1) This table is defined in Table 7-1, page 341, PCI Express Base Specification v1.1
++ * Memory Address PCI Express Configuration Space
++ * A[(20+n-1):20] Bus Number 1 < n < 8
++ * A[19:15] Device Number
++ * A[14:12] Function Number
++ * A[11:8] Extended Register Number
++ * A[7:2] Register Number
++ * A[1:0] Along with size of the access, used to generate Byte Enables
++ * For VR9, only the address bits [22:0] are mapped to the configuration space:
++ * . Address bits [22:20] select the target bus (1-of-8)1)
++ * . Address bits [19:15] select the target device (1-of-32) on the bus
++ * . Address bits [14:12] select the target function (1-of-8) within the device.
++ * . Address bits [11:2] select the target dword (1-of-1024) within the selected function's configuration space
++ * . Address bits [1:0] define the start byte location within the selected dword.
++ */
++static inline u32 pcie_bus_addr(u8 bus_num, u16 devfn, int where)
++{
++	u32 addr;
++	u8  bus;
++
++	if (!bus_num) {
++		/* type 0 */
++		addr = ((PCI_SLOT(devfn) & 0x1F) << 15) | ((PCI_FUNC(devfn) & 0x7) << 12) | ((where & 0xFFF)& ~3);
++	} else {
++		bus = bus_num;
++		/* type 1, only support 8 buses  */
++		addr = ((bus & 0x7) << 20) | ((PCI_SLOT(devfn) & 0x1F) << 15) |
++			((PCI_FUNC(devfn) & 0x7) << 12) | ((where & 0xFFF) & ~3);
++	}
++	return addr;
++}
++
++static int pcie_valid_config(int pcie_port, int bus, int dev)
++{
++	/* RC itself */
++	if ((bus == 0) && (dev == 0)) {
++		return 1;
++	}
++
++	/* No physical link */
++	if (!ifx_pcie_link_up(pcie_port)) {
++		return 0;
++	}
++
++	/* Bus zero only has RC itself
++	* XXX, check if EP will be integrated 
++	*/
++	if ((bus == 0) && (dev != 0)) {
++		return 0;
++	}
++
++	/* Maximum 8 buses supported for VRX */
++	if (bus > 9) {
++		return 0;
++	}
++
++	/* 
++	 * PCIe is a point-to-point link, so one bus supports only one device,
++	 * except for bus zero and PCIe switches, which present virtual bus devices.
++	 * The following two conditions really depend on the system design
++	 * and the attached device.
++	 * XXX, what about newer switches
++	*/
++	if ((bus == 1) && (dev != 0)) {
++		return 0;
++	}
++
++	if ((bus >= 3) && (dev != 0)) {
++		return 0;
++	}
++	return 1;
++}
++
++static inline u32 ifx_pcie_cfg_rd(int pcie_port, u32 reg)
++{
++    return IFX_REG_R32((volatile u32 *)(PCIE_CFG_PORT_TO_BASE(pcie_port) + reg));
++}
++
++static inline void ifx_pcie_cfg_wr(int pcie_port, unsigned int reg, u32 val)
++{
++    IFX_REG_W32( val, (volatile u32 *)(PCIE_CFG_PORT_TO_BASE(pcie_port) + reg));
++}
++
++static inline u32 ifx_pcie_rc_cfg_rd(int pcie_port, u32 reg)
++{
++    return IFX_REG_R32((volatile u32 *)(PCIE_RC_PORT_TO_BASE(pcie_port) + reg));
++}
++
++static inline void ifx_pcie_rc_cfg_wr(int pcie_port, unsigned int reg, u32 val)
++{
++	IFX_REG_W32(val, (volatile u32 *)(PCIE_RC_PORT_TO_BASE(pcie_port) + reg));
++}
++
++u32 ifx_pcie_bus_enum_read_hack(int where, u32 value)
++{
++	u32 tvalue = value;
++
++	if (where == PCI_PRIMARY_BUS) {
++		u8 primary, secondary, subordinate;
++
++		primary = tvalue & 0xFF;
++		secondary = (tvalue >> 8) & 0xFF;
++		subordinate = (tvalue >> 16) & 0xFF;
++		primary += pcibios_1st_host_bus_nr();
++		secondary += pcibios_1st_host_bus_nr();
++		subordinate += pcibios_1st_host_bus_nr();
++		tvalue = (tvalue & 0xFF000000) | (u32)primary | (u32)(secondary << 8) | (u32)(subordinate << 16);
++	}
++	return tvalue;
++}
++
++u32 ifx_pcie_bus_enum_write_hack(int where, u32 value)
++{
++    u32 tvalue = value;
++
++    if (where == PCI_PRIMARY_BUS) {
++        u8 primary, secondary, subordinate;
++
++        primary = tvalue & 0xFF;
++        secondary = (tvalue >> 8) & 0xFF;
++        subordinate = (tvalue >> 16) & 0xFF;
++        if (primary > 0 && primary != 0xFF) {
++            primary -= pcibios_1st_host_bus_nr();
++        }
++
++        if (secondary > 0 && secondary != 0xFF) {
++            secondary -= pcibios_1st_host_bus_nr();
++        }
++        if (subordinate > 0 && subordinate != 0xFF) {
++            subordinate -= pcibios_1st_host_bus_nr();
++        }
++        tvalue = (tvalue & 0xFF000000) | (u32)primary | (u32)(secondary << 8) | (u32)(subordinate << 16);
++    }
++    else if (where == PCI_SUBORDINATE_BUS) {
++        u8 subordinate = tvalue & 0xFF;
++
++        subordinate = subordinate > 0 ? subordinate - pcibios_1st_host_bus_nr() : 0;
++        tvalue = subordinate;
++    }
++    return tvalue;
++}
++
++static int ifx_pcie_read_config(struct pci_bus *bus, u32 devfn,
++				int where, int size, u32 *value)
++{
++    u32 data = 0;
++    int bus_number = bus->number;
++    static const u32 mask[8] = {0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0};
++    int ret = PCIBIOS_SUCCESSFUL;
++    struct ifx_pci_controller *ctrl = bus->sysdata;
++    int pcie_port = ctrl->port;
++
++    if (unlikely(size != 1 && size != 2 && size != 4)){
++        ret = PCIBIOS_BAD_REGISTER_NUMBER;
++        goto out;
++    }
++
++    /* Make sure the address is aligned to natural boundary */
++    if (unlikely(((size - 1) & where))) {
++        ret = PCIBIOS_BAD_REGISTER_NUMBER;
++        goto out;
++    }
++
++    /* 
++     * If we are the second controller, we have to cheat the OS so that it assumes
++     * its bus number starts from 0 in this host controller
++     */
++    bus_number = ifx_pcie_bus_nr_deduct(bus_number, pcie_port);
++
++    /* 
++     * We need to force the bus number to be zero on the root 
++     * bus. Linux numbers the 2nd root bus to start after all 
++     * busses on root 0. 
++     */ 
++    if (bus->parent == NULL) {
++        bus_number = 0; 
++    }
++
++    /* 
++     * PCIe only has a single device connected to it. It is 
++     * always device ID 0. Don't bother doing reads for other 
++     * device IDs on the first segment. 
++     */ 
++    if ((bus_number == 0) && (PCI_SLOT(devfn) != 0)) {
++        ret = PCIBIOS_FUNC_NOT_SUPPORTED;
++        goto out; 
++    }
++
++    if (pcie_valid_config(pcie_port, bus_number, PCI_SLOT(devfn)) == 0) {
++        *value = 0xffffffff;
++        ret = PCIBIOS_DEVICE_NOT_FOUND;
++        goto out;
++    }
++
++    PCIE_IRQ_LOCK(ifx_pcie_lock);
++    if (bus_number == 0) { /* RC itself */
++        u32 t;
++
++        t = (where & ~3);
++        data = ifx_pcie_rc_cfg_rd(pcie_port, t);
++    } else {
++        u32 addr = pcie_bus_addr(bus_number, devfn, where);
++
++        data = ifx_pcie_cfg_rd(pcie_port, addr);
++    #ifdef CONFIG_IFX_PCIE_HW_SWAP
++            data = le32_to_cpu(data);
++    #endif /* CONFIG_IFX_PCIE_HW_SWAP */
++    }
++    /* To get a correct PCI topology, we have to restore the bus number to OS */
++    data = ifx_pcie_bus_enum_hack(bus, devfn, where, data, pcie_port, 1);
++
++    PCIE_IRQ_UNLOCK(ifx_pcie_lock);
++
++    *value = (data >> (8 * (where & 3))) & mask[size & 7];
++out:
++    return ret;
++}
++
++static u32 ifx_pcie_size_to_value(int where, int size, u32 data, u32 value)
++{
++	u32 shift;
++	u32 tdata = data;
++
++	switch (size) {
++	case 1:
++		shift = (where & 0x3) << 3;
++		tdata &= ~(0xffU << shift);
++		tdata |= ((value & 0xffU) << shift);
++		break;
++	case 2:
++		shift = (where & 3) << 3;
++		tdata &= ~(0xffffU << shift);
++		tdata |= ((value & 0xffffU) << shift);
++		break;
++	case 4:
++		tdata = value;
++		break;
++	}
++	return tdata;
++}
++
++static int ifx_pcie_write_config(struct pci_bus *bus, u32 devfn,
++				int where, int size, u32 value)
++{
++	int bus_number = bus->number;
++	int ret = PCIBIOS_SUCCESSFUL;
++	struct ifx_pci_controller *ctrl = bus->sysdata;
++	int pcie_port = ctrl->port;
++	u32 tvalue = value;
++	u32 data;
++
++	/* Make sure the address is aligned to natural boundary */
++	if (unlikely(((size - 1) & where))) {
++		ret = PCIBIOS_BAD_REGISTER_NUMBER;
++		goto out;
++	}
++	/* 
++	* If we are the second controller, we have to cheat the OS so that it assumes
++	* its bus number starts from 0 in this host controller
++	*/
++	bus_number = ifx_pcie_bus_nr_deduct(bus_number, pcie_port);
++
++	/* 
++	* We need to force the bus number to be zero on the root 
++	* bus. Linux numbers the 2nd root bus to start after all 
++	* busses on root 0. 
++	*/ 
++	if (bus->parent == NULL) {
++		bus_number = 0; 
++	}
++
++	if (pcie_valid_config(pcie_port, bus_number, PCI_SLOT(devfn)) == 0) {
++		ret = PCIBIOS_DEVICE_NOT_FOUND;
++		goto out;
++	}
++
++	/* XXX, some PCIe device may need some delay */
++	PCIE_IRQ_LOCK(ifx_pcie_lock);
++
++	/* 
++	* To configure the correct bus topology in the native way, we have to cheat the OS so that
++	* it can configure the PCIe hardware correctly.
++	*/
++	tvalue = ifx_pcie_bus_enum_hack(bus, devfn, where, value, pcie_port, 0);
++
++	if (bus_number == 0) { /* RC itself */
++		u32 t;
++
++		t = (where & ~3);
++		data = ifx_pcie_rc_cfg_rd(pcie_port, t);
++
++		data = ifx_pcie_size_to_value(where, size, data, tvalue);
++
++		ifx_pcie_rc_cfg_wr(pcie_port, t, data);
++	} else {
++		u32 addr = pcie_bus_addr(bus_number, devfn, where);
++
++		data = ifx_pcie_cfg_rd(pcie_port, addr);
++#ifdef CONFIG_IFX_PCIE_HW_SWAP
++		data = le32_to_cpu(data);
++#endif
++
++		data = ifx_pcie_size_to_value(where, size, data, tvalue);
++#ifdef CONFIG_IFX_PCIE_HW_SWAP
++		data = cpu_to_le32(data);
++#endif
++		ifx_pcie_cfg_wr(pcie_port, addr, data);
++	}
++	PCIE_IRQ_UNLOCK(ifx_pcie_lock);
++out:
++	return ret;
++}
++
++static struct resource ifx_pcie_io_resource = {
++	.name	= "PCIe0 I/O space",
++	.start	= PCIE_IO_PHY_BASE,
++	.end	= PCIE_IO_PHY_END,
++	.flags	= IORESOURCE_IO,
++};
++
++static struct resource ifx_pcie_mem_resource = {
++	.name	= "PCIe0 Memory space",
++	.start	= PCIE_MEM_PHY_BASE,
++	.end	= PCIE_MEM_PHY_END,
++	.flags	= IORESOURCE_MEM,
++};
++
++static struct pci_ops ifx_pcie_ops = {
++	.read	= ifx_pcie_read_config,
++	.write	= ifx_pcie_write_config,
++};
++
++static struct ifx_pci_controller ifx_pcie_controller[IFX_PCIE_CORE_NR] = {
++    {
++        .pcic = {
++            .pci_ops      = &ifx_pcie_ops,
++            .mem_resource = &ifx_pcie_mem_resource,
++            .io_resource  = &ifx_pcie_io_resource,
++         },
++         .port = IFX_PCIE_PORT0,
++    },
++};
++
++#ifdef IFX_PCIE_ERROR_INT
++
++static irqreturn_t pcie_rc_core_isr(int irq, void *dev_id)
++{
++	struct ifx_pci_controller *ctrl = (struct ifx_pci_controller *)dev_id;
++	int pcie_port = ctrl->port;
++	u32 reg;
++
++	pr_debug("PCIe RC error intr %d\n", irq);
++	reg = IFX_REG_R32(PCIE_IRNCR(pcie_port));
++	reg &= PCIE_RC_CORE_COMBINED_INT;
++	IFX_REG_W32(reg, PCIE_IRNCR(pcie_port));
++
++	return IRQ_HANDLED;
++}
++
++static int
++pcie_rc_core_int_init(int pcie_port)
++{
++	int ret;
++
++	/* Enable core interrupt */
++	IFX_REG_SET_BIT(PCIE_RC_CORE_COMBINED_INT, PCIE_IRNEN(pcie_port));
++
++	/* Clear it first */
++	IFX_REG_SET_BIT(PCIE_RC_CORE_COMBINED_INT, PCIE_IRNCR(pcie_port));
++	ret = request_irq(pcie_irqs[pcie_port].ir_irq.irq, pcie_rc_core_isr, 0,
++		pcie_irqs[pcie_port].ir_irq.name, &ifx_pcie_controller[pcie_port]);
++	if (ret)
++		printk(KERN_ERR "%s request irq %d failed\n", __func__, IFX_PCIE_IR);
++
++	return ret;
++}
++#endif
++
++int ifx_pcie_bios_map_irq(IFX_PCI_CONST struct pci_dev *dev, u8 slot, u8 pin)
++{
++	u32 irq_bit = 0;
++	int irq = 0;
++	struct ifx_pci_controller *ctrl = dev->bus->sysdata;
++	int pcie_port = ctrl->port;
++
++	printk("%s port %d dev %s slot %d pin %d \n", __func__, pcie_port, pci_name(dev), slot, pin);
++
++	if ((pin == PCIE_LEGACY_DISABLE) || (pin > PCIE_LEGACY_INT_MAX)) {
++		printk(KERN_WARNING "WARNING: dev %s: invalid interrupt pin %d\n", pci_name(dev), pin);
++		return -1;
++	}
++
++	/* Pin index so minus one */
++	irq_bit = pcie_irqs[pcie_port].legacy_irq[pin - 1].irq_bit;
++	irq = pcie_irqs[pcie_port].legacy_irq[pin - 1].irq;
++	IFX_REG_SET_BIT(irq_bit, PCIE_IRNEN(pcie_port));
++	IFX_REG_SET_BIT(irq_bit, PCIE_IRNCR(pcie_port));
++	printk("%s dev %s irq %d assigned\n", __func__, pci_name(dev), irq);
++	return irq;
++}
++
++int  ifx_pcie_bios_plat_dev_init(struct pci_dev *dev)
++{
++    u16 config;
++#ifdef IFX_PCIE_ERROR_INT
++    u32 dconfig; 
++    int pos;
++#endif
++
++    /* Enable reporting System errors and parity errors on all devices */ 
++    /* Enable parity checking and error reporting */ 
++    pci_read_config_word(dev, PCI_COMMAND, &config);
++    config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR /*| PCI_COMMAND_INVALIDATE |
++          PCI_COMMAND_FAST_BACK*/;
++    pci_write_config_word(dev, PCI_COMMAND, config);
++
++    if (dev->subordinate) {
++        /* Set latency timers on sub bridges */
++        pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 0x40); /* XXX, */
++        /* More bridge error detection */
++        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
++        config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
++        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
++    }
++#ifdef IFX_PCIE_ERROR_INT
++    /* Enable the PCIe normal error reporting */
++    pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
++    if (pos) {
++
++        /* Disable system error generation in response to error messages */
++        pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &config);
++        config &= ~(PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE | PCI_EXP_RTCTL_SEFEE);
++        pci_write_config_word(dev, pos + PCI_EXP_RTCTL, config);
++
++        /* Clear PCIE Capability's Device Status */
++        pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &config);
++        pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, config);
++
++        /* Update Device Control */ 
++        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config);
++        /* Correctable Error Reporting */
++        config |= PCI_EXP_DEVCTL_CERE;
++        /* Non-Fatal Error Reporting */
++        config |= PCI_EXP_DEVCTL_NFERE;
++        /* Fatal Error Reporting */
++        config |= PCI_EXP_DEVCTL_FERE;
++        /* Unsupported Request */
++        config |= PCI_EXP_DEVCTL_URRE;
++        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config);
++    }
++
++    /* Find the Advanced Error Reporting capability */
++    pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
++    if (pos) {
++        /* Clear Uncorrectable Error Status */ 
++        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &dconfig);
++        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, dconfig);
++        /* Enable reporting of all uncorrectable errors */
++        /* Uncorrectable Error Mask - turned on bits disable errors */
++        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
++        /* 
++        * Leave severity at HW default. This only controls if 
++        * errors are reported as uncorrectable or 
++        * correctable, not if the error is reported. 
++        */ 
++        /* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
++        /* Clear Correctable Error Status */
++        pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
++        pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
++        /* Enable reporting of all correctable errors */
++        /* Correctable Error Mask - turned on bits disable errors */
++        pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
++        /* Advanced Error Capabilities */ 
++        pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
++        /* ECRC Generation Enable */
++        if (dconfig & PCI_ERR_CAP_ECRC_GENC) {
++            dconfig |= PCI_ERR_CAP_ECRC_GENE;
++        }
++        /* ECRC Check Enable */
++        if (dconfig & PCI_ERR_CAP_ECRC_CHKC) {
++            dconfig |= PCI_ERR_CAP_ECRC_CHKE;
++        }
++        pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
++
++        /* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
++        /* Enable Root Port's interrupt in response to error messages */
++        pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
++              PCI_ERR_ROOT_CMD_COR_EN |
++              PCI_ERR_ROOT_CMD_NONFATAL_EN |
++              PCI_ERR_ROOT_CMD_FATAL_EN); 
++        /* Clear the Root status register */
++        pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
++        pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
++    }
++#endif /* IFX_PCIE_ERROR_INT */
++    /* WAR, only 128 MRRS is supported, force all EPs to support this value */
++    pcie_set_readrq(dev, 128);
++    return 0;
++}
++
++static int
++pcie_rc_initialize(int pcie_port)
++{
++	int i;
++#define IFX_PCIE_PHY_LOOP_CNT  5
++
++	pcie_rcu_endian_setup(pcie_port);
++
++	pcie_ep_gpio_rst_init(pcie_port);
++
++	/* 
++	* XXX, the PCIe elastic buffer bug can cause the device not to be detected.
++	* One more PCIe PHY reset will solve this issue
++	*/
++	for (i = 0; i < IFX_PCIE_PHY_LOOP_CNT; i++) {
++		/* Disable PCIe PHY Analog part for sanity check */
++		pcie_phy_pmu_disable(pcie_port);
++
++		pcie_phy_rst_assert(pcie_port);
++		pcie_phy_rst_deassert(pcie_port);
++
++		/* Make sure PHY PLL is stable */
++		udelay(20);
++
++		/* PCIe Core reset enabled, low active, sw programmed */
++		pcie_core_rst_assert(pcie_port);
++
++		/* Put PCIe EP in reset status */
++		pcie_device_rst_assert(pcie_port);
++
++		/* PCI PHY & Core reset disabled, high active, sw programmed */
++		pcie_core_rst_deassert(pcie_port);
++
++		/* Already in a quiet state, program PLL, enable PHY, check ready bit */
++		pcie_phy_clock_mode_setup(pcie_port);
++
++		/* Enable PCIe PHY and Clock */
++		pcie_core_pmu_setup(pcie_port);
++
++		/* Clear status registers */
++		pcie_status_register_clear(pcie_port);
++
++#ifdef CONFIG_PCI_MSI
++		pcie_msi_init(pcie_port);
++#endif /* CONFIG_PCI_MSI */
++		pcie_rc_cfg_reg_setup(pcie_port);
++
++		/* Once link is up, break out */
++		if (pcie_app_loigc_setup(pcie_port) == 0)
++			break;
++	}
++	if (i >= IFX_PCIE_PHY_LOOP_CNT) {
++		printk(KERN_ERR "%s link up failed!!!!!\n", __func__);
++		return -EIO;
++	}
++	/* NB, don't increase ACK/NACK timer timeout value, which will cause a lot of COR errors */
++	pcie_replay_time_update(pcie_port);
++	return 0;
++}
++
++static int __init ifx_pcie_bios_init(void)
++{
++    void __iomem *io_map_base;
++    int pcie_port;
++    int startup_port;
++
++    /* Enable AHB Master/ Slave */
++    pcie_ahb_pmu_setup();
++
++    startup_port = IFX_PCIE_PORT0;
++    
++    for (pcie_port = startup_port; pcie_port < IFX_PCIE_CORE_NR; pcie_port++){
++	if (pcie_rc_initialize(pcie_port) == 0) {
++	    IFX_PCIE_PRINT(PCIE_MSG_INIT, "%s: ifx_pcie_cfg_base 0x%p\n", 
++                 __func__, PCIE_CFG_PORT_TO_BASE(pcie_port));
++            /* Otherwise, warning will pop up */
++            io_map_base = ioremap(PCIE_IO_PHY_PORT_TO_BASE(pcie_port), PCIE_IO_SIZE);
++            if (io_map_base == NULL) {
++                IFX_PCIE_PRINT(PCIE_MSG_ERR, "%s io space ioremap failed\n", __func__);
++                return -ENOMEM;
++            }
++            ifx_pcie_controller[pcie_port].pcic.io_map_base = (unsigned long)io_map_base;
++
++            register_pci_controller(&ifx_pcie_controller[pcie_port].pcic);
++            /* XXX, clear error status */
++
++            IFX_PCIE_PRINT(PCIE_MSG_INIT, "%s: mem_resource 0x%p, io_resource 0x%p\n", 
++                              __func__, &ifx_pcie_controller[pcie_port].pcic.mem_resource, 
++                              &ifx_pcie_controller[pcie_port].pcic.io_resource);
++
++        #ifdef IFX_PCIE_ERROR_INT
++            pcie_rc_core_int_init(pcie_port);
++        #endif /* IFX_PCIE_ERROR_INT */
++        }
++    }
++
++    return 0;
++}
++arch_initcall(ifx_pcie_bios_init);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Chuanhua.Lei at infineon.com");
++MODULE_SUPPORTED_DEVICE("Infineon builtin PCIe RC module");
++MODULE_DESCRIPTION("Infineon builtin PCIe RC driver");
++
+--- /dev/null
++++ b/arch/mips/pci/ifxmips_pcie.h
+@@ -0,0 +1,135 @@
++/******************************************************************************
++**
++** FILE NAME    : ifxmips_pcie.h
++** PROJECT      : IFX UEIP for VRX200
++** MODULES      : PCIe module
++**
++** DATE         : 02 Mar 2009
++** AUTHOR       : Lei Chuanhua
++** DESCRIPTION  : PCIe Root Complex Driver
++** COPYRIGHT    :       Copyright (c) 2009
++**                      Infineon Technologies AG
++**                      Am Campeon 1-12, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++** HISTORY
++** $Version $Date        $Author         $Comment
++** 0.0.1    17 Mar,2009  Lei Chuanhua    Initial version
++*******************************************************************************/
++#ifndef IFXMIPS_PCIE_H
++#define IFXMIPS_PCIE_H
++#include <linux/version.h>
++#include <linux/types.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include "ifxmips_pci_common.h"
++#include "ifxmips_pcie_reg.h"
++
++/*!
++ \defgroup IFX_PCIE  PCI Express bus driver module   
++ \brief  PCI Express IP module support for VRX200
++*/
++
++/*!
++ \defgroup IFX_PCIE_OS OS APIs
++ \ingroup IFX_PCIE
++ \brief PCIe bus driver OS interface functions
++*/
++
++/*!
++ \file ifxmips_pcie.h
++ \ingroup IFX_PCIE  
++ \brief common header file for the PCIe module
++*/
++#define PCIE_IRQ_LOCK(lock) do {             \
++    unsigned long flags;                     \
++    spin_lock_irqsave(&(lock), flags);
++#define PCIE_IRQ_UNLOCK(lock)                \
++    spin_unlock_irqrestore(&(lock), flags);  \
++} while (0)
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
++#define IRQF_SHARED SA_SHIRQ
++#endif
++
++#define PCIE_MSG_MSI        0x00000001
++#define PCIE_MSG_ISR        0x00000002
++#define PCIE_MSG_FIXUP      0x00000004
++#define PCIE_MSG_READ_CFG   0x00000008
++#define PCIE_MSG_WRITE_CFG  0x00000010
++#define PCIE_MSG_CFG        (PCIE_MSG_READ_CFG | PCIE_MSG_WRITE_CFG)
++#define PCIE_MSG_REG        0x00000020
++#define PCIE_MSG_INIT       0x00000040
++#define PCIE_MSG_ERR        0x00000080
++#define PCIE_MSG_PHY        0x00000100
++#define PCIE_MSG_ANY        0x000001ff
++
++#define IFX_PCIE_PORT0      0
++#define IFX_PCIE_PORT1      1
++
++#ifdef CONFIG_IFX_PCIE_2ND_CORE
++#define IFX_PCIE_CORE_NR    2
++#else
++#define IFX_PCIE_CORE_NR    1
++#endif
++
++#define IFX_PCIE_ERROR_INT
++
++//#define IFX_PCIE_DBG
++
++#if defined(IFX_PCIE_DBG)
++#define IFX_PCIE_PRINT(_m, _fmt, args...) do {   \
++        ifx_pcie_debug((_fmt), ##args);          \
++} while (0)
++
++#define INLINE 
++#else
++#define IFX_PCIE_PRINT(_m, _fmt, args...)   \
++    do {} while(0)
++#define INLINE inline
++#endif
++
++struct ifx_pci_controller {
++	struct pci_controller   pcic;
++    
++	/* RC specific, per host bus information */
++	u32   port;  /* Port index, 0 -- 1st core, 1 -- 2nd core */
++};
++
++typedef struct ifx_pcie_ir_irq {
++    const unsigned int irq;
++    const char name[16];
++}ifx_pcie_ir_irq_t;
++
++typedef struct ifx_pcie_legacy_irq{
++    const u32 irq_bit;
++    const int irq;
++}ifx_pcie_legacy_irq_t;
++
++typedef struct ifx_pcie_irq {
++    ifx_pcie_ir_irq_t ir_irq;
++    ifx_pcie_legacy_irq_t legacy_irq[PCIE_LEGACY_INT_MAX];
++}ifx_pcie_irq_t;
++
++extern u32 g_pcie_debug_flag;
++extern void ifx_pcie_debug(const char *fmt, ...);
++extern void pcie_phy_clock_mode_setup(int pcie_port);
++extern void pcie_msi_pic_init(int pcie_port);
++extern u32 ifx_pcie_bus_enum_read_hack(int where, u32 value);
++extern u32 ifx_pcie_bus_enum_write_hack(int where, u32 value);
++
++#define CONFIG_VR9
++
++#ifdef CONFIG_VR9
++#include "ifxmips_pcie_vr9.h"
++#elif defined (CONFIG_AR10)
++#include "ifxmips_pcie_ar10.h"
++#else
++#error "PCIE: platform not defined"
++#endif /* CONFIG_VR9 */
++
++#endif  /* IFXMIPS_PCIE_H */
++
+--- /dev/null
++++ b/arch/mips/pci/ifxmips_pcie_ar10.h
+@@ -0,0 +1,290 @@
++/****************************************************************************
++                              Copyright (c) 2010
++                            Lantiq Deutschland GmbH
++                     Am Campeon 3; 85579 Neubiberg, Germany
++
++  For licensing information, see the file 'LICENSE' in the root folder of
++  this software module.
++
++ *****************************************************************************/
++/*!
++  \file ifxmips_pcie_ar10.h
++  \ingroup IFX_PCIE
++  \brief PCIe RC driver ar10 specific file
++*/
++
++#ifndef IFXMIPS_PCIE_AR10_H
++#define IFXMIPS_PCIE_AR10_H
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif /* AUTOCONF_INCLUDED */
++#include <linux/types.h>
++#include <linux/delay.h>
++
++/* Project header file */
++#include <asm/ifx/ifx_types.h>
++#include <asm/ifx/ifx_pmu.h>
++#include <asm/ifx/ifx_gpio.h>
++#include <asm/ifx/ifx_ebu_led.h>
++
++static inline void pcie_ep_gpio_rst_init(int pcie_port)
++{
++    ifx_ebu_led_enable();
++    if (pcie_port == 0) {
++        ifx_ebu_led_set_data(11, 1);        
++    }
++    else {
++        ifx_ebu_led_set_data(12, 1);  
++    }
++}
++
++static inline void pcie_ahb_pmu_setup(void) 
++{
++    /* XXX, moved to CGU to control AHBM */
++}
++
++static inline void pcie_rcu_endian_setup(int pcie_port)
++{
++    u32 reg;
++
++    reg = IFX_REG_R32(IFX_RCU_AHB_ENDIAN);
++    /* Inbound, big endian */
++    reg |= IFX_RCU_BE_AHB4S;
++    if (pcie_port == 0) {
++        reg |= IFX_RCU_BE_PCIE0M;
++
++    #ifdef CONFIG_IFX_PCIE_HW_SWAP
++        /* Outbound, software swap needed */
++        reg |= IFX_RCU_BE_AHB3M;
++        reg &= ~IFX_RCU_BE_PCIE0S;
++    #else
++        /* Outbound little endian  */
++        reg &= ~IFX_RCU_BE_AHB3M;
++        reg &= ~IFX_RCU_BE_PCIE0S;
++    #endif
++    }
++    else {
++        reg |= IFX_RCU_BE_PCIE1M;
++    #ifdef CONFIG_IFX_PCIE1_HW_SWAP
++        /* Outbound, software swap needed */
++        reg |= IFX_RCU_BE_AHB3M;
++        reg &= ~IFX_RCU_BE_PCIE1S;
++    #else
++        /* Outbound little endian  */
++        reg &= ~IFX_RCU_BE_AHB3M;
++        reg &= ~IFX_RCU_BE_PCIE1S;
++    #endif
++    }
++
++    IFX_REG_W32(reg, IFX_RCU_AHB_ENDIAN);
++    IFX_PCIE_PRINT(PCIE_MSG_REG, "%s IFX_RCU_AHB_ENDIAN: 0x%08x\n", __func__, IFX_REG_R32(IFX_RCU_AHB_ENDIAN));
++}
++
++static inline void pcie_phy_pmu_enable(int pcie_port)
++{
++    if (pcie_port == 0) { /* XXX, should use macro*/
++        PCIE0_PHY_PMU_SETUP(IFX_PMU_ENABLE);
++    }
++    else {
++        PCIE1_PHY_PMU_SETUP(IFX_PMU_ENABLE);
++    }
++}
++
++static inline void pcie_phy_pmu_disable(int pcie_port)
++{
++    if (pcie_port == 0) { /* XXX, should use macro*/
++        PCIE0_PHY_PMU_SETUP(IFX_PMU_DISABLE);
++    }
++    else {
++        PCIE1_PHY_PMU_SETUP(IFX_PMU_DISABLE);
++    }
++}
++
++static inline void pcie_pdi_big_endian(int pcie_port)
++{
++    u32 reg;
++
++    reg = IFX_REG_R32(IFX_RCU_AHB_ENDIAN);
++    if (pcie_port == 0) {
++        /* Config AHB->PCIe and PDI endianness */
++        reg |= IFX_RCU_BE_PCIE0_PDI;
++    }
++    else {
++        /* Config AHB->PCIe and PDI endianness */
++        reg |= IFX_RCU_BE_PCIE1_PDI;
++    }
++    IFX_REG_W32(reg, IFX_RCU_AHB_ENDIAN);
++}
++
++static inline void pcie_pdi_pmu_enable(int pcie_port)
++{
++    if (pcie_port == 0) {
++        /* Enable PDI to access PCIe PHY register */
++        PDI0_PMU_SETUP(IFX_PMU_ENABLE);
++    }
++    else {
++        PDI1_PMU_SETUP(IFX_PMU_ENABLE);
++    }
++}
++
++static inline void pcie_core_rst_assert(int pcie_port)
++{
++    u32 reg;
++
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++
++    /* Reset Core, bit 22 */
++    if (pcie_port == 0) {
++        reg |= 0x00400000;
++    }
++    else {
++        reg |= 0x08000000; /* Bit 27 */
++    }
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_core_rst_deassert(int pcie_port)
++{
++    u32 reg;
++
++    /* Make sure one micro-second delay */
++    udelay(1);
++
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++    if (pcie_port == 0) {
++        reg &= ~0x00400000; /* bit 22 */
++    }
++    else {
++        reg &= ~0x08000000; /* Bit 27 */
++    }
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_phy_rst_assert(int pcie_port)
++{
++    u32 reg;
++
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++    if (pcie_port == 0) {
++        reg |= 0x00001000; /* Bit 12 */
++    }
++    else {
++        reg |= 0x00002000; /* Bit 13 */
++    }
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_phy_rst_deassert(int pcie_port)
++{
++    u32 reg;
++
++    /* Make sure one micro-second delay */
++    udelay(1);
++
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++    if (pcie_port == 0) {
++        reg &= ~0x00001000; /* Bit 12 */
++    }
++    else {
++        reg &= ~0x00002000; /* Bit 13 */
++    }
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_device_rst_assert(int pcie_port)
++{
++    if (pcie_port == 0) {
++        ifx_ebu_led_set_data(11, 0);
++    }
++    else {
++        ifx_ebu_led_set_data(12, 0);
++    }
++}
++
++static inline void pcie_device_rst_deassert(int pcie_port)
++{
++    mdelay(100);
++    if (pcie_port == 0) {
++        ifx_ebu_led_set_data(11, 1);
++    }
++    else {
++        ifx_ebu_led_set_data(12, 1);
++    }
++    ifx_ebu_led_disable();
++}
++
++static inline void pcie_core_pmu_setup(int pcie_port)
++{
++    if (pcie_port == 0) {
++        PCIE0_CTRL_PMU_SETUP(IFX_PMU_ENABLE);
++    }
++    else {
++        PCIE1_CTRL_PMU_SETUP(IFX_PMU_ENABLE); 
++    }
++}
++
++static inline void pcie_msi_init(int pcie_port)
++{
++    pcie_msi_pic_init(pcie_port);
++    if (pcie_port == 0) {
++        MSI0_PMU_SETUP(IFX_PMU_ENABLE);
++    }
++    else {
++        MSI1_PMU_SETUP(IFX_PMU_ENABLE);
++    }
++}
++
++static inline u32
++ifx_pcie_bus_nr_deduct(u32 bus_number, int pcie_port)
++{
++    u32 tbus_number = bus_number;
++
++#ifdef CONFIG_IFX_PCIE_2ND_CORE
++    if (pcie_port == IFX_PCIE_PORT1) { /* Port 1 must check if there are two cores enabled */
++        if (pcibios_host_nr() > 1) {
++            tbus_number -= pcibios_1st_host_bus_nr();
++        }        
++    }
++#endif /* CONFIG_IFX_PCI */
++    return tbus_number;
++}
++
++static inline u32
++ifx_pcie_bus_enum_hack(struct pci_bus *bus, u32 devfn, int where, u32 value, int pcie_port, int read)
++{
++    struct pci_dev *pdev;
++    u32 tvalue = value;
++
++    /* Sanity check */
++    pdev = pci_get_slot(bus, devfn);
++    if (pdev == NULL) {
++        return tvalue;
++    }
++
++    /* Only care about PCI bridge */
++    if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
++        return tvalue;
++    }
++
++    if (read) { /* Read hack */
++    #ifdef CONFIG_IFX_PCIE_2ND_CORE
++        if (pcie_port == IFX_PCIE_PORT1) { /* Port 1 must check if there are two cores enabled */
++            if (pcibios_host_nr() > 1) {
++                tvalue = ifx_pcie_bus_enum_read_hack(where, tvalue);
++            }
++        }
++    #endif /* CONFIG_IFX_PCIE_2ND_CORE */
++    }
++    else { /* Write hack */
++    #ifdef CONFIG_IFX_PCIE_2ND_CORE
++        if (pcie_port == IFX_PCIE_PORT1) { /* Port 1 must check if there are two cores enabled */
++            if (pcibios_host_nr() > 1) {
++                tvalue = ifx_pcie_bus_enum_write_hack(where, tvalue);
++            }
++        }
++    #endif
++    }
++    return tvalue;
++}
++
++#endif /* IFXMIPS_PCIE_AR10_H */
+--- /dev/null
++++ b/arch/mips/pci/ifxmips_pcie_msi.c
+@@ -0,0 +1,392 @@
++/******************************************************************************
++**
++** FILE NAME    : ifxmips_pcie_msi.c
++** PROJECT      : IFX UEIP for VRX200
++** MODULES      : PCI MSI sub module
++**
++** DATE         : 02 Mar 2009
++** AUTHOR       : Lei Chuanhua
++** DESCRIPTION  : PCIe MSI Driver
++** COPYRIGHT    :       Copyright (c) 2009
++**                      Infineon Technologies AG
++**                      Am Campeon 1-12, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++** HISTORY
++** $Date        $Author         $Comment
++** 02 Mar,2009  Lei Chuanhua    Initial version
++*******************************************************************************/
++/*!
++ \defgroup IFX_PCIE_MSI MSI OS APIs
++ \ingroup IFX_PCIE
++ \brief PCIe bus driver OS interface functions
++*/
++
++/*!
++ \file ifxmips_pcie_msi.c
++ \ingroup IFX_PCIE 
++ \brief PCIe MSI OS interface file
++*/
++
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif /* AUTOCONF_INCLUDED */
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/kernel_stat.h>
++#include <linux/pci.h>
++#include <linux/msi.h>
++#include <linux/module.h>
++#include <asm/bootinfo.h>
++#include <asm/irq.h>
++#include <asm/traps.h>
++
++#include <asm/ifx/ifx_types.h>
++#include <asm/ifx/ifx_regs.h>
++#include <asm/ifx/common_routines.h>
++#include <asm/ifx/irq.h>
++
++#include "ifxmips_pcie_reg.h"
++#include "ifxmips_pcie.h"
++
++#define IFX_MSI_IRQ_NUM    16
++
++enum {
++    IFX_PCIE_MSI_IDX0 = 0,
++    IFX_PCIE_MSI_IDX1,
++    IFX_PCIE_MSI_IDX2,
++    IFX_PCIE_MSI_IDX3,
++};
++
++typedef struct ifx_msi_irq_idx {
++    const int irq;
++    const int idx;
++}ifx_msi_irq_idx_t;
++
++struct ifx_msi_pic {
++    volatile u32  pic_table[IFX_MSI_IRQ_NUM];
++    volatile u32  pic_endian;    /* 0x40  */
++};
++typedef struct ifx_msi_pic *ifx_msi_pic_t;
++
++typedef struct ifx_msi_irq {
++    const volatile ifx_msi_pic_t msi_pic_p;
++    const u32 msi_phy_base;
++    const ifx_msi_irq_idx_t msi_irq_idx[IFX_MSI_IRQ_NUM];
++    /*
++     * Each bit in msi_free_irq_bitmask represents an MSI interrupt that is
++     * in use.
++     */
++    u16 msi_free_irq_bitmask;
++
++    /*
++     * Each bit in msi_multiple_irq_bitmask indicates that the device using
++     * this bit in msi_free_irq_bitmask is also using the next bit. This 
++     * is used so we can disable all of the MSI interrupts when a device 
++     * uses multiple.
++     */
++    u16 msi_multiple_irq_bitmask;
++}ifx_msi_irq_t;
++
++static ifx_msi_irq_t msi_irqs[IFX_PCIE_CORE_NR] = {
++    {
++        .msi_pic_p = (const volatile ifx_msi_pic_t)IFX_MSI_PIC_REG_BASE,
++        .msi_phy_base = PCIE_MSI_PHY_BASE,
++        .msi_irq_idx = {
++            {IFX_PCIE_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE_MSI_IR1, IFX_PCIE_MSI_IDX1},
++            {IFX_PCIE_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE_MSI_IR3, IFX_PCIE_MSI_IDX3},
++            {IFX_PCIE_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE_MSI_IR1, IFX_PCIE_MSI_IDX1},
++            {IFX_PCIE_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE_MSI_IR3, IFX_PCIE_MSI_IDX3},
++            {IFX_PCIE_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE_MSI_IR1, IFX_PCIE_MSI_IDX1},
++            {IFX_PCIE_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE_MSI_IR3, IFX_PCIE_MSI_IDX3},
++            {IFX_PCIE_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE_MSI_IR1, IFX_PCIE_MSI_IDX1},
++            {IFX_PCIE_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE_MSI_IR3, IFX_PCIE_MSI_IDX3},
++        },
++        .msi_free_irq_bitmask = 0,
++        .msi_multiple_irq_bitmask= 0,
++    },
++#ifdef CONFIG_IFX_PCIE_2ND_CORE
++    {
++        .msi_pic_p = (const volatile ifx_msi_pic_t)IFX_MSI1_PIC_REG_BASE,
++        .msi_phy_base = PCIE1_MSI_PHY_BASE,
++        .msi_irq_idx = {
++            {IFX_PCIE1_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE1_MSI_IR1, IFX_PCIE_MSI_IDX1},
++            {IFX_PCIE1_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE1_MSI_IR3, IFX_PCIE_MSI_IDX3},
++            {IFX_PCIE1_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE1_MSI_IR1, IFX_PCIE_MSI_IDX1},
++            {IFX_PCIE1_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE1_MSI_IR3, IFX_PCIE_MSI_IDX3},
++            {IFX_PCIE1_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE1_MSI_IR1, IFX_PCIE_MSI_IDX1},
++            {IFX_PCIE1_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE1_MSI_IR3, IFX_PCIE_MSI_IDX3},
++            {IFX_PCIE1_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE1_MSI_IR1, IFX_PCIE_MSI_IDX1},
++            {IFX_PCIE1_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE1_MSI_IR3, IFX_PCIE_MSI_IDX3},
++        },
++        .msi_free_irq_bitmask = 0,
++        .msi_multiple_irq_bitmask= 0,
++
++    },
++#endif /* CONFIG_IFX_PCIE_2ND_CORE */
++};
++
++/* 
++ * This lock controls updates to msi_free_irq_bitmask, 
++ * msi_multiple_irq_bitmask and pic register setting
++ */ 
++static DEFINE_SPINLOCK(ifx_pcie_msi_lock);
++
++void pcie_msi_pic_init(int pcie_port)
++{
++    spin_lock(&ifx_pcie_msi_lock);
++    msi_irqs[pcie_port].msi_pic_p->pic_endian = IFX_MSI_PIC_BIG_ENDIAN;
++    spin_unlock(&ifx_pcie_msi_lock);
++}
++
++/** 
++ * \fn int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
++ * \brief Called when a driver requests MSI interrupts instead of the
++ * legacy INT A-D. This routine will allocate multiple interrupts 
++ * for MSI devices that support them. A device can override this by 
++ * programming the MSI control bits [6:4] before calling 
++ * pci_enable_msi(). 
++ * 
++ * \param[in] pdev   Device requesting MSI interrupts 
++ * \param[in] desc   MSI descriptor 
++ * 
++ * \return   -EINVAL Invalid pcie root port or invalid msi bit
++ * \return    0        OK
++ * \ingroup IFX_PCIE_MSI
++ */
++int 
++arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
++{
++    int  irq, pos;
++    u16  control;
++    int  irq_idx;
++    int  irq_step;
++    int configured_private_bits;
++    int request_private_bits;
++    struct msi_msg msg;
++    u16 search_mask;
++    struct ifx_pci_controller *ctrl = pdev->bus->sysdata;
++    int pcie_port = ctrl->port;
++
++    IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s %s enter\n", __func__, pci_name(pdev));
++
++    /* XXX, skip RC MSI itself */
++    if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
++        IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s RC itself doesn't use MSI interrupt\n", __func__);
++        return -EINVAL;
++    }
++
++    /*
++     * Read the MSI config to figure out how many IRQs this device 
++     * wants.  Most devices only want 1, which will give 
++     * configured_private_bits and request_private_bits equal 0. 
++     */
++    pci_read_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &control);
++
++    /*
++     * If the number of private bits has been configured then use 
++     * that value instead of the requested number. This gives the 
++     * driver the chance to override the number of interrupts 
++     * before calling pci_enable_msi(). 
++     */
++    configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4; 
++    if (configured_private_bits == 0) {
++        /* Nothing is configured, so use the hardware requested size */
++        request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1;
++    }
++    else {
++        /*
++         * Use the number of configured bits, assuming the 
++         * driver wanted to override the hardware request 
++         * value.
++         */
++        request_private_bits = configured_private_bits;
++    }
++
++    /*
++     * The PCI 2.3 spec mandates that there are at most 32
++     * interrupts. If this device asks for more, only give it one.
++     */
++    if (request_private_bits > 5) {
++        request_private_bits = 0;
++    }
++again:
++    /*
++     * The IRQs have to be aligned on a power of two based on the
++     * number being requested.
++     */
++    irq_step = (1 << request_private_bits);
++
++    /* Mask with one bit for each IRQ */
++    search_mask = (1 << irq_step) - 1;
++
++    /*
++     * We're going to search msi_free_irq_bitmask_lock for zero 
++     * bits. This represents an MSI interrupt number that isn't in 
++     * use.
++     */
++    spin_lock(&ifx_pcie_msi_lock);
++    for (pos = 0; pos < IFX_MSI_IRQ_NUM; pos += irq_step) {
++        if ((msi_irqs[pcie_port].msi_free_irq_bitmask & (search_mask << pos)) == 0) {
++            msi_irqs[pcie_port].msi_free_irq_bitmask |= search_mask << pos; 
++            msi_irqs[pcie_port].msi_multiple_irq_bitmask |= (search_mask >> 1) << pos;
++            break; 
++        }
++    }
++    spin_unlock(&ifx_pcie_msi_lock); 
++
++    /* Make sure the search for available interrupts didn't fail */ 
++    if (pos >= IFX_MSI_IRQ_NUM) {
++        if (request_private_bits) {
++            IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s: Unable to find %d free "
++                  "interrupts, trying just one", __func__, 1 << request_private_bits);
++            request_private_bits = 0;
++            goto again;
++        }
++        else {
++            printk(KERN_ERR "%s: Unable to find a free MSI interrupt\n", __func__);
++            return -EINVAL;
++        }
++    } 
++    irq = msi_irqs[pcie_port].msi_irq_idx[pos].irq;
++    irq_idx = msi_irqs[pcie_port].msi_irq_idx[pos].idx;
++
++    IFX_PCIE_PRINT(PCIE_MSG_MSI, "pos %d, irq %d irq_idx %d\n", pos, irq, irq_idx);
++
++    /*
++     * Initialize MSI. This has to match the memory-write endianness from the device.
++     * Address bits [23:12]
++     */
++    spin_lock(&ifx_pcie_msi_lock); 
++    msi_irqs[pcie_port].msi_pic_p->pic_table[pos] = SM(irq_idx, IFX_MSI_PIC_INT_LINE) |
++                    SM((msi_irqs[pcie_port].msi_phy_base >> 12), IFX_MSI_PIC_MSG_ADDR) |
++                    SM((1 << pos), IFX_MSI_PIC_MSG_DATA);
++
++    /* Enable this entry */
++    msi_irqs[pcie_port].msi_pic_p->pic_table[pos] &= ~IFX_MSI_PCI_INT_DISABLE;
++    spin_unlock(&ifx_pcie_msi_lock);
++
++    IFX_PCIE_PRINT(PCIE_MSG_MSI, "pic_table[%d]: 0x%08x\n",
++        pos, msi_irqs[pcie_port].msi_pic_p->pic_table[pos]);
++
++    /* Update the number of IRQs the device has available to it */
++    control &= ~PCI_MSI_FLAGS_QSIZE;
++    control |= (request_private_bits << 4);
++    pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS, control);
++
++    set_irq_msi(irq, desc);
++    msg.address_hi = 0x0;
++    msg.address_lo = msi_irqs[pcie_port].msi_phy_base;
++    msg.data = SM((1 << pos), IFX_MSI_PIC_MSG_DATA);
++    IFX_PCIE_PRINT(PCIE_MSG_MSI, "msi_data: pos %d 0x%08x\n", pos, msg.data);
++
++    write_msi_msg(irq, &msg);
++    IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s exit\n", __func__);
++    return 0;
++}
++
++static int
++pcie_msi_irq_to_port(unsigned int irq, int *port)
++{
++    int ret = 0;
++
++    if (irq == IFX_PCIE_MSI_IR0 || irq == IFX_PCIE_MSI_IR1 ||
++        irq == IFX_PCIE_MSI_IR2 || irq == IFX_PCIE_MSI_IR3) {
++        *port = IFX_PCIE_PORT0;
++    }
++#ifdef CONFIG_IFX_PCIE_2ND_CORE
++    else if (irq == IFX_PCIE1_MSI_IR0 || irq == IFX_PCIE1_MSI_IR1 ||
++        irq == IFX_PCIE1_MSI_IR2 || irq == IFX_PCIE1_MSI_IR3) {
++        *port = IFX_PCIE_PORT1;
++    }
++#endif /* CONFIG_IFX_PCIE_2ND_CORE */
++    else {
++        printk(KERN_ERR "%s: Attempted to teardown illegal " 
++            "MSI interrupt (%d)\n", __func__, irq);
++        ret = -EINVAL;
++    }
++    return ret;
++}
++
++/** 
++ * \fn void arch_teardown_msi_irq(unsigned int irq)
++ * \brief Called when a device no longer needs its MSI interrupts. All 
++ * MSI interrupts for the device are freed. 
++ * 
++ * \param irq   The device's first irq number. There may be multiple in sequence.
++ * \return none
++ * \ingroup IFX_PCIE_MSI
++ */
++void 
++arch_teardown_msi_irq(unsigned int irq)
++{
++    int pos;
++    int number_irqs; 
++    u16 bitmask;
++    int pcie_port;
++
++    IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s enter\n", __func__);
++
++    BUG_ON(irq > INT_NUM_IM4_IRL31);
++
++    if (pcie_msi_irq_to_port(irq, &pcie_port) != 0) {
++        return;
++    }
++
++    /* Shift the mask to the correct bit location; this is not always correct.
++     * Probably, the first match will be chosen.
++     */
++    for (pos = 0; pos < IFX_MSI_IRQ_NUM; pos++) {
++        if ((msi_irqs[pcie_port].msi_irq_idx[pos].irq == irq) 
++            && (msi_irqs[pcie_port].msi_free_irq_bitmask & ( 1 << pos))) {
++            break;
++        }
++    }
++    if (pos >= IFX_MSI_IRQ_NUM) {
++        printk(KERN_ERR "%s: Unable to find a matched MSI interrupt\n", __func__);
++        return;
++    }
++    spin_lock(&ifx_pcie_msi_lock);
++    /* Disable this entry */
++    msi_irqs[pcie_port].msi_pic_p->pic_table[pos] |= IFX_MSI_PCI_INT_DISABLE;
++    msi_irqs[pcie_port].msi_pic_p->pic_table[pos] &= ~(IFX_MSI_PIC_INT_LINE | IFX_MSI_PIC_MSG_ADDR | IFX_MSI_PIC_MSG_DATA);
++    spin_unlock(&ifx_pcie_msi_lock); 
++    /*
++     * Count the number of IRQs we need to free by looking at the
++     * msi_multiple_irq_bitmask. Each bit set means that the next
++     * IRQ is also owned by this device.
++     */ 
++    number_irqs = 0; 
++    while (((pos + number_irqs) < IFX_MSI_IRQ_NUM) && 
++        (msi_irqs[pcie_port].msi_multiple_irq_bitmask & (1 << (pos + number_irqs)))) {
++        number_irqs++;
++    }
++    number_irqs++;
++
++    /* Mask with one bit for each IRQ */
++    bitmask = (1 << number_irqs) - 1;
++
++    bitmask <<= pos;
++    if ((msi_irqs[pcie_port].msi_free_irq_bitmask & bitmask) != bitmask) {
++        printk(KERN_ERR "%s: Attempted to teardown MSI "
++             "interrupt (%d) not in use\n", __func__, irq);
++        return;
++    }
++    /* Checks are done, update the in use bitmask */
++    spin_lock(&ifx_pcie_msi_lock);
++    msi_irqs[pcie_port].msi_free_irq_bitmask &= ~bitmask;
++    msi_irqs[pcie_port].msi_multiple_irq_bitmask &= ~(bitmask >> 1);
++    spin_unlock(&ifx_pcie_msi_lock);
++    IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s exit\n", __func__);
++}
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Chuanhua.Lei at infineon.com");
++MODULE_SUPPORTED_DEVICE("Infineon PCIe IP builtin MSI PIC module");
++MODULE_DESCRIPTION("Infineon PCIe IP builtin MSI PIC driver");
++
+--- /dev/null
++++ b/arch/mips/pci/ifxmips_pcie_phy.c
+@@ -0,0 +1,478 @@
++/******************************************************************************
++**
++** FILE NAME    : ifxmips_pcie_phy.c
++** PROJECT      : IFX UEIP for VRX200
++** MODULES      : PCIe PHY sub module
++**
++** DATE         : 14 May 2009
++** AUTHOR       : Lei Chuanhua
++** DESCRIPTION  : PCIe Root Complex Driver
++** COPYRIGHT    :       Copyright (c) 2009
++**                      Infineon Technologies AG
++**                      Am Campeon 1-12, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++** HISTORY
++** $Version $Date        $Author         $Comment
++** 0.0.1    14 May,2009  Lei Chuanhua    Initial version
++*******************************************************************************/
++/*!
++ \file ifxmips_pcie_phy.c
++ \ingroup IFX_PCIE  
++ \brief PCIe PHY PLL register programming source file
++*/
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <asm/paccess.h>
++#include <linux/delay.h>
++
++#include "ifxmips_pcie_reg.h"
++#include "ifxmips_pcie.h"
++
++/* PCIe PDI only supports 16 bit operation */
++
++#define IFX_PCIE_PHY_REG_WRITE16(__addr, __data) \
++    ((*(volatile u16 *) (__addr)) = (__data))
++    
++#define IFX_PCIE_PHY_REG_READ16(__addr)  \
++    (*(volatile u16 *) (__addr))
++
++#define IFX_PCIE_PHY_REG16(__addr)   \
++    (*(volatile u16 *) (__addr))
++
++#define IFX_PCIE_PHY_REG(__reg, __value, __mask) do { \
++    u16 read_data;                                    \
++    u16 write_data;                                   \
++    read_data = IFX_PCIE_PHY_REG_READ16((__reg));      \
++    write_data = (read_data & ((u16)~(__mask))) | (((u16)(__value)) & ((u16)(__mask)));\
++    IFX_PCIE_PHY_REG_WRITE16((__reg), write_data);               \
++} while (0)
++
++#define IFX_PCIE_PLL_TIMEOUT 1000 /* Tunable */
++
++//#define IFX_PCI_PHY_REG_DUMP
++
++#ifdef IFX_PCI_PHY_REG_DUMP
++static void
++pcie_phy_reg_dump(int pcie_port) 
++{
++    printk("PLL REGFILE\n");
++    printk("PCIE_PHY_PLL_CTRL1    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_CTRL1(pcie_port)));
++    printk("PCIE_PHY_PLL_CTRL2    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_CTRL2(pcie_port)));
++    printk("PCIE_PHY_PLL_CTRL3    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_CTRL3(pcie_port)));
++    printk("PCIE_PHY_PLL_CTRL4    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_CTRL4(pcie_port)));
++    printk("PCIE_PHY_PLL_CTRL5    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_CTRL5(pcie_port)));
++    printk("PCIE_PHY_PLL_CTRL6    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_CTRL6(pcie_port)));
++    printk("PCIE_PHY_PLL_CTRL7    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_CTRL7(pcie_port)));
++    printk("PCIE_PHY_PLL_A_CTRL1  0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_A_CTRL1(pcie_port)));
++    printk("PCIE_PHY_PLL_A_CTRL2  0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_A_CTRL2(pcie_port)));
++    printk("PCIE_PHY_PLL_A_CTRL3  0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_A_CTRL3(pcie_port)));
++    printk("PCIE_PHY_PLL_STATUS   0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_STATUS(pcie_port)));
++
++    printk("TX1 REGFILE\n");
++    printk("PCIE_PHY_TX1_CTRL1    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX1_CTRL1(pcie_port)));
++    printk("PCIE_PHY_TX1_CTRL2    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX1_CTRL2(pcie_port)));
++    printk("PCIE_PHY_TX1_CTRL3    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX1_CTRL3(pcie_port)));
++    printk("PCIE_PHY_TX1_A_CTRL1  0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX1_A_CTRL1(pcie_port)));
++    printk("PCIE_PHY_TX1_A_CTRL2  0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX1_A_CTRL2(pcie_port)));
++    printk("PCIE_PHY_TX1_MOD1     0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX1_MOD1(pcie_port)));
++    printk("PCIE_PHY_TX1_MOD2     0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX1_MOD2(pcie_port)));
++    printk("PCIE_PHY_TX1_MOD3     0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX1_MOD3(pcie_port)));
++
++    printk("TX2 REGFILE\n");
++    printk("PCIE_PHY_TX2_CTRL1    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX2_CTRL1(pcie_port)));
++    printk("PCIE_PHY_TX2_CTRL2    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX2_CTRL2(pcie_port)));
++    printk("PCIE_PHY_TX2_A_CTRL1  0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX2_A_CTRL1(pcie_port)));
++    printk("PCIE_PHY_TX2_A_CTRL2  0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX2_A_CTRL2(pcie_port)));
++    printk("PCIE_PHY_TX2_MOD1     0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX2_MOD1(pcie_port)));
++    printk("PCIE_PHY_TX2_MOD2     0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX2_MOD2(pcie_port)));
++    printk("PCIE_PHY_TX2_MOD3     0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_TX2_MOD3(pcie_port)));
++
++    printk("RX1 REGFILE\n");
++    printk("PCIE_PHY_RX1_CTRL1    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_RX1_CTRL1(pcie_port)));
++    printk("PCIE_PHY_RX1_CTRL2    0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_RX1_CTRL2(pcie_port)));
++    printk("PCIE_PHY_RX1_CDR      0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_RX1_CDR(pcie_port)));
++    printk("PCIE_PHY_RX1_EI       0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_RX1_EI(pcie_port)));
++    printk("PCIE_PHY_RX1_A_CTRL   0x%04x\n", IFX_PCIE_PHY_REG16(PCIE_PHY_RX1_A_CTRL(pcie_port)));
++}
++#endif /* IFX_PCI_PHY_REG_DUMP */
++
++static void
++pcie_phy_comm_setup(int pcie_port)
++{
++   /* PLL Setting */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL1(pcie_port), 0x120e, 0xFFFF);
++
++    /* increase the bias reference voltage */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x39D7, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x0900, 0xFFFF);
++
++    /* Endcnt */
++    IFX_PCIE_PHY_REG(PCIE_PHY_RX1_EI(pcie_port), 0x0004, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_RX1_A_CTRL(pcie_port), 0x6803, 0xFFFF);
++
++    /* force */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL1(pcie_port), 0x0008, 0x0008);
++
++    /* predrv_ser_en */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_A_CTRL2(pcie_port), 0x0706, 0xFFFF);
++
++    /* ctrl_lim */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL3(pcie_port), 0x1FFF, 0xFFFF);
++
++    /* ctrl */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_A_CTRL1(pcie_port), 0x0800, 0xFF00);
++
++    /* predrv_ser_en */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX2_A_CTRL2(pcie_port), 0x4702, 0x7F00);
++
++    /* RTERM*/
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL2(pcie_port), 0x2e00, 0xFFFF);
++
++    /* Improved 100MHz clock output  */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX2_CTRL2(pcie_port), 0x3096, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX2_A_CTRL2(pcie_port), 0x4707, 0xFFFF);
++
++    /* Reduced CDR BW to avoid glitches */
++    IFX_PCIE_PHY_REG(PCIE_PHY_RX1_CDR(pcie_port), 0x0235, 0xFFFF);
++}
++
++#ifdef CONFIG_IFX_PCIE_PHY_36MHZ_MODE
++static void 
++pcie_phy_36mhz_mode_setup(int pcie_port) 
++{
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d enter\n", __func__, pcie_port);
++#ifdef IFX_PCI_PHY_REG_DUMP
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "Initial PHY register dump\n");
++    pcie_phy_reg_dump(pcie_port);
++#endif
++
++    /* en_ext_mmd_div_ratio */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0002);
++
++    /* ext_mmd_div_ratio*/
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0070);
++
++    /* pll_ensdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0200, 0x0200);
++
++    /* en_const_sdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0100, 0x0100);
++
++    /* mmd */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x2000, 0xe000);
++
++    /* lf_mode */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x0000, 0x4000);
++
++    /* const_sdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL1(pcie_port), 0x38e4, 0xFFFF);
++
++    /* const sdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x00ee, 0x00FF);
++
++    /* pllmod */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL7(pcie_port), 0x0002, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL6(pcie_port), 0x3a04, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL5(pcie_port), 0xfae3, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL4(pcie_port), 0x1b72, 0xFFFF);
++
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d exit\n", __func__, pcie_port);
++}
++#endif /* CONFIG_IFX_PCIE_PHY_36MHZ_MODE */
++
++#ifdef CONFIG_IFX_PCIE_PHY_36MHZ_SSC_MODE
++static void 
++pcie_phy_36mhz_ssc_mode_setup(int pcie_port) 
++{
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d enter\n", __func__, pcie_port);
++#ifdef IFX_PCI_PHY_REG_DUMP
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "Initial PHY register dump\n");
++    pcie_phy_reg_dump(pcie_port);
++#endif
++
++    /* PLL Setting */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL1(pcie_port), 0x120e, 0xFFFF);
++
++    /* Increase the bias reference voltage */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x39D7, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x0900, 0xFFFF);
++
++    /* Endcnt */
++    IFX_PCIE_PHY_REG(PCIE_PHY_RX1_EI(pcie_port), 0x0004, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_RX1_A_CTRL(pcie_port), 0x6803, 0xFFFF);
++
++    /* Force */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL1(pcie_port), 0x0008, 0x0008);
++
++    /* Predrv_ser_en */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_A_CTRL2(pcie_port), 0x0706, 0xFFFF);
++
++    /* ctrl_lim */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL3(pcie_port), 0x1FFF, 0xFFFF);
++
++    /* ctrl */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_A_CTRL1(pcie_port), 0x0800, 0xFF00);
++
++    /* predrv_ser_en */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX2_A_CTRL2(pcie_port), 0x4702, 0x7F00);
++
++    /* RTERM*/
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL2(pcie_port), 0x2e00, 0xFFFF);
++
++    /* en_ext_mmd_div_ratio */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0002);
++
++    /* ext_mmd_div_ratio*/
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0070);
++
++    /* pll_ensdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0400, 0x0400);
++
++    /* en_const_sdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0200, 0x0200);
++
++    /* mmd */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x2000, 0xe000);
++
++    /* lf_mode */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x0000, 0x4000);
++
++    /* const_sdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL1(pcie_port), 0x38e4, 0xFFFF);
++
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0000, 0x0100);
++    /* const sdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x00ee, 0x00FF);
++
++    /* pllmod */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL7(pcie_port), 0x0002, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL6(pcie_port), 0x3a04, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL5(pcie_port), 0xfae3, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL4(pcie_port), 0x1c72, 0xFFFF);
++
++    /* improved 100MHz clock output  */
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX2_CTRL2(pcie_port), 0x3096, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX2_A_CTRL2(pcie_port), 0x4707, 0xFFFF);
++
++    /* reduced CDR BW to avoid glitches */
++    IFX_PCIE_PHY_REG(PCIE_PHY_RX1_CDR(pcie_port), 0x0235, 0xFFFF);
++    
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d exit\n", __func__, pcie_port);
++}
++#endif /* CONFIG_IFX_PCIE_PHY_36MHZ_SSC_MODE */
++
++#ifdef CONFIG_IFX_PCIE_PHY_25MHZ_MODE
++static void 
++pcie_phy_25mhz_mode_setup(int pcie_port) 
++{
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d enter\n", __func__, pcie_port);
++#ifdef IFX_PCI_PHY_REG_DUMP
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "Initial PHY register dump\n");
++    pcie_phy_reg_dump(pcie_port);
++#endif
++    /* en_const_sdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0100, 0x0100);
++
++    /* pll_ensdm */    
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0000, 0x0200);
++
++    /* en_ext_mmd_div_ratio*/
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0002, 0x0002);
++
++    /* ext_mmd_div_ratio*/
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0040, 0x0070);
++
++    /* mmd */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x6000, 0xe000);
++
++    /* lf_mode */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x4000, 0x4000);
++
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d exit\n", __func__, pcie_port);
++}
++#endif /* CONFIG_IFX_PCIE_PHY_25MHZ_MODE */
++
++#ifdef CONFIG_IFX_PCIE_PHY_100MHZ_MODE
++static void 
++pcie_phy_100mhz_mode_setup(int pcie_port) 
++{
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d enter\n", __func__, pcie_port);
++#ifdef IFX_PCI_PHY_REG_DUMP
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "Initial PHY register dump\n");
++    pcie_phy_reg_dump(pcie_port);
++#endif 
++    /* en_ext_mmd_div_ratio */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0002);
++
++    /* ext_mmd_div_ratio*/
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0070);
++
++    /* pll_ensdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0200, 0x0200);
++
++    /* en_const_sdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0100, 0x0100);
++
++    /* mmd */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x2000, 0xe000);
++
++    /* lf_mode */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x0000, 0x4000);
++
++    /* const_sdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL1(pcie_port), 0x38e4, 0xFFFF);
++
++    /* const sdm */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x00ee, 0x00FF);
++
++    /* pllmod */
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL7(pcie_port), 0x0002, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL6(pcie_port), 0x3a04, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL5(pcie_port), 0xfae3, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL4(pcie_port), 0x1b72, 0xFFFF);
++
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d exit\n", __func__, pcie_port);
++}
++#endif /* CONFIG_IFX_PCIE_PHY_100MHZ_MODE */
++
++static int
++pcie_phy_wait_startup_ready(int pcie_port)
++{
++    int i;
++
++    for (i = 0; i < IFX_PCIE_PLL_TIMEOUT; i++) {
++        if ((IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_STATUS(pcie_port)) & 0x0040) != 0) {
++            break;
++        }
++        udelay(10);
++    }
++    if (i >= IFX_PCIE_PLL_TIMEOUT) {
++        printk(KERN_ERR "%s PLL Link timeout\n", __func__);
++        return -1;
++    }
++    return 0;
++}
++
++static void 
++pcie_phy_load_enable(int pcie_port, int slice) 
++{
++    /* Set the load_en of tx/rx slice to '1' */
++    switch (slice) {
++        case 1:
++            IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL1(pcie_port), 0x0010, 0x0010);
++            break;
++        case 2:
++            IFX_PCIE_PHY_REG(PCIE_PHY_TX2_CTRL1(pcie_port), 0x0010, 0x0010);
++            break;
++        case 3:
++            IFX_PCIE_PHY_REG(PCIE_PHY_RX1_CTRL1(pcie_port), 0x0002, 0x0002);
++            break;
++    }
++}
++
++static void 
++pcie_phy_load_disable(int pcie_port, int slice) 
++{ 
++    /* set the load_en of tx/rx slice to '0' */ 
++    switch (slice) {
++        case 1:
++            IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL1(pcie_port), 0x0000, 0x0010);
++            break;
++        case 2:
++            IFX_PCIE_PHY_REG(PCIE_PHY_TX2_CTRL1(pcie_port), 0x0000, 0x0010);
++            break;
++        case 3: 
++            IFX_PCIE_PHY_REG(PCIE_PHY_RX1_CTRL1(pcie_port), 0x0000, 0x0002);
++            break;
++    }
++}
++
++static void 
++pcie_phy_load_war(int pcie_port)
++{
++    int slice;
++
++    for (slice = 1; slice < 4; slice++) {
++        pcie_phy_load_enable(pcie_port, slice);
++        udelay(1);
++        pcie_phy_load_disable(pcie_port, slice);
++    }
++}
++
++static void 
++pcie_phy_tx2_modulation(int pcie_port)
++{
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX2_MOD1(pcie_port), 0x1FFE, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX2_MOD2(pcie_port), 0xFFFE, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX2_MOD3(pcie_port), 0x0601, 0xFFFF);
++    mdelay(1);
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX2_MOD3(pcie_port), 0x0001, 0xFFFF);
++}
++
++static void 
++pcie_phy_tx1_modulation(int pcie_port)
++{
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_MOD1(pcie_port), 0x1FFE, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_MOD2(pcie_port), 0xFFFE, 0xFFFF);
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_MOD3(pcie_port), 0x0601, 0xFFFF);
++    mdelay(1);
++    IFX_PCIE_PHY_REG(PCIE_PHY_TX1_MOD3(pcie_port), 0x0001, 0xFFFF);
++}
++
++static void
++pcie_phy_tx_modulation_war(int pcie_port)
++{
++    int i;
++
++#define PCIE_PHY_MODULATION_NUM 5 
++    for (i = 0; i < PCIE_PHY_MODULATION_NUM; i++) {
++        pcie_phy_tx2_modulation(pcie_port);
++        pcie_phy_tx1_modulation(pcie_port);
++    }
++#undef PCIE_PHY_MODULATION_NUM
++}
++
++void
++pcie_phy_clock_mode_setup(int pcie_port)
++{
++    pcie_pdi_big_endian(pcie_port);
++
++    /* Enable PDI to access PCIe PHY register */
++    pcie_pdi_pmu_enable(pcie_port);
++
++    /* Configure PLL and PHY clock */
++    pcie_phy_comm_setup(pcie_port);
++
++#ifdef CONFIG_IFX_PCIE_PHY_36MHZ_MODE
++    pcie_phy_36mhz_mode_setup(pcie_port);
++#elif defined(CONFIG_IFX_PCIE_PHY_36MHZ_SSC_MODE)
++    pcie_phy_36mhz_ssc_mode_setup(pcie_port);
++#elif defined(CONFIG_IFX_PCIE_PHY_25MHZ_MODE)
++    pcie_phy_25mhz_mode_setup(pcie_port);
++#elif defined (CONFIG_IFX_PCIE_PHY_100MHZ_MODE)
++    pcie_phy_100mhz_mode_setup(pcie_port);
++#else
++    #error "PCIE PHY Clock Mode must be chosen first!!!!"
++#endif /* CONFIG_IFX_PCIE_PHY_36MHZ_MODE */
++
++    /* Enable PCIe PHY and make PLL setting take effect */
++    pcie_phy_pmu_enable(pcie_port);
++
++    /* Check if we are in startup_ready status */
++    pcie_phy_wait_startup_ready(pcie_port);
++
++    pcie_phy_load_war(pcie_port);
++
++    /* Apply TX modulation workarounds */
++    pcie_phy_tx_modulation_war(pcie_port);
++
++#ifdef IFX_PCI_PHY_REG_DUMP
++    IFX_PCIE_PRINT(PCIE_MSG_PHY, "Modified PHY register dump\n");
++    pcie_phy_reg_dump(pcie_port);
++#endif
++}
++
+--- /dev/null
++++ b/arch/mips/pci/ifxmips_pcie_pm.c
+@@ -0,0 +1,176 @@
++/******************************************************************************
++**
++** FILE NAME    : ifxmips_pcie_pm.c
++** PROJECT      : IFX UEIP
++** MODULES      : PCIE Root Complex Driver
++**
++** DATE         : 21 Dec 2009
++** AUTHOR       : Lei Chuanhua
++** DESCRIPTION  : PCIE Root Complex Driver Power Management
++** COPYRIGHT    :       Copyright (c) 2009
++**                      Lantiq Deutschland GmbH
++**                      Am Campeon 3, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++**
++** HISTORY
++** $Date        $Author         $Comment
++** 21 Dec,2009   Lei Chuanhua    First UEIP release
++*******************************************************************************/
++/*!
++  \defgroup IFX_PCIE_PM Power Management functions
++  \ingroup IFX_PCIE
++  \brief IFX PCIE Root Complex Driver power management functions
++*/
++
++/*!
++ \file ifxmips_pcie_pm.c
++ \ingroup IFX_PCIE    
++ \brief source file for PCIE Root Complex Driver Power Management
++*/
++
++#ifndef EXPORT_SYMTAB
++#define EXPORT_SYMTAB
++#endif
++#ifndef AUTOCONF_INCLUDED
++#include <linux/config.h>
++#endif /* AUTOCONF_INCLUDED */
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <asm/system.h>
++
++/* Project header */
++#include <asm/ifx/ifx_types.h>
++#include <asm/ifx/ifx_regs.h>
++#include <asm/ifx/common_routines.h>
++#include <asm/ifx/ifx_pmcu.h>
++#include "ifxmips_pcie_pm.h"
++
++/** 
++ * \fn static IFX_PMCU_RETURN_t ifx_pcie_pmcu_state_change(IFX_PMCU_STATE_t pmcuState)
++ * \brief the callback function to request a pmcu state change in the power management hardware-dependent module
++ *
++ * \param pmcuState This parameter is a PMCU state.
++ *
++ * \return IFX_PMCU_RETURN_SUCCESS Set Power State successfully
++ * \return IFX_PMCU_RETURN_ERROR   Failed to set power state.
++ * \return IFX_PMCU_RETURN_DENIED  Not allowed to operate power state
++ * \ingroup IFX_PCIE_PM
++ */
++static IFX_PMCU_RETURN_t 
++ifx_pcie_pmcu_state_change(IFX_PMCU_STATE_t pmcuState)
++{
++    switch(pmcuState) 
++    {
++        case IFX_PMCU_STATE_D0:
++            return IFX_PMCU_RETURN_SUCCESS;
++        case IFX_PMCU_STATE_D1: // Not Applicable
++            return IFX_PMCU_RETURN_DENIED;
++        case IFX_PMCU_STATE_D2: // Not Applicable
++            return IFX_PMCU_RETURN_DENIED;
++        case IFX_PMCU_STATE_D3: // Module clock gating and Power gating
++            return IFX_PMCU_RETURN_SUCCESS;
++        default:
++            return IFX_PMCU_RETURN_DENIED;
++    }
++}
++
++/** 
++ * \fn static IFX_PMCU_RETURN_t ifx_pcie_pmcu_state_get(IFX_PMCU_STATE_t *pmcuState)
++ * \brief the callback function to get the pmcu state in the power management hardware-dependent module
++ *
++ * \param pmcuState Pointer to return power state.
++ *
++ * \return IFX_PMCU_RETURN_SUCCESS Set Power State successfully
++ * \return IFX_PMCU_RETURN_ERROR   Failed to set power state.
++ * \return IFX_PMCU_RETURN_DENIED  Not allowed to operate power state
++ * \ingroup IFX_PCIE_PM
++ */
++static IFX_PMCU_RETURN_t 
++ifx_pcie_pmcu_state_get(IFX_PMCU_STATE_t *pmcuState)
++{
++    return IFX_PMCU_RETURN_SUCCESS;
++}
++
++/**
++ * \fn IFX_PMCU_RETURN_t ifx_pcie_pmcu_prechange(IFX_PMCU_MODULE_t pmcuModule, IFX_PMCU_STATE_t newState, IFX_PMCU_STATE_t oldState)
++ * \brief Apply all callbacks registered to be executed before a state change for pmcuModule
++ * 
++ * \param   pmcuModule      Module
++ * \param   newState        New state
++ * \param   oldState        Old state
++ * \return  IFX_PMCU_RETURN_SUCCESS Set Power State successfully
++ * \return  IFX_PMCU_RETURN_ERROR   Failed to set power state.
++ * \ingroup IFX_PCIE_PM
++ */
++static IFX_PMCU_RETURN_t 
++ifx_pcie_pmcu_prechange(IFX_PMCU_MODULE_t pmcuModule, IFX_PMCU_STATE_t newState, IFX_PMCU_STATE_t oldState)
++{
++    return IFX_PMCU_RETURN_SUCCESS;
++}
++
++/**
++ * \fn IFX_PMCU_RETURN_t ifx_pcie_pmcu_postchange(IFX_PMCU_MODULE_t pmcuModule, IFX_PMCU_STATE_t newState, IFX_PMCU_STATE_t oldState)
++ * \brief Apply all callbacks registered to be executed after a state change for pmcuModule
++ * 
++ * \param   pmcuModule      Module
++ * \param   newState        New state
++ * \param   oldState        Old state
++ * \return IFX_PMCU_RETURN_SUCCESS Set Power State successfully
++ * \return IFX_PMCU_RETURN_ERROR   Failed to set power state.
++ * \ingroup IFX_PCIE_PM
++ */
++static IFX_PMCU_RETURN_t 
++ifx_pcie_pmcu_postchange(IFX_PMCU_MODULE_t pmcuModule, IFX_PMCU_STATE_t newState, IFX_PMCU_STATE_t oldState)
++{
++    return IFX_PMCU_RETURN_SUCCESS;
++}
++
++/** 
++ * \fn static void ifx_pcie_pmcu_init(void)
++ * \brief Register with central PMCU module
++ * \return none
++ * \ingroup IFX_PCIE_PM
++ */
++void
++ifx_pcie_pmcu_init(void)
++{
++    IFX_PMCU_REGISTER_t pmcuRegister;
++
++    /* XXX, hook driver context */
++
++    /* State function register */
++    memset(&pmcuRegister, 0, sizeof(IFX_PMCU_REGISTER_t));
++    pmcuRegister.pmcuModule = IFX_PMCU_MODULE_PCIE;
++    pmcuRegister.pmcuModuleNr = 0;
++    pmcuRegister.ifx_pmcu_state_change = ifx_pcie_pmcu_state_change;
++    pmcuRegister.ifx_pmcu_state_get = ifx_pcie_pmcu_state_get;
++    pmcuRegister.pre = ifx_pcie_pmcu_prechange;
++    pmcuRegister.post= ifx_pcie_pmcu_postchange;
++    ifx_pmcu_register(&pmcuRegister); 
++}
++
++/** 
++ * \fn static void ifx_pcie_pmcu_exit(void)
++ * \brief Unregister with central PMCU module
++ *
++ * \return none
++ * \ingroup IFX_PCIE_PM
++ */
++void
++ifx_pcie_pmcu_exit(void)
++{
++    IFX_PMCU_REGISTER_t pmcuUnRegister;
++
++   /* XXX, hook driver context */
++   
++    pmcuUnRegister.pmcuModule = IFX_PMCU_MODULE_PCIE;
++    pmcuUnRegister.pmcuModuleNr = 0;
++    ifx_pmcu_unregister(&pmcuUnRegister);
++}
++
+--- /dev/null
++++ b/arch/mips/pci/ifxmips_pcie_pm.h
+@@ -0,0 +1,36 @@
++/******************************************************************************
++**
++** FILE NAME    : ifxmips_pcie_pm.h
++** PROJECT      : IFX UEIP
++** MODULES      : PCIe Root Complex Driver
++**
++** DATE         : 21 Dec 2009
++** AUTHOR       : Lei Chuanhua
++** DESCRIPTION  : PCIe Root Complex Driver Power Management
++** COPYRIGHT    :       Copyright (c) 2009
++**                      Lantiq Deutschland GmbH
++**                      Am Campeon 3, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++**
++** HISTORY
++** $Date        $Author         $Comment
++** 21 Dec,2009   Lei Chuanhua    First UEIP release
++*******************************************************************************/
++/*!
++ \file ifxmips_pcie_pm.h
++ \ingroup IFX_PCIE 
++ \brief header file for PCIe Root Complex Driver Power Management
++*/
++
++#ifndef IFXMIPS_PCIE_PM_H
++#define IFXMIPS_PCIE_PM_H
++
++void ifx_pcie_pmcu_init(void);
++void ifx_pcie_pmcu_exit(void);
++
++#endif /* IFXMIPS_PCIE_PM_H  */
++
+--- /dev/null
++++ b/arch/mips/pci/ifxmips_pcie_reg.h
+@@ -0,0 +1,1001 @@
++/******************************************************************************
++**
++** FILE NAME    : ifxmips_pcie_reg.h
++** PROJECT      : IFX UEIP for VRX200
++** MODULES      : PCIe module
++**
++** DATE         : 02 Mar 2009
++** AUTHOR       : Lei Chuanhua
++** DESCRIPTION  : PCIe Root Complex Driver
++** COPYRIGHT    :       Copyright (c) 2009
++**                      Infineon Technologies AG
++**                      Am Campeon 1-12, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++** HISTORY
++** $Version $Date        $Author         $Comment
++** 0.0.1    17 Mar,2009  Lei Chuanhua    Initial version
++*******************************************************************************/
++#ifndef IFXMIPS_PCIE_REG_H
++#define IFXMIPS_PCIE_REG_H
++/*!
++ \file ifxmips_pcie_reg.h
++ \ingroup IFX_PCIE  
++ \brief header file for PCIe module register definition
++*/
++/* PCIe Address Mapping Base */
++#define PCIE_CFG_PHY_BASE        0x1D000000UL
++#define PCIE_CFG_BASE           (KSEG1 + PCIE_CFG_PHY_BASE)
++#define PCIE_CFG_SIZE           (8 * 1024 * 1024)
++
++#define PCIE_MEM_PHY_BASE        0x1C000000UL
++#define PCIE_MEM_BASE           (KSEG1 + PCIE_MEM_PHY_BASE)
++#define PCIE_MEM_SIZE           (16 * 1024 * 1024)
++#define PCIE_MEM_PHY_END        (PCIE_MEM_PHY_BASE + PCIE_MEM_SIZE - 1)
++
++#define PCIE_IO_PHY_BASE         0x1D800000UL
++#define PCIE_IO_BASE            (KSEG1 + PCIE_IO_PHY_BASE)
++#define PCIE_IO_SIZE            (1 * 1024 * 1024)
++#define PCIE_IO_PHY_END         (PCIE_IO_PHY_BASE + PCIE_IO_SIZE - 1)
++
++#define PCIE_RC_CFG_BASE        (KSEG1 + 0x1D900000)
++#define PCIE_APP_LOGIC_REG      (KSEG1 + 0x1E100900)
++#define PCIE_MSI_PHY_BASE        0x1F600000UL
++
++#define PCIE_PDI_PHY_BASE        0x1F106800UL
++#define PCIE_PDI_BASE           (KSEG1 + PCIE_PDI_PHY_BASE)
++#define PCIE_PDI_SIZE            0x400
++
++#define PCIE1_CFG_PHY_BASE        0x19000000UL
++#define PCIE1_CFG_BASE           (KSEG1 + PCIE1_CFG_PHY_BASE)
++#define PCIE1_CFG_SIZE           (8 * 1024 * 1024)
++
++#define PCIE1_MEM_PHY_BASE        0x18000000UL
++#define PCIE1_MEM_BASE           (KSEG1 + PCIE1_MEM_PHY_BASE)
++#define PCIE1_MEM_SIZE           (16 * 1024 * 1024)
++#define PCIE1_MEM_PHY_END        (PCIE1_MEM_PHY_BASE + PCIE1_MEM_SIZE - 1)
++
++#define PCIE1_IO_PHY_BASE         0x19800000UL
++#define PCIE1_IO_BASE            (KSEG1 + PCIE1_IO_PHY_BASE)
++#define PCIE1_IO_SIZE            (1 * 1024 * 1024)
++#define PCIE1_IO_PHY_END         (PCIE1_IO_PHY_BASE + PCIE1_IO_SIZE - 1)
++
++#define PCIE1_RC_CFG_BASE        (KSEG1 + 0x19900000)
++#define PCIE1_APP_LOGIC_REG      (KSEG1 + 0x1E100700)
++#define PCIE1_MSI_PHY_BASE        0x1F400000UL
++
++#define PCIE1_PDI_PHY_BASE        0x1F700400UL
++#define PCIE1_PDI_BASE           (KSEG1 + PCIE1_PDI_PHY_BASE)
++#define PCIE1_PDI_SIZE            0x400
++
++#define PCIE_CFG_PORT_TO_BASE(X)     ((X) > 0 ? (PCIE1_CFG_BASE) : (PCIE_CFG_BASE))
++#define PCIE_MEM_PORT_TO_BASE(X)     ((X) > 0 ? (PCIE1_MEM_BASE) : (PCIE_MEM_BASE))
++#define PCIE_IO_PORT_TO_BASE(X)      ((X) > 0 ? (PCIE1_IO_BASE) : (PCIE_IO_BASE))
++#define PCIE_MEM_PHY_PORT_TO_BASE(X) ((X) > 0 ? (PCIE1_MEM_PHY_BASE) : (PCIE_MEM_PHY_BASE))
++#define PCIE_MEM_PHY_PORT_TO_END(X)  ((X) > 0 ? (PCIE1_MEM_PHY_END) : (PCIE_MEM_PHY_END))
++#define PCIE_IO_PHY_PORT_TO_BASE(X)  ((X) > 0 ? (PCIE1_IO_PHY_BASE) : (PCIE_IO_PHY_BASE))
++#define PCIE_IO_PHY_PORT_TO_END(X)   ((X) > 0 ? (PCIE1_IO_PHY_END) : (PCIE_IO_PHY_END))
++#define PCIE_APP_PORT_TO_BASE(X)     ((X) > 0 ? (PCIE1_APP_LOGIC_REG) : (PCIE_APP_LOGIC_REG))
++#define PCIE_RC_PORT_TO_BASE(X)      ((X) > 0 ? (PCIE1_RC_CFG_BASE) : (PCIE_RC_CFG_BASE))
++#define PCIE_PHY_PORT_TO_BASE(X)     ((X) > 0 ? (PCIE1_PDI_BASE) : (PCIE_PDI_BASE))
++
++/* PCIe Application Logic Register */
++/* RC Core Control Register */
++#define PCIE_RC_CCR(X)                      (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x10)
++/* This should be enabled after initializing the configuration registers.
++ * Also check the link status retraining bit.
++ */
++#define PCIE_RC_CCR_LTSSM_ENABLE             0x00000001    /* Enable LTSSM to continue link establishment */
++
++/* RC Core Debug Register */
++#define PCIE_RC_DR(X)                       (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x14)
++#define PCIE_RC_DR_DLL_UP                    0x00000001  /* Data Link Layer Up */
++#define PCIE_RC_DR_CURRENT_POWER_STATE       0x0000000E  /* Current Power State */
++#define PCIE_RC_DR_CURRENT_POWER_STATE_S     1
++#define PCIE_RC_DR_CURRENT_LTSSM_STATE       0x000001F0  /* Current LTSSM State */
++#define PCIE_RC_DR_CURRENT_LTSSM_STATE_S     4
++
++#define PCIE_RC_DR_PM_DEV_STATE              0x00000E00  /* Power Management D-State */
++#define PCIE_RC_DR_PM_DEV_STATE_S            9
++
++#define PCIE_RC_DR_PM_ENABLED                0x00001000  /* Power Management State from PMU */
++#define PCIE_RC_DR_PME_EVENT_ENABLED         0x00002000  /* Power Management Event Enable State */
++#define PCIE_RC_DR_AUX_POWER_ENABLED         0x00004000  /* Auxiliary Power Enable */
++
++/* Current Power State Definition */
++enum {
++    PCIE_RC_DR_D0 = 0,
++    PCIE_RC_DR_D1,   /* Not supported */
++    PCIE_RC_DR_D2,   /* Not supported */
++    PCIE_RC_DR_D3,
++    PCIE_RC_DR_UN,
++};
++
++/* PHY Link Status Register */
++#define PCIE_PHY_SR(X)                      (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x18)
++#define PCIE_PHY_SR_PHY_LINK_UP              0x00000001   /* PHY Link Up/Down Indicator */
++
++/* Electromechanical Control Register */
++#define PCIE_EM_CR(X)                       (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x1C)
++#define PCIE_EM_CR_CARD_IS_PRESENT           0x00000001  /* Card Presence Detect State */
++#define PCIE_EM_CR_MRL_OPEN                  0x00000002  /* MRL Sensor State */
++#define PCIE_EM_CR_POWER_FAULT_SET           0x00000004  /* Power Fault Detected */
++#define PCIE_EM_CR_MRL_SENSOR_SET            0x00000008  /* MRL Sensor Changed */
++#define PCIE_EM_CR_PRESENT_DETECT_SET        0x00000010  /* Card Presence Detect Changed */
++#define PCIE_EM_CR_CMD_CPL_INT_SET           0x00000020  /* Command Complete Interrupt */
++#define PCIE_EM_CR_SYS_INTERLOCK_SET         0x00000040  /* System Electromechanical Interlock Engaged */
++#define PCIE_EM_CR_ATTENTION_BUTTON_SET      0x00000080  /* Attention Button Pressed */
++
++/* Interrupt Status Register */
++#define PCIE_IR_SR(X)                       (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x20)
++#define PCIE_IR_SR_PME_CAUSE_MSI             0x00000002  /* MSI caused by PME */
++#define PCIE_IR_SR_HP_PME_WAKE_GEN           0x00000004  /* Hotplug PME Wake Generation */
++#define PCIE_IR_SR_HP_MSI                    0x00000008  /* Hotplug MSI */
++#define PCIE_IR_SR_AHB_LU_ERR                0x00000030  /* AHB Bridge Lookup Error Signals */
++#define PCIE_IR_SR_AHB_LU_ERR_S              4
++#define PCIE_IR_SR_INT_MSG_NUM               0x00003E00  /* Interrupt Message Number */
++#define PCIE_IR_SR_INT_MSG_NUM_S             9
++#define PCIE_IR_SR_AER_INT_MSG_NUM           0xF8000000  /* Advanced Error Interrupt Message Number */
++#define PCIE_IR_SR_AER_INT_MSG_NUM_S         27
++
++/* Message Control Register */
++#define PCIE_MSG_CR(X)                      (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x30)
++#define PCIE_MSG_CR_GEN_PME_TURN_OFF_MSG     0x00000001  /* Generate PME Turn Off Message */
++#define PCIE_MSG_CR_GEN_UNLOCK_MSG           0x00000002  /* Generate Unlock Message */
++
++#define PCIE_VDM_DR(X)                      (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x34)
++
++/* Vendor-Defined Message Requester ID Register */
++#define PCIE_VDM_RID(X)                     (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x38)
++#define PCIE_VDM_RID_VENROR_MSG_REQ_ID       0x0000FFFF
++#define PCIE_VDM_RID_VDMRID_S                0
++
++/* ASPM Control Register */
++#define PCIE_ASPM_CR(X)                     (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x40)
++#define PCIE_ASPM_CR_HOT_RST                 0x00000001  /* Hot Reset Request to the downstream device */
++#define PCIE_ASPM_CR_REQ_EXIT_L1             0x00000002  /* Request to Exit L1 */
++#define PCIE_ASPM_CR_REQ_ENTER_L1            0x00000004  /* Request to Enter L1 */
++
++/* Vendor Message DW0 Register */
++#define PCIE_VM_MSG_DW0(X)                  (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x50)
++#define PCIE_VM_MSG_DW0_TYPE                 0x0000001F  /* Message type */
++#define PCIE_VM_MSG_DW0_TYPE_S               0
++#define PCIE_VM_MSG_DW0_FORMAT               0x00000060  /* Format */
++#define PCIE_VM_MSG_DW0_FORMAT_S             5
++#define PCIE_VM_MSG_DW0_TC                   0x00007000  /* Traffic Class */
++#define PCIE_VM_MSG_DW0_TC_S                 12
++#define PCIE_VM_MSG_DW0_ATTR                 0x000C0000  /* Attributes */
++#define PCIE_VM_MSG_DW0_ATTR_S               18
++#define PCIE_VM_MSG_DW0_EP_TLP               0x00100000  /* Poisoned TLP */
++#define PCIE_VM_MSG_DW0_TD                   0x00200000  /* TLP Digest */
++#define PCIE_VM_MSG_DW0_LEN                  0xFFC00000  /* Length */
++#define PCIE_VM_MSG_DW0_LEN_S                22
++
++/* Format Definition */
++enum {
++    PCIE_VM_MSG_FORMAT_00 = 0,  /* 3DW Hdr, no data*/
++    PCIE_VM_MSG_FORMAT_01,      /* 4DW Hdr, no data */
++    PCIE_VM_MSG_FORMAT_10,      /* 3DW Hdr, with data */
++    PCIE_VM_MSG_FORMAT_11,      /* 4DW Hdr, with data */
++};
++
++/* Traffic Class Definition */
++enum {
++    PCIE_VM_MSG_TC0 = 0,
++    PCIE_VM_MSG_TC1,
++    PCIE_VM_MSG_TC2,
++    PCIE_VM_MSG_TC3,
++    PCIE_VM_MSG_TC4,
++    PCIE_VM_MSG_TC5,
++    PCIE_VM_MSG_TC6,
++    PCIE_VM_MSG_TC7,
++};
++
++/* Attributes Definition */
++enum {
++    PCIE_VM_MSG_ATTR_00 = 0,   /* RO and No Snoop cleared */
++    PCIE_VM_MSG_ATTR_01,       /* RO cleared , No Snoop set */
++    PCIE_VM_MSG_ATTR_10,       /* RO set, No Snoop cleared*/
++    PCIE_VM_MSG_ATTR_11,       /* RO and No Snoop set */
++};
++
++/* Payload Size Definition */
++#define PCIE_VM_MSG_LEN_MIN  0
++#define PCIE_VM_MSG_LEN_MAX  1024
++
++/* Vendor Message DW1 Register */
++#define PCIE_VM_MSG_DW1(X)                 (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x54)
++#define PCIE_VM_MSG_DW1_FUNC_NUM            0x00000070  /* Function Number */
++#define PCIE_VM_MSG_DW1_FUNC_NUM_S          8
++#define PCIE_VM_MSG_DW1_CODE                0x00FF0000  /* Message Code */
++#define PCIE_VM_MSG_DW1_CODE_S              16
++#define PCIE_VM_MSG_DW1_TAG                 0xFF000000  /* Tag */
++#define PCIE_VM_MSG_DW1_TAG_S               24
++
++#define PCIE_VM_MSG_DW2(X)                  (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x58)
++#define PCIE_VM_MSG_DW3(X)                  (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x5C)
++
++/* Vendor Message Request Register */
++#define PCIE_VM_MSG_REQR(X)                 (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x60)
++#define PCIE_VM_MSG_REQR_REQ                 0x00000001  /* Vendor Message Request */
++
++
++/* AHB Slave Side Band Control Register */
++#define PCIE_AHB_SSB(X)                     (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x70)
++#define PCIE_AHB_SSB_REQ_BCM                0x00000001 /* Slave Request BCM field */
++#define PCIE_AHB_SSB_REQ_EP                 0x00000002 /* Slave Request EP field */
++#define PCIE_AHB_SSB_REQ_TD                 0x00000004 /* Slave Request TD field */
++#define PCIE_AHB_SSB_REQ_ATTR               0x00000018 /* Slave Request Attribute number */
++#define PCIE_AHB_SSB_REQ_ATTR_S             3
++#define PCIE_AHB_SSB_REQ_TC                 0x000000E0 /* Slave Request TC Field */
++#define PCIE_AHB_SSB_REQ_TC_S               5
++
++/* AHB Master SideBand Ctrl Register */
++#define PCIE_AHB_MSB(X)                     (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x74)
++#define PCIE_AHB_MSB_RESP_ATTR               0x00000003 /* Master Response Attribute number */
++#define PCIE_AHB_MSB_RESP_ATTR_S             0
++#define PCIE_AHB_MSB_RESP_BAD_EOT            0x00000004 /* Master Response Bad EOT field */
++#define PCIE_AHB_MSB_RESP_BCM                0x00000008 /* Master Response BCM field */
++#define PCIE_AHB_MSB_RESP_EP                 0x00000010 /* Master Response EP field */
++#define PCIE_AHB_MSB_RESP_TD                 0x00000020 /* Master Response TD field */
++#define PCIE_AHB_MSB_RESP_FUN_NUM            0x000003C0 /* Master Response Function number */
++#define PCIE_AHB_MSB_RESP_FUN_NUM_S          6
++
++/* AHB Control Register, fixed bus enumeration exception */
++#define PCIE_AHB_CTRL(X)                     (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x78)
++#define PCIE_AHB_CTRL_BUS_ERROR_SUPPRESS     0x00000001 
++
++/* Interrupt Enable Register */
++#define PCIE_IRNEN(X)                        (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0xF4)
++#define PCIE_IRNCR(X)                        (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0xF8)
++#define PCIE_IRNICR(X)                       (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0xFC)
++
++/* PCIe interrupt enable/control/capture register definition */
++#define PCIE_IRN_AER_REPORT                 0x00000001  /* AER Interrupt */
++#define PCIE_IRN_AER_MSIX                   0x00000002  /* Advanced Error MSI-X Interrupt */
++#define PCIE_IRN_PME                        0x00000004  /* PME Interrupt */
++#define PCIE_IRN_HOTPLUG                    0x00000008  /* Hotplug Interrupt */
++#define PCIE_IRN_RX_VDM_MSG                 0x00000010  /* Vendor-Defined Message Interrupt */
++#define PCIE_IRN_RX_CORRECTABLE_ERR_MSG     0x00000020  /* Correctable Error Message Interrupt */
++#define PCIE_IRN_RX_NON_FATAL_ERR_MSG       0x00000040  /* Non-fatal Error Message */
++#define PCIE_IRN_RX_FATAL_ERR_MSG           0x00000080  /* Fatal Error Message */
++#define PCIE_IRN_RX_PME_MSG                 0x00000100  /* PME Message Interrupt */
++#define PCIE_IRN_RX_PME_TURNOFF_ACK         0x00000200  /* PME Turnoff Ack Message Interrupt */
++#define PCIE_IRN_AHB_BR_FATAL_ERR           0x00000400  /* AHB Fatal Error Interrupt */
++#define PCIE_IRN_LINK_AUTO_BW_STATUS        0x00000800  /* Link Auto Bandwidth Status Interrupt */
++#define PCIE_IRN_BW_MGT                     0x00001000  /* Bandwidth Management Interrupt */
++#define PCIE_IRN_INTA                       0x00002000  /* INTA */
++#define PCIE_IRN_INTB                       0x00004000  /* INTB */
++#define PCIE_IRN_INTC                       0x00008000  /* INTC */
++#define PCIE_IRN_INTD                       0x00010000  /* INTD */
++#define PCIE_IRN_WAKEUP                     0x00020000  /* Wake up Interrupt */
++
++#define PCIE_RC_CORE_COMBINED_INT    (PCIE_IRN_AER_REPORT |  PCIE_IRN_AER_MSIX | PCIE_IRN_PME | \
++                                      PCIE_IRN_HOTPLUG | PCIE_IRN_RX_VDM_MSG | PCIE_IRN_RX_CORRECTABLE_ERR_MSG |\
++                                      PCIE_IRN_RX_NON_FATAL_ERR_MSG | PCIE_IRN_RX_FATAL_ERR_MSG | \
++                                      PCIE_IRN_RX_PME_MSG | PCIE_IRN_RX_PME_TURNOFF_ACK | PCIE_IRN_AHB_BR_FATAL_ERR | \
++                                      PCIE_IRN_LINK_AUTO_BW_STATUS | PCIE_IRN_BW_MGT)
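++
++/*
++ * Illustrative sketch (not part of the original driver): the combined RC
++ * core interrupts above would typically be enabled by writing this mask to
++ * the per-port interrupt enable register, assuming the driver's
++ * IFX_REG_W32 accessor:
++ *
++ *   IFX_REG_W32(PCIE_RC_CORE_COMBINED_INT, PCIE_IRNEN(port));
++ */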
++/* PCIe RC Configuration Register */
++#define PCIE_VDID(X)                (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x00)
++
++/* Bit definition from pci_reg.h */
++#define PCIE_PCICMDSTS(X)           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x04)
++#define PCIE_CCRID(X)               (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x08)
++#define PCIE_CLSLTHTBR(X)           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x0C) /* EP only */
++/* BAR0, BAR1: only necessary if the bridge implements a device-specific register set or memory buffer */
++#define PCIE_BAR0(X)                (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x10) /* Not used*/
++#define PCIE_BAR1(X)                (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x14) /* Not used */
++
++#define PCIE_BNR(X)                 (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x18) /* Mandatory */
++/* Bus Number Register bits */
++#define PCIE_BNR_PRIMARY_BUS_NUM             0x000000FF
++#define PCIE_BNR_PRIMARY_BUS_NUM_S           0
++#define PCIE_PNR_SECONDARY_BUS_NUM           0x0000FF00
++#define PCIE_PNR_SECONDARY_BUS_NUM_S         8
++#define PCIE_PNR_SUB_BUS_NUM                 0x00FF0000
++#define PCIE_PNR_SUB_BUS_NUM_S               16
++
++/* IO Base/Limit Register bits */
++#define PCIE_IOBLSECS(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x1C)  /* RC only */
++#define PCIE_IOBLSECS_32BIT_IO_ADDR             0x00000001
++#define PCIE_IOBLSECS_IO_BASE_ADDR              0x000000F0
++#define PCIE_IOBLSECS_IO_BASE_ADDR_S            4
++#define PCIE_IOBLSECS_32BIT_IOLIMT              0x00000100
++#define PCIE_IOBLSECS_IO_LIMIT_ADDR             0x0000F000
++#define PCIE_IOBLSECS_IO_LIMIT_ADDR_S           12
++
++/* Non-prefetchable Memory Base/Limit Register bit */
++#define PCIE_MBML(X)                           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x20)  /* RC only */
++#define PCIE_MBML_MEM_BASE_ADDR                 0x0000FFF0
++#define PCIE_MBML_MEM_BASE_ADDR_S               4
++#define PCIE_MBML_MEM_LIMIT_ADDR                0xFFF00000
++#define PCIE_MBML_MEM_LIMIT_ADDR_S              20
++
++/* Prefetchable Memory Base/Limit Register bit */
++#define PCIE_PMBL(X)                           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x24)  /* RC only */
++#define PCIE_PMBL_64BIT_ADDR                    0x00000001
++#define PCIE_PMBL_UPPER_12BIT                   0x0000FFF0
++#define PCIE_PMBL_UPPER_12BIT_S                 4
++#define PCIE_PMBL_E64MA                         0x00010000
++#define PCIE_PMBL_END_ADDR                      0xFFF00000
++#define PCIE_PMBL_END_ADDR_S                    20
++#define PCIE_PMBU32(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x28)  /* RC only */
++#define PCIE_PMLU32(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x2C)  /* RC only */
++
++/* I/O Base/Limit Upper 16 bits register */
++#define PCIE_IO_BANDL(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x30)  /* RC only */
++#define PCIE_IO_BANDL_UPPER_16BIT_IO_BASE        0x0000FFFF
++#define PCIE_IO_BANDL_UPPER_16BIT_IO_BASE_S      0
++#define PCIE_IO_BANDL_UPPER_16BIT_IO_LIMIT       0xFFFF0000
++#define PCIE_IO_BANDL_UPPER_16BIT_IO_LIMIT_S     16
++
++#define PCIE_CPR(X)                            (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x34)
++#define PCIE_EBBAR(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x38)
++
++/* Interrupt and Secondary Bridge Control Register */
++#define PCIE_INTRBCTRL(X)                      (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x3C)
++
++#define PCIE_INTRBCTRL_INT_LINE                 0x000000FF
++#define PCIE_INTRBCTRL_INT_LINE_S               0
++#define PCIE_INTRBCTRL_INT_PIN                  0x0000FF00
++#define PCIE_INTRBCTRL_INT_PIN_S                8
++#define PCIE_INTRBCTRL_PARITY_ERR_RESP_ENABLE   0x00010000    /* #PERR */
++#define PCIE_INTRBCTRL_SERR_ENABLE              0x00020000    /* #SERR */
++#define PCIE_INTRBCTRL_ISA_ENABLE               0x00040000    /* ISA enable, IO 64KB only */
++#define PCIE_INTRBCTRL_VGA_ENABLE               0x00080000    /* VGA enable */
++#define PCIE_INTRBCTRL_VGA_16BIT_DECODE         0x00100000    /* VGA 16bit decode */
++#define PCIE_INTRBCTRL_RST_SECONDARY_BUS        0x00400000    /* Secondary bus reset, hot reset, 1ms */
++/* Others are read only */
++enum {
++    PCIE_INTRBCTRL_INT_NON = 0,
++    PCIE_INTRBCTRL_INTA,
++    PCIE_INTRBCTRL_INTB,
++    PCIE_INTRBCTRL_INTC,
++    PCIE_INTRBCTRL_INTD,
++};
++
++#define PCIE_PM_CAPR(X)                  (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x40)
++
++/* Power Management Control and Status Register */
++#define PCIE_PM_CSR(X)                   (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x44)
++
++#define PCIE_PM_CSR_POWER_STATE           0x00000003   /* Power State */
++#define PCIE_PM_CSR_POWER_STATE_S         0
++#define PCIE_PM_CSR_SW_RST                0x00000008   /* Soft Reset Enabled */
++#define PCIE_PM_CSR_PME_ENABLE            0x00000100   /* PME Enable */
++#define PCIE_PM_CSR_PME_STATUS            0x00008000   /* PME status */
++
++/* MSI Capability Register for EP */
++#define PCIE_MCAPR(X)                    (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x50)
++
++#define PCIE_MCAPR_MSI_CAP_ID             0x000000FF  /* MSI Capability ID */
++#define PCIE_MCAPR_MSI_CAP_ID_S           0
++#define PCIE_MCAPR_MSI_NEXT_CAP_PTR       0x0000FF00  /* Next Capability Pointer */
++#define PCIE_MCAPR_MSI_NEXT_CAP_PTR_S     8
++#define PCIE_MCAPR_MSI_ENABLE             0x00010000  /* MSI Enable */
++#define PCIE_MCAPR_MULTI_MSG_CAP          0x000E0000  /* Multiple Message Capable */
++#define PCIE_MCAPR_MULTI_MSG_CAP_S        17
++#define PCIE_MCAPR_MULTI_MSG_ENABLE       0x00700000  /* Multiple Message Enable */
++#define PCIE_MCAPR_MULTI_MSG_ENABLE_S     20
++#define PCIE_MCAPR_ADDR64_CAP             0x00800000  /* 64-bit Address Capable */
++
++/* MSI Message Address Register */
++#define PCIE_MA(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x54)
++
++#define PCIE_MA_ADDR_MASK                 0xFFFFFFFC  /* Message Address */
++
++/* MSI Message Upper Address Register */
++#define PCIE_MUA(X)                      (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x58)
++
++/* MSI Message Data Register */
++#define PCIE_MD(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x5C)
++
++#define PCIE_MD_DATA                      0x0000FFFF  /* Message Data */
++#define PCIE_MD_DATA_S                    0
++
++/* PCI Express Capability Register */
++#define PCIE_XCAP(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x70)
++
++#define PCIE_XCAP_ID                      0x000000FF  /* PCI Express Capability ID */
++#define PCIE_XCAP_ID_S                    0
++#define PCIE_XCAP_NEXT_CAP                0x0000FF00  /* Next Capability Pointer */
++#define PCIE_XCAP_NEXT_CAP_S              8
++#define PCIE_XCAP_VER                     0x000F0000  /* PCI Express Capability Version */
++#define PCIE_XCAP_VER_S                   16
++#define PCIE_XCAP_DEV_PORT_TYPE           0x00F00000  /* Device Port Type */
++#define PCIE_XCAP_DEV_PORT_TYPE_S         20
++#define PCIE_XCAP_SLOT_IMPLEMENTED        0x01000000  /* Slot Implemented */
++#define PCIE_XCAP_MSG_INT_NUM             0x3E000000  /* Interrupt Message Number */
++#define PCIE_XCAP_MSG_INT_NUM_S           25
++
++/* Device Capability Register */
++#define PCIE_DCAP(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x74)
++
++#define PCIE_DCAP_MAX_PAYLOAD_SIZE        0x00000007   /* Max Payload size */
++#define PCIE_DCAP_MAX_PAYLOAD_SIZE_S      0
++#define PCIE_DCAP_PHANTOM_FUNC            0x00000018   /* Phantom Function, not supported */
++#define PCIE_DCAP_PHANTOM_FUNC_S          3
++#define PCIE_DCAP_EXT_TAG                 0x00000020   /* Extended Tag Field */
++#define PCIE_DCAP_EP_L0S_LATENCY          0x000001C0   /* EP L0s latency only */
++#define PCIE_DCAP_EP_L0S_LATENCY_S        6
++#define PCIE_DCAP_EP_L1_LATENCY           0x00000E00   /* EP L1 latency only */
++#define PCIE_DCAP_EP_L1_LATENCY_S         9
++#define PCIE_DCAP_ROLE_BASE_ERR_REPORT    0x00008000   /* Role Based ERR */
++
++/* Maximum payload size supported */
++enum {
++    PCIE_MAX_PAYLOAD_128 = 0,
++    PCIE_MAX_PAYLOAD_256,
++    PCIE_MAX_PAYLOAD_512,
++    PCIE_MAX_PAYLOAD_1024,
++    PCIE_MAX_PAYLOAD_2048,
++    PCIE_MAX_PAYLOAD_4096,
++};
++
++/* Device Control and Status Register */
++#define PCIE_DCTLSTS(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x78)
++
++#define PCIE_DCTLSTS_CORRECTABLE_ERR_EN        0x00000001   /* COR-ERR */
++#define PCIE_DCTLSTS_NONFATAL_ERR_EN           0x00000002   /* Non-fatal ERR */
++#define PCIE_DCTLSTS_FATAL_ERR_EN              0x00000004   /* Fatal ERR */
++#define PCIE_DCTLSYS_UR_REQ_EN                 0x00000008   /* UR ERR */
++#define PCIE_DCTLSTS_RELAXED_ORDERING_EN       0x00000010   /* Enable relaxing ordering */
++#define PCIE_DCTLSTS_MAX_PAYLOAD_SIZE          0x000000E0   /* Max payload mask */
++#define PCIE_DCTLSTS_MAX_PAYLOAD_SIZE_S        5
++#define PCIE_DCTLSTS_EXT_TAG_EN                0x00000100   /* Extended tag field */
++#define PCIE_DCTLSTS_PHANTOM_FUNC_EN           0x00000200   /* Phantom Function Enable */
++#define PCIE_DCTLSTS_AUX_PM_EN                 0x00000400   /* AUX Power PM Enable */
++#define PCIE_DCTLSTS_NO_SNOOP_EN               0x00000800   /* Enable no snoop, except root port*/
++#define PCIE_DCTLSTS_MAX_READ_SIZE             0x00007000   /* Max Read Request size*/
++#define PCIE_DCTLSTS_MAX_READ_SIZE_S           12
++#define PCIE_DCTLSTS_CORRECTABLE_ERR           0x00010000   /* COR-ERR Detected */
++#define PCIE_DCTLSTS_NONFATAL_ERR              0x00020000   /* Non-Fatal ERR Detected */
++#define PCIE_DCTLSTS_FATAL_ER                  0x00040000   /* Fatal ERR Detected */
++#define PCIE_DCTLSTS_UNSUPPORTED_REQ           0x00080000   /* UR Detected */
++#define PCIE_DCTLSTS_AUX_POWER                 0x00100000   /* Aux Power Detected */
++#define PCIE_DCTLSTS_TRANSACT_PENDING          0x00200000   /* Transaction pending */
++
++#define PCIE_DCTLSTS_ERR_EN      (PCIE_DCTLSTS_CORRECTABLE_ERR_EN | \
++                                  PCIE_DCTLSTS_NONFATAL_ERR_EN | PCIE_DCTLSTS_FATAL_ERR_EN | \
++                                  PCIE_DCTLSYS_UR_REQ_EN)
++
++/* Link Capability Register */
++#define PCIE_LCAP(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7C)
++#define PCIE_LCAP_MAX_LINK_SPEED               0x0000000F  /* Max link speed, 0x1 by default */
++#define PCIE_LCAP_MAX_LINK_SPEED_S             0
++#define PCIE_LCAP_MAX_LENGTH_WIDTH             0x000003F0  /* Maximum Length Width */
++#define PCIE_LCAP_MAX_LENGTH_WIDTH_S           4
++#define PCIE_LCAP_ASPM_LEVEL                   0x00000C00  /* Active State Link PM Support */
++#define PCIE_LCAP_ASPM_LEVEL_S                 10
++#define PCIE_LCAP_L0S_EIXT_LATENCY             0x00007000  /* L0s Exit Latency */
++#define PCIE_LCAP_L0S_EIXT_LATENCY_S           12
++#define PCIE_LCAP_L1_EXIT_LATENCY              0x00038000  /* L1 Exit Latency */
++#define PCIE_LCAP_L1_EXIT_LATENCY_S            15
++#define PCIE_LCAP_CLK_PM                       0x00040000  /* Clock Power Management */
++#define PCIE_LCAP_SDER                         0x00080000  /* Surprise Down Error Reporting */
++#define PCIE_LCAP_DLL_ACTIVE_REPROT            0x00100000  /* Data Link Layer Active Reporting Capable */
++#define PCIE_LCAP_PORT_NUM                     0xFF000000  /* Port number */
++#define PCIE_LCAP_PORT_NUM_S                   24
++
++/* Maximum Length width definition */
++#define PCIE_MAX_LENGTH_WIDTH_RES  0x00
++#define PCIE_MAX_LENGTH_WIDTH_X1   0x01  /* Default */
++#define PCIE_MAX_LENGTH_WIDTH_X2   0x02
++#define PCIE_MAX_LENGTH_WIDTH_X4   0x04
++#define PCIE_MAX_LENGTH_WIDTH_X8   0x08
++#define PCIE_MAX_LENGTH_WIDTH_X12  0x0C
++#define PCIE_MAX_LENGTH_WIDTH_X16  0x10
++#define PCIE_MAX_LENGTH_WIDTH_X32  0x20
++
++/* Active State Link PM definition */
++enum {
++    PCIE_ASPM_RES0                = 0,
++    PCIE_ASPM_L0S_ENTRY_SUPPORT,        /* L0s */
++    PCIE_ASPM_RES1,
++    PCIE_ASPM_L0S_L1_ENTRY_SUPPORT,     /* L0s and L1, default */
++};
++
++/* L0s Exit Latency definition */
++enum {
++    PCIE_L0S_EIXT_LATENCY_L64NS    = 0, /* < 64 ns */
++    PCIE_L0S_EIXT_LATENCY_B64A128,      /* > 64 ns < 128 ns */
++    PCIE_L0S_EIXT_LATENCY_B128A256,     /* > 128 ns < 256 ns */
++    PCIE_L0S_EIXT_LATENCY_B256A512,     /* > 256 ns < 512 ns */
++    PCIE_L0S_EIXT_LATENCY_B512TO1U,     /* > 512 ns < 1 us */
++    PCIE_L0S_EIXT_LATENCY_B1A2U,        /* > 1 us < 2 us */
++    PCIE_L0S_EIXT_LATENCY_B2A4U,        /* > 2 us < 4 us */
++    PCIE_L0S_EIXT_LATENCY_M4US,         /* > 4 us  */
++};
++
++/* L1 Exit Latency definition */
++enum {
++    PCIE_L1_EXIT_LATENCY_L1US  = 0,  /* < 1 us */
++    PCIE_L1_EXIT_LATENCY_B1A2,       /* > 1 us < 2 us */
++    PCIE_L1_EXIT_LATENCY_B2A4,       /* > 2 us < 4 us */
++    PCIE_L1_EXIT_LATENCY_B4A8,       /* > 4 us < 8 us */
++    PCIE_L1_EXIT_LATENCY_B8A16,      /* > 8 us < 16 us */
++    PCIE_L1_EXIT_LATENCY_B16A32,     /* > 16 us < 32 us */
++    PCIE_L1_EXIT_LATENCY_B32A64,     /* > 32 us < 64 us */
++    PCIE_L1_EXIT_LATENCY_M64US,      /* > 64 us */
++};
++
++/* Link Control and Status Register */
++#define PCIE_LCTLSTS(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x80)
++#define PCIE_LCTLSTS_ASPM_ENABLE            0x00000003  /* Active State Link PM Control */
++#define PCIE_LCTLSTS_ASPM_ENABLE_S          0
++#define PCIE_LCTLSTS_RCB128                 0x00000008  /* Read Completion Boundary 128*/
++#define PCIE_LCTLSTS_LINK_DISABLE           0x00000010  /* Link Disable */
++#define PCIE_LCTLSTS_RETRIAN_LINK           0x00000020  /* Retrain Link */
++#define PCIE_LCTLSTS_COM_CLK_CFG            0x00000040  /* Common Clock Configuration */
++#define PCIE_LCTLSTS_EXT_SYNC               0x00000080  /* Extended Synch */
++#define PCIE_LCTLSTS_CLK_PM_EN              0x00000100  /* Enable Clock Power Management */
++#define PCIE_LCTLSTS_LINK_SPEED             0x000F0000  /* Link Speed */
++#define PCIE_LCTLSTS_LINK_SPEED_S           16
++#define PCIE_LCTLSTS_NEGOTIATED_LINK_WIDTH  0x03F00000  /* Negotiated Link Width */
++#define PCIE_LCTLSTS_NEGOTIATED_LINK_WIDTH_S 20
++#define PCIE_LCTLSTS_RETRAIN_PENDING        0x08000000  /* Link training is ongoing */
++#define PCIE_LCTLSTS_SLOT_CLK_CFG           0x10000000  /* Slot Clock Configuration */
++#define PCIE_LCTLSTS_DLL_ACTIVE             0x20000000  /* Data Link Layer Active */
++
++/* Slot Capabilities Register */
++#define PCIE_SLCAP(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x84)
++
++/* Slot Capabilities */
++#define PCIE_SLCTLSTS(X)                    (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x88)
++
++/* Root Control and Capability Register */
++#define PCIE_RCTLCAP(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x8C)
++#define PCIE_RCTLCAP_SERR_ON_CORRECTABLE_ERR  0x00000001   /* #SERR on COR-ERR */
++#define PCIE_RCTLCAP_SERR_ON_NONFATAL_ERR     0x00000002   /* #SERR on Non-Fatal ERR */
++#define PCIE_RCTLCAP_SERR_ON_FATAL_ERR        0x00000004   /* #SERR on Fatal ERR */
++#define PCIE_RCTLCAP_PME_INT_EN               0x00000008   /* PME Interrupt Enable */
++#define PCIE_RCTLCAP_SERR_ENABLE    (PCIE_RCTLCAP_SERR_ON_CORRECTABLE_ERR | \
++                                     PCIE_RCTLCAP_SERR_ON_NONFATAL_ERR | PCIE_RCTLCAP_SERR_ON_FATAL_ERR)
++/* Root Status Register */
++#define PCIE_RSTS(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x90)
++#define PCIE_RSTS_PME_REQ_ID                   0x0000FFFF   /* PME Request ID */
++#define PCIE_RSTS_PME_REQ_ID_S                 0
++#define PCIE_RSTS_PME_STATUS                   0x00010000   /* PME Status */
++#define PCIE_RSTS_PME_PENDING                  0x00020000   /* PME Pending */
++
++/* PCI Express Enhanced Capability Header */
++#define PCIE_ENHANCED_CAP(X)                (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x100)
++#define PCIE_ENHANCED_CAP_ID                 0x0000FFFF  /* PCI Express Extended Capability ID */
++#define PCIE_ENHANCED_CAP_ID_S               0
++#define PCIE_ENHANCED_CAP_VER                0x000F0000  /* Capability Version */
++#define PCIE_ENHANCED_CAP_VER_S              16
++#define PCIE_ENHANCED_CAP_NEXT_OFFSET        0xFFF00000  /* Next Capability Offset */
++#define PCIE_ENHANCED_CAP_NEXT_OFFSET_S      20
++
++/* Uncorrectable Error Status Register */
++#define PCIE_UES_R(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x104)
++#define PCIE_DATA_LINK_PROTOCOL_ERR          0x00000010  /* Data Link Protocol Error Status */
++#define PCIE_SURPRISE_DOWN_ERROR             0x00000020  /* Surprise Down Error Status */
++#define PCIE_POISONED_TLP                    0x00001000  /* Poisoned TLP Status */
++#define PCIE_FC_PROTOCOL_ERR                 0x00002000  /* Flow Control Protocol Error Status */
++#define PCIE_COMPLETION_TIMEOUT              0x00004000  /* Completion Timeout Status */
++#define PCIE_COMPLETOR_ABORT                 0x00008000  /* Completer Abort Error */
++#define PCIE_UNEXPECTED_COMPLETION           0x00010000  /* Unexpected Completion Status */
++#define PCIE_RECEIVER_OVERFLOW               0x00020000  /* Receive Overflow Status */
++#define PCIE_MALFORNED_TLP                   0x00040000  /* Malformed TLP Status */
++#define PCIE_ECRC_ERR                        0x00080000  /* ECRC Error Status */
++#define PCIE_UR_REQ                          0x00100000  /* Unsupported Request Error Status */
++#define PCIE_ALL_UNCORRECTABLE_ERR    (PCIE_DATA_LINK_PROTOCOL_ERR | PCIE_SURPRISE_DOWN_ERROR | \
++                         PCIE_POISONED_TLP | PCIE_FC_PROTOCOL_ERR | PCIE_COMPLETION_TIMEOUT |   \
++                         PCIE_COMPLETOR_ABORT | PCIE_UNEXPECTED_COMPLETION | PCIE_RECEIVER_OVERFLOW |\
++                         PCIE_MALFORNED_TLP | PCIE_ECRC_ERR | PCIE_UR_REQ)
++
++/* Uncorrectable Error Mask Register, Mask means no report */
++#define PCIE_UEMR(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x108)
++
++/* Uncorrectable Error Severity Register */
++#define PCIE_UESR(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x10C)
++
++/* Correctable Error Status Register */
++#define PCIE_CESR(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x110)
++#define PCIE_RX_ERR                          0x00000001  /* Receive Error Status */
++#define PCIE_BAD_TLP                         0x00000040  /* Bad TLP Status */
++#define PCIE_BAD_DLLP                        0x00000080  /* Bad DLLP Status */
++#define PCIE_REPLAY_NUM_ROLLOVER             0x00000100  /* Replay Number Rollover Status */
++#define PCIE_REPLAY_TIMER_TIMEOUT_ERR        0x00001000  /* Replay Timer Timeout Status */
++#define PCIE_ADVISORY_NONFTAL_ERR            0x00002000  /* Advisory Non-Fatal Error Status */
++#define PCIE_CORRECTABLE_ERR        (PCIE_RX_ERR | PCIE_BAD_TLP | PCIE_BAD_DLLP | PCIE_REPLAY_NUM_ROLLOVER |\
++                                     PCIE_REPLAY_TIMER_TIMEOUT_ERR | PCIE_ADVISORY_NONFTAL_ERR)
++
++/* Correctable Error Mask Register */
++#define PCIE_CEMR(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x114)
++
++/* Advanced Error Capabilities and Control Register */
++#define PCIE_AECCR(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x118)
++#define PCIE_AECCR_FIRST_ERR_PTR            0x0000001F  /* First Error Pointer */
++#define PCIE_AECCR_FIRST_ERR_PTR_S          0
++#define PCIE_AECCR_ECRC_GEN_CAP             0x00000020  /* ECRC Generation Capable */
++#define PCIE_AECCR_ECRC_GEN_EN              0x00000040  /* ECRC Generation Enable */
++#define PCIE_AECCR_ECRC_CHECK_CAP           0x00000080  /* ECRC Check Capable */
++#define PCIE_AECCR_ECRC_CHECK_EN            0x00000100  /* ECRC Check Enable */
++
++/* Header Log Register 1 */
++#define PCIE_HLR1(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x11C)
++
++/* Header Log Register 2 */
++#define PCIE_HLR2(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x120)
++
++/* Header Log Register 3 */
++#define PCIE_HLR3(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x124)
++
++/* Header Log Register 4 */
++#define PCIE_HLR4(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x128)
++
++/* Root Error Command Register */
++#define PCIE_RECR(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x12C)
++#define PCIE_RECR_CORRECTABLE_ERR_REPORT_EN  0x00000001 /* COR-ERR */
++#define PCIE_RECR_NONFATAL_ERR_REPORT_EN     0x00000002 /* Non-Fatal ERR */
++#define PCIE_RECR_FATAL_ERR_REPORT_EN        0x00000004 /* Fatal ERR */
++#define PCIE_RECR_ERR_REPORT_EN  (PCIE_RECR_CORRECTABLE_ERR_REPORT_EN | \
++                PCIE_RECR_NONFATAL_ERR_REPORT_EN | PCIE_RECR_FATAL_ERR_REPORT_EN)
++
++/* Root Error Status Register */
++#define PCIE_RESR(X)                            (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x130)
++#define PCIE_RESR_CORRECTABLE_ERR                0x00000001   /* COR-ERR Received */
++#define PCIE_RESR_MULTI_CORRECTABLE_ERR          0x00000002   /* Multiple COR-ERR Received */
++#define PCIE_RESR_FATAL_NOFATAL_ERR              0x00000004   /* ERR Fatal/Non-Fatal Received */
++#define PCIE_RESR_MULTI_FATAL_NOFATAL_ERR        0x00000008   /* Multiple ERR Fatal/Non-Fatal Received */
++#define PCIE_RESR_FIRST_UNCORRECTABLE_FATAL_ERR  0x00000010   /* First UN-COR Fatal */
++#define PCIR_RESR_NON_FATAL_ERR                  0x00000020   /* Non-Fatal Error Message Received */
++#define PCIE_RESR_FATAL_ERR                      0x00000040   /* Fatal Message Received */
++#define PCIE_RESR_AER_INT_MSG_NUM                0xF8000000   /* Advanced Error Interrupt Message Number */
++#define PCIE_RESR_AER_INT_MSG_NUM_S              27
++
++/* Error Source Identification Register */
++#define PCIE_ESIR(X)                            (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x134)
++#define PCIE_ESIR_CORRECTABLE_ERR_SRC_ID         0x0000FFFF
++#define PCIE_ESIR_CORRECTABLE_ERR_SRC_ID_S       0
++#define PCIE_ESIR_FATAL_NON_FATAL_SRC_ID         0xFFFF0000
++#define PCIE_ESIR_FATAL_NON_FATAL_SRC_ID_S       16
++
++/* VC Enhanced Capability Header */
++#define PCIE_VC_ECH(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x140)
++
++/* Port VC Capability Register */
++#define PCIE_PVC1(X)                            (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x144)
++#define PCIE_PVC1_EXT_VC_CNT                    0x00000007  /* Extended VC Count */
++#define PCIE_PVC1_EXT_VC_CNT_S                  0
++#define PCIE_PVC1_LOW_PRI_EXT_VC_CNT            0x00000070  /* Low Priority Extended VC Count */
++#define PCIE_PVC1_LOW_PRI_EXT_VC_CNT_S          4
++#define PCIE_PVC1_REF_CLK                       0x00000300  /* Reference Clock */
++#define PCIE_PVC1_REF_CLK_S                     8
++#define PCIE_PVC1_PORT_ARB_TAB_ENTRY_SIZE       0x00000C00  /* Port Arbitration Table Entry Size */
++#define PCIE_PVC1_PORT_ARB_TAB_ENTRY_SIZE_S     10
++
++/* Extended Virtual Channel Count Definition */
++#define PCIE_EXT_VC_CNT_MIN   0
++#define PCIE_EXT_VC_CNT_MAX   7
++
++/* Port Arbitration Table Entry Size Definition */
++enum {
++    PCIE_PORT_ARB_TAB_ENTRY_SIZE_S1BIT = 0,
++    PCIE_PORT_ARB_TAB_ENTRY_SIZE_S2BIT,
++    PCIE_PORT_ARB_TAB_ENTRY_SIZE_S4BIT,
++    PCIE_PORT_ARB_TAB_ENTRY_SIZE_S8BIT,
++};
++
++/* Port VC Capability Register 2 */
++#define PCIE_PVC2(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x148)
++#define PCIE_PVC2_VC_ARB_16P_FIXED_WRR      0x00000001  /* HW Fixed arbitration, 16 phase WRR */
++#define PCIE_PVC2_VC_ARB_32P_WRR            0x00000002  /* 32 phase WRR */
++#define PCIE_PVC2_VC_ARB_64P_WRR            0x00000004  /* 64 phase WRR */
++#define PCIE_PVC2_VC_ARB_128P_WRR           0x00000008  /* 128 phase WRR */
++#define PCIE_PVC2_VC_ARB_WRR                0x0000000F
++#define PCIE_PVC2_VC_ARB_TAB_OFFSET         0xFF000000  /* VC arbitration table offset, not supported */
++#define PCIE_PVC2_VC_ARB_TAB_OFFSET_S       24
++
++/* Port VC Control and Status Register */     
++#define PCIE_PVCCRSR(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x14C)
++#define PCIE_PVCCRSR_LOAD_VC_ARB_TAB         0x00000001  /* Load VC Arbitration Table */
++#define PCIE_PVCCRSR_VC_ARB_SEL              0x0000000E  /* VC Arbitration Select */
++#define PCIE_PVCCRSR_VC_ARB_SEL_S            1
++#define PCIE_PVCCRSR_VC_ARB_TAB_STATUS       0x00010000  /* Arbitration Status */
++
++/* VC0 Resource Capability Register */
++#define PCIE_VC0_RC(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x150)
++#define PCIE_VC0_RC_PORT_ARB_HW_FIXED        0x00000001  /* HW Fixed arbitration */
++#define PCIE_VC0_RC_PORT_ARB_32P_WRR         0x00000002  /* 32 phase WRR */
++#define PCIE_VC0_RC_PORT_ARB_64P_WRR         0x00000004  /* 64 phase WRR */
++#define PCIE_VC0_RC_PORT_ARB_128P_WRR        0x00000008  /* 128 phase WRR */
++#define PCIE_VC0_RC_PORT_ARB_TM_128P_WRR     0x00000010  /* Time-based 128 phase WRR */
++#define PCIE_VC0_RC_PORT_ARB_TM_256P_WRR     0x00000020  /* Time-based 256 phase WRR */
++#define PCIE_VC0_RC_PORT_ARB          (PCIE_VC0_RC_PORT_ARB_HW_FIXED | PCIE_VC0_RC_PORT_ARB_32P_WRR |\
++                        PCIE_VC0_RC_PORT_ARB_64P_WRR | PCIE_VC0_RC_PORT_ARB_128P_WRR | \
++                        PCIE_VC0_RC_PORT_ARB_TM_128P_WRR | PCIE_VC0_RC_PORT_ARB_TM_256P_WRR)
++
++#define PCIE_VC0_RC_REJECT_SNOOP             0x00008000  /* Reject Snoop Transaction */
++#define PCIE_VC0_RC_MAX_TIMESLOTS            0x007F0000  /* Maximum time Slots */
++#define PCIE_VC0_RC_MAX_TIMESLOTS_S          16
++#define PCIE_VC0_RC_PORT_ARB_TAB_OFFSET      0xFF000000  /* Port Arbitration Table Offset */
++#define PCIE_VC0_RC_PORT_ARB_TAB_OFFSET_S    24
++
++/* VC0 Resource Control Register */
++#define PCIE_VC0_RC0(X)                      (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x154)
++#define PCIE_VC0_RC0_TVM0                    0x00000001  /* TC0 and VC0 */
++#define PCIE_VC0_RC0_TVM1                    0x00000002  /* TC1 and VC1 */
++#define PCIE_VC0_RC0_TVM2                    0x00000004  /* TC2 and VC2 */
++#define PCIE_VC0_RC0_TVM3                    0x00000008  /* TC3 and VC3 */
++#define PCIE_VC0_RC0_TVM4                    0x00000010  /* TC4 and VC4 */
++#define PCIE_VC0_RC0_TVM5                    0x00000020  /* TC5 and VC5 */
++#define PCIE_VC0_RC0_TVM6                    0x00000040  /* TC6 and VC6 */
++#define PCIE_VC0_RC0_TVM7                    0x00000080  /* TC7 and VC7 */
++#define PCIE_VC0_RC0_TC_VC                   0x000000FF  /* TC/VC mask */
++
++#define PCIE_VC0_RC0_LOAD_PORT_ARB_TAB       0x00010000  /* Load Port Arbitration Table */
++#define PCIE_VC0_RC0_PORT_ARB_SEL            0x000E0000  /* Port Arbitration Select */
++#define PCIE_VC0_RC0_PORT_ARB_SEL_S          17
++#define PCIE_VC0_RC0_VC_ID                   0x07000000  /* VC ID */
++#define PCIE_VC0_RC0_VC_ID_S                 24
++#define PCIE_VC0_RC0_VC_EN                   0x80000000  /* VC Enable */
++
++/* VC0 Resource Status Register */
++#define PCIE_VC0_RSR0(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x158)
++#define PCIE_VC0_RSR0_PORT_ARB_TAB_STATUS    0x00010000  /* Port Arbitration Table Status, not used */
++#define PCIE_VC0_RSR0_VC_NEG_PENDING         0x00020000  /* VC Negotiation Pending */
++
++/* Ack Latency Timer and Replay Timer Register */
++#define PCIE_ALTRT(X)                         (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x700)
++#define PCIE_ALTRT_ROUND_TRIP_LATENCY_LIMIT   0x0000FFFF  /* Round Trip Latency Time Limit */
++#define PCIE_ALTRT_ROUND_TRIP_LATENCY_LIMIT_S 0
++#define PCIE_ALTRT_REPLAY_TIME_LIMIT          0xFFFF0000  /* Replay Time Limit */
++#define PCIE_ALTRT_REPLAY_TIME_LIMIT_S        16
++
++/* Other Message Register */
++#define PCIE_OMR(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x704)
++
++/* Port Force Link Register */
++#define PCIE_PFLR(X)                         (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x708)
++#define PCIE_PFLR_LINK_NUM                   0x000000FF  /* Link Number */
++#define PCIE_PFLR_LINK_NUM_S                 0
++#define PCIE_PFLR_FORCE_LINK                 0x00008000  /* Force link */
++#define PCIE_PFLR_LINK_STATE                 0x003F0000  /* Link State */
++#define PCIE_PFLR_LINK_STATE_S               16
++#define PCIE_PFLR_LOW_POWER_ENTRY_CNT        0xFF000000  /* Low Power Entrance Count, only for EP */
++#define PCIE_PFLR_LOW_POWER_ENTRY_CNT_S      24
++
++/* Ack Frequency Register */
++#define PCIE_AFR(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x70C)
++#define PCIE_AFR_AF                          0x000000FF  /* Ack Frequency */
++#define PCIE_AFR_AF_S                        0
++#define PCIE_AFR_FTS_NUM                     0x0000FF00  /* The number of Fast Training Sequence from L0S to L0 */
++#define PCIE_AFR_FTS_NUM_S                   8
++#define PCIE_AFR_COM_FTS_NUM                 0x00FF0000  /* N_FTS; when common clock is used*/
++#define PCIE_AFR_COM_FTS_NUM_S               16
++#define PCIE_AFR_L0S_ENTRY_LATENCY           0x07000000  /* L0s Entrance Latency */
++#define PCIE_AFR_L0S_ENTRY_LATENCY_S         24
++#define PCIE_AFR_L1_ENTRY_LATENCY            0x38000000  /* L1 Entrance Latency */
++#define PCIE_AFR_L1_ENTRY_LATENCY_S          27
++#define PCIE_AFR_FTS_NUM_DEFAULT             32
++#define PCIE_AFR_L0S_ENTRY_LATENCY_DEFAULT   7
++#define PCIE_AFR_L1_ENTRY_LATENCY_DEFAULT    5
++
++/* Port Link Control Register */
++#define PCIE_PLCR(X)                         (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x710)
++#define PCIE_PLCR_OTHER_MSG_REQ              0x00000001  /* Other Message Request */
++#define PCIE_PLCR_SCRAMBLE_DISABLE           0x00000002  /* Scramble Disable */  
++#define PCIE_PLCR_LOOPBACK_EN                0x00000004  /* Loopback Enable */
++#define PCIE_PLCR_LTSSM_HOT_RST              0x00000008  /* Force LTSSM to the hot reset */
++#define PCIE_PLCR_DLL_LINK_EN                0x00000020  /* Enable Link initialization */
++#define PCIE_PLCR_FAST_LINK_SIM_EN           0x00000080  /* Sets all internal timers to fast mode for simulation purposes */
++#define PCIE_PLCR_LINK_MODE                  0x003F0000  /* Link Mode Enable Mask */
++#define PCIE_PLCR_LINK_MODE_S                16
++#define PCIE_PLCR_CORRUPTED_CRC_EN           0x02000000  /* Enabled Corrupt CRC */
++
++/* Lane Skew Register */
++#define PCIE_LSR(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x714)
++#define PCIE_LSR_LANE_SKEW_NUM               0x00FFFFFF  /* Insert Lane Skew for Transmit, not applicable */
++#define PCIE_LSR_LANE_SKEW_NUM_S             0
++#define PCIE_LSR_FC_DISABLE                  0x01000000  /* Disable of Flow Control */
++#define PCIE_LSR_ACKNAK_DISABLE              0x02000000  /* Disable of Ack/Nak */
++#define PCIE_LSR_LANE_DESKEW_DISABLE         0x80000000  /* Disable of Lane-to-Lane Skew */
++
++/* Symbol Number Register */
++#define PCIE_SNR(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x718)
++#define PCIE_SNR_TS                          0x0000000F  /* Number of TS Symbol */
++#define PCIE_SNR_TS_S                        0
++#define PCIE_SNR_SKP                         0x00000700  /* Number of SKP Symbol */
++#define PCIE_SNR_SKP_S                       8
++#define PCIE_SNR_REPLAY_TIMER                0x0007C000  /* Timer Modifier for Replay Timer */
++#define PCIE_SNR_REPLAY_TIMER_S              14
++#define PCIE_SNR_ACKNAK_LATENCY_TIMER        0x00F80000  /* Timer Modifier for Ack/Nak Latency Timer */
++#define PCIE_SNR_ACKNAK_LATENCY_TIMER_S      19
++#define PCIE_SNR_FC_TIMER                    0x1F000000  /* Timer Modifier for Flow Control Watchdog Timer */
++#define PCIE_SNR_FC_TIMER_S                  28
++
++/* Symbol Timer Register and Filter Mask Register 1 */
++#define PCIE_STRFMR(X)                      (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x71C)
++#define PCIE_STRFMR_SKP_INTERVAL            0x000007FF  /* SKP Interval Value */
++#define PCIE_STRFMR_SKP_INTERVAL_S          0
++#define PCIE_STRFMR_FC_WDT_DISABLE          0x00008000  /* Disable of FC Watchdog Timer */
++#define PCIE_STRFMR_TLP_FUNC_MISMATCH_OK    0x00010000  /* Mask Function Mismatch Filtering for Incoming Requests */
++#define PCIE_STRFMR_POISONED_TLP_OK         0x00020000  /* Mask Poisoned TLP Filtering */
++#define PCIE_STRFMR_BAR_MATCH_OK            0x00040000  /* Mask BAR Match Filtering */
++#define PCIE_STRFMR_TYPE1_CFG_REQ_OK        0x00080000  /* Mask Type 1 Configuration Request Filtering */
++#define PCIE_STRFMR_LOCKED_REQ_OK           0x00100000  /* Mask Locked Request Filtering */
++#define PCIE_STRFMR_CPL_TAG_ERR_RULES_OK    0x00200000  /* Mask Tag Error Rules for Received Completions */
++#define PCIE_STRFMR_CPL_REQUESTOR_ID_MISMATCH_OK 0x00400000  /* Mask Requester ID Mismatch Error for Received Completions */
++#define PCIE_STRFMR_CPL_FUNC_MISMATCH_OK         0x00800000  /* Mask Function Mismatch Error for Received Completions */
++#define PCIE_STRFMR_CPL_TC_MISMATCH_OK           0x01000000  /* Mask Traffic Class Mismatch Error for Received Completions */
++#define PCIE_STRFMR_CPL_ATTR_MISMATCH_OK         0x02000000  /* Mask Attribute Mismatch Error for Received Completions */
++#define PCIE_STRFMR_CPL_LENGTH_MISMATCH_OK       0x04000000  /* Mask Length Mismatch Error for Received Completions */
++#define PCIE_STRFMR_TLP_ECRC_ERR_OK              0x08000000  /* Mask ECRC Error Filtering */
++#define PCIE_STRFMR_CPL_TLP_ECRC_OK              0x10000000  /* Mask ECRC Error Filtering for Completions */
++#define PCIE_STRFMR_RX_TLP_MSG_NO_DROP           0x20000000  /* Send Message TLPs */
++#define PCIE_STRFMR_RX_IO_TRANS_ENABLE           0x40000000  /* Mask Filtering of received I/O Requests */
++#define PCIE_STRFMR_RX_CFG_TRANS_ENABLE          0x80000000  /* Mask Filtering of Received Configuration Requests */
++
++#define PCIE_DEF_SKP_INTERVAL    700             /* 1180 ~1538 , 125MHz * 2, 250MHz * 1 */
++
++/* Filter Masker Register 2 */
++#define PCIE_FMR2(X)                             (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x720)
++#define PCIE_FMR2_VENDOR_MSG0_PASSED_TO_TRGT1    0x00000001  /* Mask RADM Filtering and Error Handling Rules */
++#define PCIE_FMR2_VENDOR_MSG1_PASSED_TO_TRGT1    0x00000002  /* Mask RADM Filtering and Error Handling Rules */
++
++/* Debug Register 0 */
++#define PCIE_DBR0(X)                              (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x728)
++
++/* Debug Register 1 */
++#define PCIE_DBR1(X)                              (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x72C)
++
++/* Transmit Posted FC Credit Status Register */
++#define PCIE_TPFCS(X)                             (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x730)
++#define PCIE_TPFCS_TX_P_DATA_FC_CREDITS           0x00000FFF /* Transmit Posted Data FC Credits */
++#define PCIE_TPFCS_TX_P_DATA_FC_CREDITS_S         0
++#define PCIE_TPFCS_TX_P_HDR_FC_CREDITS            0x000FF000 /* Transmit Posted Header FC Credits */
++#define PCIE_TPFCS_TX_P_HDR_FC_CREDITS_S          12
++
++/* Transmit Non-Posted FC Credit Status */
++#define PCIE_TNPFCS(X)                            (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x734)
++#define PCIE_TNPFCS_TX_NP_DATA_FC_CREDITS         0x00000FFF /* Transmit Non-Posted Data FC Credits */
++#define PCIE_TNPFCS_TX_NP_DATA_FC_CREDITS_S       0
++#define PCIE_TNPFCS_TX_NP_HDR_FC_CREDITS          0x000FF000 /* Transmit Non-Posted Header FC Credits */
++#define PCIE_TNPFCS_TX_NP_HDR_FC_CREDITS_S        12
++
++/* Transmit Complete FC Credit Status Register */
++#define PCIE_TCFCS(X)                             (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x738)
++#define PCIE_TCFCS_TX_CPL_DATA_FC_CREDITS         0x00000FFF /* Transmit Completion Data FC Credits */
++#define PCIE_TCFCS_TX_CPL_DATA_FC_CREDITS_S       0
++#define PCIE_TCFCS_TX_CPL_HDR_FC_CREDITS          0x000FF000 /* Transmit Completion Header FC Credits */
++#define PCIE_TCFCS_TX_CPL_HDR_FC_CREDITS_S        12
++
++/* Queue Status Register */
++#define PCIE_QSR(X)                              (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x73C)
++#define PCIE_QSR_WAIT_UPDATE_FC_DLL               0x00000001 /* Received TLP FC Credits Not Returned */
++#define PCIE_QSR_TX_RETRY_BUF_NOT_EMPTY           0x00000002 /* Transmit Retry Buffer Not Empty */
++#define PCIE_QSR_RX_QUEUE_NOT_EMPTY               0x00000004 /* Received Queue Not Empty */
++
++/* VC Transmit Arbitration Register 1 */
++#define PCIE_VCTAR1(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x740)
++#define PCIE_VCTAR1_WRR_WEIGHT_VC0               0x000000FF /* WRR Weight for VC0 */
++#define PCIE_VCTAR1_WRR_WEIGHT_VC1               0x0000FF00 /* WRR Weight for VC1 */
++#define PCIE_VCTAR1_WRR_WEIGHT_VC2               0x00FF0000 /* WRR Weight for VC2 */
++#define PCIE_VCTAR1_WRR_WEIGHT_VC3               0xFF000000 /* WRR Weight for VC3 */
++
++/* VC Transmit Arbitration Register 2 */
++#define PCIE_VCTAR2(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x744)
++#define PCIE_VCTAR2_WRR_WEIGHT_VC4               0x000000FF /* WRR Weight for VC4 */
++#define PCIE_VCTAR2_WRR_WEIGHT_VC5               0x0000FF00 /* WRR Weight for VC5 */
++#define PCIE_VCTAR2_WRR_WEIGHT_VC6               0x00FF0000 /* WRR Weight for VC6 */
++#define PCIE_VCTAR2_WRR_WEIGHT_VC7               0xFF000000 /* WRR Weight for VC7 */
++
++/* VC0 Posted Receive Queue Control Register */
++#define PCIE_VC0_PRQCR(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x748)
++#define PCIE_VC0_PRQCR_P_DATA_CREDITS            0x00000FFF /* VC0 Posted Data Credits */
++#define PCIE_VC0_PRQCR_P_DATA_CREDITS_S          0
++#define PCIE_VC0_PRQCR_P_HDR_CREDITS             0x000FF000 /* VC0 Posted Header Credits */
++#define PCIE_VC0_PRQCR_P_HDR_CREDITS_S           12
++#define PCIE_VC0_PRQCR_P_TLP_QUEUE_MODE          0x00E00000 /* VC0 Posted TLP Queue Mode */
++#define PCIE_VC0_PRQCR_P_TLP_QUEUE_MODE_S        20
++#define PCIE_VC0_PRQCR_TLP_RELAX_ORDER           0x40000000 /* TLP Type Ordering for VC0 */    
++#define PCIE_VC0_PRQCR_VC_STRICT_ORDER           0x80000000 /* VC0 Ordering for Receive Queues */
++
++/* VC0 Non-Posted Receive Queue Control */
++#define PCIE_VC0_NPRQCR(X)                      (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x74C)
++#define PCIE_VC0_NPRQCR_NP_DATA_CREDITS          0x00000FFF /* VC0 Non-Posted Data Credits */
++#define PCIE_VC0_NPRQCR_NP_DATA_CREDITS_S        0
++#define PCIE_VC0_NPRQCR_NP_HDR_CREDITS           0x000FF000 /* VC0 Non-Posted Header Credits */
++#define PCIE_VC0_NPRQCR_NP_HDR_CREDITS_S         12
++#define PCIE_VC0_NPRQCR_NP_TLP_QUEUE_MODE        0x00E00000 /* VC0 Non-Posted TLP Queue Mode */
++#define PCIE_VC0_NPRQCR_NP_TLP_QUEUE_MODE_S      20
++
++/* VC0 Completion Receive Queue Control */
++#define PCIE_VC0_CRQCR(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x750)
++#define PCIE_VC0_CRQCR_CPL_DATA_CREDITS          0x00000FFF /* VC0 Completion Data Credits */
++#define PCIE_VC0_CRQCR_CPL_DATA_CREDITS_S        0
++#define PCIE_VC0_CRQCR_CPL_HDR_CREDITS           0x000FF000 /* VC0 Completion Header Credits */
++#define PCIE_VC0_CRQCR_CPL_HDR_CREDITS_S         12
++#define PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE        0x00E00000 /* VC0 Completion TLP Queue Mode */
++#define PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE_S      21
++
++/* Applicable to the above three registers */
++enum {
++    PCIE_VC0_TLP_QUEUE_MODE_STORE_FORWARD = 1,
++    PCIE_VC0_TLP_QUEUE_MODE_CUT_THROUGH   = 2,
++    PCIE_VC0_TLP_QUEUE_MODE_BYPASS        = 4,
++};
++
++/* VC0 Posted Buffer Depth Register */
++#define PCIE_VC0_PBD(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7A8)
++#define PCIE_VC0_PBD_P_DATA_QUEUE_ENTRIES       0x00003FFF /* VC0 Posted Data Queue Depth */
++#define PCIE_VC0_PBD_P_DATA_QUEUE_ENTRIES_S     0
++#define PCIE_VC0_PBD_P_HDR_QUEUE_ENTRIES        0x03FF0000 /* VC0 Posted Header Queue Depth */
++#define PCIE_VC0_PBD_P_HDR_QUEUE_ENTRIES_S      16
++
++/* VC0 Non-Posted Buffer Depth Register */
++#define PCIE_VC0_NPBD(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7AC)
++#define PCIE_VC0_NPBD_NP_DATA_QUEUE_ENTRIES     0x00003FFF /* VC0 Non-Posted Data Queue Depth */
++#define PCIE_VC0_NPBD_NP_DATA_QUEUE_ENTRIES_S   0
++#define PCIE_VC0_NPBD_NP_HDR_QUEUE_ENTRIES      0x03FF0000 /* VC0 Non-Posted Header Queue Depth */
++#define PCIE_VC0_NPBD_NP_HDR_QUEUE_ENTRIES_S    16
++
++/* VC0 Completion Buffer Depth Register */
++#define PCIE_VC0_CBD(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7B0)
++#define PCIE_VC0_CBD_CPL_DATA_QUEUE_ENTRIES     0x00003FFF /* VC0 Completion Data Queue Depth */
++#define PCIE_VC0_CBD_CPL_DATA_QUEUE_ENTRIES_S   0
++#define PCIE_VC0_CBD_CPL_HDR_QUEUE_ENTRIES      0x03FF0000 /* VC0 Completion Header Queue Depth */
++#define PCIE_VC0_CBD_CPL_HDR_QUEUE_ENTRIES_S    16
++
++/* PHY Status Register, all zeros in VR9 */
++#define PCIE_PHYSR(X)                           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x810)
++
++/* PHY Control Register, all zeros in VR9 */
++#define PCIE_PHYCR(X)                           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x814)
++
++/*
++ * PCIe PDI PHY register definitions. All of the following material is
++ * assumed to be confidential vendor documentation.
++ * XXX: detailed bit definitions are still missing.
++ */
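++/*
++ * Note (editorial assumption): the PDI PHY registers appear to be 16 bits
++ * wide, so the (index << 1) in the offsets below converts a register index
++ * into a byte offset within the 0x400-byte PDI window.
++ */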
++#define	PCIE_PHY_PLL_CTRL1(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x22 << 1))
++#define	PCIE_PHY_PLL_CTRL2(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x23 << 1))
++#define	PCIE_PHY_PLL_CTRL3(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x24 << 1))
++#define	PCIE_PHY_PLL_CTRL4(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x25 << 1))
++#define	PCIE_PHY_PLL_CTRL5(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x26 << 1))
++#define	PCIE_PHY_PLL_CTRL6(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x27 << 1))
++#define	PCIE_PHY_PLL_CTRL7(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x28 << 1))
++#define	PCIE_PHY_PLL_A_CTRL1(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x29 << 1))
++#define	PCIE_PHY_PLL_A_CTRL2(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x2A << 1))
++#define	PCIE_PHY_PLL_A_CTRL3(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x2B << 1))
++#define	PCIE_PHY_PLL_STATUS(X)      (PCIE_PHY_PORT_TO_BASE(X) + (0x2C << 1))
++ 
++#define PCIE_PHY_TX1_CTRL1(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x30 << 1))
++#define PCIE_PHY_TX1_CTRL2(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x31 << 1))
++#define PCIE_PHY_TX1_CTRL3(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x32 << 1))
++#define PCIE_PHY_TX1_A_CTRL1(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x33 << 1))
++#define PCIE_PHY_TX1_A_CTRL2(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x34 << 1))
++#define PCIE_PHY_TX1_MOD1(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x35 << 1))
++#define PCIE_PHY_TX1_MOD2(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x36 << 1))
++#define PCIE_PHY_TX1_MOD3(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x37 << 1))
++
++#define PCIE_PHY_TX2_CTRL1(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x38 << 1))
++#define PCIE_PHY_TX2_CTRL2(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x39 << 1))
++#define PCIE_PHY_TX2_A_CTRL1(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x3B << 1))
++#define PCIE_PHY_TX2_A_CTRL2(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x3C << 1))
++#define PCIE_PHY_TX2_MOD1(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x3D << 1))
++#define PCIE_PHY_TX2_MOD2(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x3E << 1))
++#define PCIE_PHY_TX2_MOD3(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x3F << 1))
++
++#define PCIE_PHY_RX1_CTRL1(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x50 << 1))
++#define PCIE_PHY_RX1_CTRL2(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x51 << 1))
++#define PCIE_PHY_RX1_CDR(X)         (PCIE_PHY_PORT_TO_BASE(X) + (0x52 << 1))
++#define PCIE_PHY_RX1_EI(X)          (PCIE_PHY_PORT_TO_BASE(X) + (0x53 << 1))
++#define PCIE_PHY_RX1_A_CTRL(X)      (PCIE_PHY_PORT_TO_BASE(X) + (0x55 << 1))
++
++/* Interrupt related stuff */
++#define PCIE_LEGACY_DISABLE 0
++#define PCIE_LEGACY_INTA  1
++#define PCIE_LEGACY_INTB  2
++#define PCIE_LEGACY_INTC  3
++#define PCIE_LEGACY_INTD  4
++#define PCIE_LEGACY_INT_MAX PCIE_LEGACY_INTD
++
++#endif /* IFXMIPS_PCIE_REG_H */
++
+--- /dev/null
++++ b/arch/mips/pci/ifxmips_pcie_vr9.h
+@@ -0,0 +1,269 @@
++/****************************************************************************
++                              Copyright (c) 2010
++                            Lantiq Deutschland GmbH
++                     Am Campeon 3; 85579 Neubiberg, Germany
++
++  For licensing information, see the file 'LICENSE' in the root folder of
++  this software module.
++
++ *****************************************************************************/
++/*!
++  \file ifxmips_pcie_vr9.h
++  \ingroup IFX_PCIE
++  \brief PCIe RC driver vr9 specific file
++*/
++
++#ifndef IFXMIPS_PCIE_VR9_H
++#define IFXMIPS_PCIE_VR9_H
++
++#include <linux/types.h>
++#include <linux/delay.h>
++
++#include <linux/gpio.h>
++#include <linux/clk.h>
++#include <lantiq_soc.h>
++
++#define IFX_PCIE_GPIO_RESET  494
++
++#define IFX_REG_R32    ltq_r32
++#define IFX_REG_W32    ltq_w32
++#define CONFIG_IFX_PCIE_HW_SWAP
++#define IFX_RCU_AHB_ENDIAN                      ((volatile u32*)(IFX_RCU + 0x004C))
++#define IFX_RCU_RST_REQ                         ((volatile u32*)(IFX_RCU + 0x0010))
++#define IFX_RCU_AHB_BE_PCIE_PDI                  0x00000080  /* Configure PCIE PDI module in big endian*/
++
++#define IFX_RCU                                 (KSEG1 | 0x1F203000)
++#define IFX_RCU_AHB_BE_PCIE_M                    0x00000001  /* Configure AHB master port that connects to PCIe RC in big endian */
++#define IFX_RCU_AHB_BE_PCIE_S                    0x00000010  /* Configure AHB slave port that connects to PCIe RC in little endian */
++#define IFX_RCU_AHB_BE_XBAR_M                    0x00000002  /* Configure AHB master port that connects to XBAR in big endian */
++#define CONFIG_IFX_PCIE_PHY_36MHZ_MODE
++
++#define IFX_PMU1_MODULE_PCIE_PHY   (0)
++#define IFX_PMU1_MODULE_PCIE_CTRL  (1)
++#define IFX_PMU1_MODULE_PDI        (4)
++#define IFX_PMU1_MODULE_MSI        (5)
++
++#define IFX_PMU_MODULE_PCIE_L0_CLK (31)
++
++
++#define IFX_GPIO				(KSEG1 | 0x1E100B00)
++#define ALT0			((volatile u32*)(IFX_GPIO + 0x007c))
++#define ALT1			((volatile u32*)(IFX_GPIO + 0x0080))
++#define OD			((volatile u32*)(IFX_GPIO + 0x0084))
++#define DIR			((volatile u32*)(IFX_GPIO + 0x0078))
++#define OUT			((volatile u32*)(IFX_GPIO + 0x0070))
++
++
++static inline void pcie_ep_gpio_rst_init(int pcie_port)
++{
++
++	gpio_request(IFX_PCIE_GPIO_RESET, "pcie-reset");
++	gpio_direction_output(IFX_PCIE_GPIO_RESET, 1);
++	gpio_set_value(IFX_PCIE_GPIO_RESET, 1);
++
++/*    ifx_gpio_pin_reserve(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
++    ifx_gpio_output_set(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
++    ifx_gpio_dir_out_set(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
++    ifx_gpio_altsel0_clear(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
++    ifx_gpio_altsel1_clear(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
++    ifx_gpio_open_drain_set(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);*/
++}
++
++static inline void pcie_ahb_pmu_setup(void) 
++{
++	/* Enable AHB bus master/slave */
++	struct clk *clk;
++	clk = clk_get_sys("1d900000.pcie", "ahb");
++	clk_enable(clk);
++
++    //AHBM_PMU_SETUP(IFX_PMU_ENABLE);
++    //AHBS_PMU_SETUP(IFX_PMU_ENABLE);
++}
++
++static inline void pcie_rcu_endian_setup(int pcie_port)
++{
++    u32 reg;
++
++    reg = IFX_REG_R32(IFX_RCU_AHB_ENDIAN);
++#ifdef CONFIG_IFX_PCIE_HW_SWAP
++    reg |= IFX_RCU_AHB_BE_PCIE_M;
++    reg |= IFX_RCU_AHB_BE_PCIE_S;
++    reg &= ~IFX_RCU_AHB_BE_XBAR_M;
++#else 
++    reg |= IFX_RCU_AHB_BE_PCIE_M;
++    reg &= ~IFX_RCU_AHB_BE_PCIE_S;
++    reg &= ~IFX_RCU_AHB_BE_XBAR_M;
++#endif /* CONFIG_IFX_PCIE_HW_SWAP */
++    IFX_REG_W32(reg, IFX_RCU_AHB_ENDIAN);
++    IFX_PCIE_PRINT(PCIE_MSG_REG, "%s IFX_RCU_AHB_ENDIAN: 0x%08x\n", __func__, IFX_REG_R32(IFX_RCU_AHB_ENDIAN));
++}
++
++static inline void pcie_phy_pmu_enable(int pcie_port)
++{
++	struct clk *clk;
++	clk = clk_get_sys("1d900000.pcie", "phy");
++	clk_enable(clk);
++
++	//PCIE_PHY_PMU_SETUP(IFX_PMU_ENABLE);
++}
++
++static inline void pcie_phy_pmu_disable(int pcie_port)
++{
++	struct clk *clk;
++	clk = clk_get_sys("1d900000.pcie", "phy");
++	clk_disable(clk);
++
++//    PCIE_PHY_PMU_SETUP(IFX_PMU_DISABLE);
++}
++
++static inline void pcie_pdi_big_endian(int pcie_port)
++{
++    u32 reg;
++
++    /* SRAM2PDI endianness control. */
++    reg = IFX_REG_R32(IFX_RCU_AHB_ENDIAN);
++    /* Config AHB->PCIe and PDI endianness */
++    reg |= IFX_RCU_AHB_BE_PCIE_PDI;
++    IFX_REG_W32(reg, IFX_RCU_AHB_ENDIAN);
++}
++
++static inline void pcie_pdi_pmu_enable(int pcie_port)
++{
++    /* Enable PDI to access PCIe PHY register */
++	struct clk *clk;
++	clk = clk_get_sys("1d900000.pcie", "pdi");
++	clk_enable(clk);
++    //PDI_PMU_SETUP(IFX_PMU_ENABLE);
++}
++
++static inline void pcie_core_rst_assert(int pcie_port)
++{
++    u32 reg;
++
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++
++    /* Reset PCIe PHY & Core, bit 22; bit 26 may be affected if written directly */
++    reg |= 0x00400000;
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_core_rst_deassert(int pcie_port)
++{
++    u32 reg;
++
++    /* Ensure at least a one-microsecond delay */
++    udelay(1);
++
++    /* Reset PCIe PHY & Core, bit 22 */
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++    reg &= ~0x00400000;
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_phy_rst_assert(int pcie_port)
++{
++    u32 reg;
++
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++    reg |= 0x00001000; /* Bit 12 */
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_phy_rst_deassert(int pcie_port)
++{
++    u32 reg;
++
++    /* Ensure at least a one-microsecond delay */
++    udelay(1);
++
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++    reg &= ~0x00001000; /* Bit 12 */
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_device_rst_assert(int pcie_port)
++{
++	gpio_set_value(IFX_PCIE_GPIO_RESET, 0);
++//    ifx_gpio_output_clear(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
++}
++
++static inline void pcie_device_rst_deassert(int pcie_port)
++{
++    mdelay(100);
++	gpio_direction_output(IFX_PCIE_GPIO_RESET, 1);
++//    gpio_set_value(IFX_PCIE_GPIO_RESET, 1);
++    //ifx_gpio_output_set(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
++}
++
++static inline void pcie_core_pmu_setup(int pcie_port)
++{
++	struct clk *clk;
++	clk = clk_get_sys("1d900000.pcie", "ctl");
++	clk_enable(clk);
++	clk = clk_get_sys("1d900000.pcie", "bus");
++	clk_enable(clk);
++
++    /* PCIe Core controller enabled */
++//    PCIE_CTRL_PMU_SETUP(IFX_PMU_ENABLE);
++
++    /* Enable PCIe L0 Clock */
++//  PCIE_L0_CLK_PMU_SETUP(IFX_PMU_ENABLE);
++}
++
++static inline void pcie_msi_init(int pcie_port)
++{
++	struct clk *clk;
++	pcie_msi_pic_init(pcie_port);
++	clk = clk_get_sys("ltq_pcie", "msi");
++	clk_enable(clk);
++//    MSI_PMU_SETUP(IFX_PMU_ENABLE);
++}
++
++static inline u32
++ifx_pcie_bus_nr_deduct(u32 bus_number, int pcie_port)
++{
++    u32 tbus_number = bus_number;
++
++#ifdef CONFIG_PCI_LANTIQ
++    if (pcibios_host_nr() > 1) {
++        tbus_number -= pcibios_1st_host_bus_nr();
++    }
++#endif /* CONFIG_PCI_LANTIQ */
++    return tbus_number;
++}
++
++static inline u32
++ifx_pcie_bus_enum_hack(struct pci_bus *bus, u32 devfn, int where, u32 value, int pcie_port, int read)
++{
++    struct pci_dev *pdev;
++    u32 tvalue = value;
++
++    /* Sanity check */
++    pdev = pci_get_slot(bus, devfn);
++    if (pdev == NULL) {
++        return tvalue;
++    }
++
++    /* Only care about PCI bridge */
++    if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
++        return tvalue;
++    }
++
++    if (read) { /* Read hack */
++    #ifdef CONFIG_PCI_LANTIQ
++        if (pcibios_host_nr() > 1) {
++            tvalue = ifx_pcie_bus_enum_read_hack(where, tvalue);
++        }
++    #endif /* CONFIG_PCI_LANTIQ */
++    }
++    else { /* Write hack */
++    #ifdef CONFIG_PCI_LANTIQ
++        if (pcibios_host_nr() > 1) {
++            tvalue = ifx_pcie_bus_enum_write_hack(where, tvalue);
++        }
++    #endif
++    }
++    return tvalue;
++}
++
++#endif /* IFXMIPS_PCIE_VR9_H */
++
+--- a/arch/mips/pci/pci.c
++++ b/arch/mips/pci/pci.c
+@@ -256,6 +256,31 @@ static int __init pcibios_init(void)
+ 
+ subsys_initcall(pcibios_init);
+ 
++int pcibios_host_nr(void)
++{
++    int count;
++    struct pci_controller *hose;
++    for (count = 0, hose = hose_head; hose; hose = hose->next, count++) {
++        ;
++    }
++    return count;
++}
++EXPORT_SYMBOL(pcibios_host_nr);
++
++int pcibios_1st_host_bus_nr(void)
++{
++    int bus_nr = 0;
++    struct pci_controller *hose = hose_head;
++
++    if (hose != NULL) {
++        if (hose->bus != NULL) {
++            bus_nr = hose->bus->number + 1;
++        }
++    }
++    return bus_nr;
++}
++EXPORT_SYMBOL(pcibios_1st_host_bus_nr);
++
+ static int pcibios_enable_resources(struct pci_dev *dev, int mask)
+ {
+ 	u16 cmd, old_cmd;
+--- /dev/null
++++ b/arch/mips/pci/pcie-lantiq.h
+@@ -0,0 +1,1305 @@
++/******************************************************************************
++**
++** FILE NAME    : ifxmips_pcie_reg.h
++** PROJECT      : IFX UEIP for VRX200
++** MODULES      : PCIe module
++**
++** DATE         : 02 Mar 2009
++** AUTHOR       : Lei Chuanhua
++** DESCRIPTION  : PCIe Root Complex Driver
++** COPYRIGHT    :       Copyright (c) 2009
++**                      Infineon Technologies AG
++**                      Am Campeon 1-12, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++** HISTORY
++** $Version $Date        $Author         $Comment
++** 0.0.1    17 Mar,2009  Lei Chuanhua    Initial version
++*******************************************************************************/
++#ifndef IFXMIPS_PCIE_REG_H
++#define IFXMIPS_PCIE_REG_H
++#include <linux/version.h>
++#include <linux/types.h>
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++/*!
++ \file ifxmips_pcie_reg.h
++ \ingroup IFX_PCIE  
++ \brief header file for PCIe module register definition
++*/
++/* PCIe Address Mapping Base */
++#define PCIE_CFG_PHY_BASE        0x1D000000UL
++#define PCIE_CFG_BASE           (KSEG1 + PCIE_CFG_PHY_BASE)
++#define PCIE_CFG_SIZE           (8 * 1024 * 1024)
++
++#define PCIE_MEM_PHY_BASE        0x1C000000UL
++#define PCIE_MEM_BASE           (KSEG1 + PCIE_MEM_PHY_BASE)
++#define PCIE_MEM_SIZE           (16 * 1024 * 1024)
++#define PCIE_MEM_PHY_END        (PCIE_MEM_PHY_BASE + PCIE_MEM_SIZE - 1)
++
++#define PCIE_IO_PHY_BASE         0x1D800000UL
++#define PCIE_IO_BASE            (KSEG1 + PCIE_IO_PHY_BASE)
++#define PCIE_IO_SIZE            (1 * 1024 * 1024)
++#define PCIE_IO_PHY_END         (PCIE_IO_PHY_BASE + PCIE_IO_SIZE - 1)
++
++#define PCIE_RC_CFG_BASE        (KSEG1 + 0x1D900000)
++#define PCIE_APP_LOGIC_REG      (KSEG1 + 0x1E100900)
++#define PCIE_MSI_PHY_BASE        0x1F600000UL
++
++#define PCIE_PDI_PHY_BASE        0x1F106800UL
++#define PCIE_PDI_BASE           (KSEG1 + PCIE_PDI_PHY_BASE)
++#define PCIE_PDI_SIZE            0x400
++
++#define PCIE1_CFG_PHY_BASE        0x19000000UL
++#define PCIE1_CFG_BASE           (KSEG1 + PCIE1_CFG_PHY_BASE)
++#define PCIE1_CFG_SIZE           (8 * 1024 * 1024)
++
++#define PCIE1_MEM_PHY_BASE        0x18000000UL
++#define PCIE1_MEM_BASE           (KSEG1 + PCIE1_MEM_PHY_BASE)
++#define PCIE1_MEM_SIZE           (16 * 1024 * 1024)
++#define PCIE1_MEM_PHY_END        (PCIE1_MEM_PHY_BASE + PCIE1_MEM_SIZE - 1)
++
++#define PCIE1_IO_PHY_BASE         0x19800000UL
++#define PCIE1_IO_BASE            (KSEG1 + PCIE1_IO_PHY_BASE)
++#define PCIE1_IO_SIZE            (1 * 1024 * 1024)
++#define PCIE1_IO_PHY_END         (PCIE1_IO_PHY_BASE + PCIE1_IO_SIZE - 1)
++
++#define PCIE1_RC_CFG_BASE        (KSEG1 + 0x19900000)
++#define PCIE1_APP_LOGIC_REG      (KSEG1 + 0x1E100700)
++#define PCIE1_MSI_PHY_BASE        0x1F400000UL
++
++#define PCIE1_PDI_PHY_BASE        0x1F700400UL
++#define PCIE1_PDI_BASE           (KSEG1 + PCIE1_PDI_PHY_BASE)
++#define PCIE1_PDI_SIZE            0x400
++
++#define PCIE_CFG_PORT_TO_BASE(X)     ((X) > 0 ? (PCIE1_CFG_BASE) : (PCIE_CFG_BASE))
++#define PCIE_MEM_PORT_TO_BASE(X)     ((X) > 0 ? (PCIE1_MEM_BASE) : (PCIE_MEM_BASE))
++#define PCIE_IO_PORT_TO_BASE(X)      ((X) > 0 ? (PCIE1_IO_BASE) : (PCIE_IO_BASE))
++#define PCIE_MEM_PHY_PORT_TO_BASE(X) ((X) > 0 ? (PCIE1_MEM_PHY_BASE) : (PCIE_MEM_PHY_BASE))
++#define PCIE_MEM_PHY_PORT_TO_END(X)  ((X) > 0 ? (PCIE1_MEM_PHY_END) : (PCIE_MEM_PHY_END))
++#define PCIE_IO_PHY_PORT_TO_BASE(X)  ((X) > 0 ? (PCIE1_IO_PHY_BASE) : (PCIE_IO_PHY_BASE))
++#define PCIE_IO_PHY_PORT_TO_END(X)   ((X) > 0 ? (PCIE1_IO_PHY_END) : (PCIE_IO_PHY_END))
++#define PCIE_APP_PORT_TO_BASE(X)     ((X) > 0 ? (PCIE1_APP_LOGIC_REG) : (PCIE_APP_LOGIC_REG))
++#define PCIE_RC_PORT_TO_BASE(X)      ((X) > 0 ? (PCIE1_RC_CFG_BASE) : (PCIE_RC_CFG_BASE))
++#define PCIE_PHY_PORT_TO_BASE(X)     ((X) > 0 ? (PCIE1_PDI_BASE) : (PCIE_PDI_BASE))
++
++/* PCIe Application Logic Register */
++/* RC Core Control Register */
++#define PCIE_RC_CCR(X)                      (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x10)
++/* This should be enabled after initializing configuration registers
++ * Also should check link status retraining bit
++ */
++#define PCIE_RC_CCR_LTSSM_ENABLE             0x00000001    /* Enable LTSSM to continue link establishment */
++
++/* RC Core Debug Register */
++#define PCIE_RC_DR(X)                       (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x14)
++#define PCIE_RC_DR_DLL_UP                    0x00000001  /* Data Link Layer Up */
++#define PCIE_RC_DR_CURRENT_POWER_STATE       0x0000000E  /* Current Power State */
++#define PCIE_RC_DR_CURRENT_POWER_STATE_S     1
++#define PCIE_RC_DR_CURRENT_LTSSM_STATE       0x000001F0  /* Current LTSSM State */
++#define PCIE_RC_DR_CURRENT_LTSSM_STATE_S     4
++
++#define PCIE_RC_DR_PM_DEV_STATE              0x00000E00  /* Power Management D-State */
++#define PCIE_RC_DR_PM_DEV_STATE_S            9
++
++#define PCIE_RC_DR_PM_ENABLED                0x00001000  /* Power Management State from PMU */
++#define PCIE_RC_DR_PME_EVENT_ENABLED         0x00002000  /* Power Management Event Enable State */
++#define PCIE_RC_DR_AUX_POWER_ENABLED         0x00004000  /* Auxiliary Power Enable */
++
++/* Current Power State Definition */
++enum {
++    PCIE_RC_DR_D0 = 0,
++    PCIE_RC_DR_D1,   /* Not supported */
++    PCIE_RC_DR_D2,   /* Not supported */
++    PCIE_RC_DR_D3,
++    PCIE_RC_DR_UN,
++};
++
++/* PHY Link Status Register */
++#define PCIE_PHY_SR(X)                      (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x18)
++#define PCIE_PHY_SR_PHY_LINK_UP              0x00000001   /* PHY Link Up/Down Indicator */
++
++/* Electromechanical Control Register */
++#define PCIE_EM_CR(X)                       (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x1C)
++#define PCIE_EM_CR_CARD_IS_PRESENT           0x00000001  /* Card Presence Detect State */
++#define PCIE_EM_CR_MRL_OPEN                  0x00000002  /* MRL Sensor State */
++#define PCIE_EM_CR_POWER_FAULT_SET           0x00000004  /* Power Fault Detected */
++#define PCIE_EM_CR_MRL_SENSOR_SET            0x00000008  /* MRL Sensor Changed */
++#define PCIE_EM_CR_PRESENT_DETECT_SET        0x00000010  /* Card Presence Detect Changed */
++#define PCIE_EM_CR_CMD_CPL_INT_SET           0x00000020  /* Command Complete Interrupt */
++#define PCIE_EM_CR_SYS_INTERLOCK_SET         0x00000040  /* System Electromechanical Interlock Engaged */
++#define PCIE_EM_CR_ATTENTION_BUTTON_SET      0x00000080  /* Attention Button Pressed */
++
++/* Interrupt Status Register */
++#define PCIE_IR_SR(X)                       (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x20)
++#define PCIE_IR_SR_PME_CAUSE_MSI             0x00000002  /* MSI caused by PME */
++#define PCIE_IR_SR_HP_PME_WAKE_GEN           0x00000004  /* Hotplug PME Wake Generation */
++#define PCIE_IR_SR_HP_MSI                    0x00000008  /* Hotplug MSI */
++#define PCIE_IR_SR_AHB_LU_ERR                0x00000030  /* AHB Bridge Lookup Error Signals */
++#define PCIE_IR_SR_AHB_LU_ERR_S              4
++#define PCIE_IR_SR_INT_MSG_NUM               0x00003E00  /* Interrupt Message Number */
++#define PCIE_IR_SR_INT_MSG_NUM_S             9
++#define PCIE_IR_SR_AER_INT_MSG_NUM           0xF8000000  /* Advanced Error Interrupt Message Number */
++#define PCIE_IR_SR_AER_INT_MSG_NUM_S         27
++
++/* Message Control Register */
++#define PCIE_MSG_CR(X)                      (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x30)
++#define PCIE_MSG_CR_GEN_PME_TURN_OFF_MSG     0x00000001  /* Generate PME Turn Off Message */
++#define PCIE_MSG_CR_GEN_UNLOCK_MSG           0x00000002  /* Generate Unlock Message */
++
++#define PCIE_VDM_DR(X)                      (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x34)
++
++/* Vendor-Defined Message Requester ID Register */
++#define PCIE_VDM_RID(X)                     (PCIE_APP_PORT_TO_BASE (X) + 0x38)
++#define PCIE_VDM_RID_VENROR_MSG_REQ_ID       0x0000FFFF
++#define PCIE_VDM_RID_VDMRID_S                0
++
++/* ASPM Control Register */
++#define PCIE_ASPM_CR(X)                     (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x40)
++#define PCIE_ASPM_CR_HOT_RST                 0x00000001  /* Hot Reset Request to the downstream device */
++#define PCIE_ASPM_CR_REQ_EXIT_L1             0x00000002  /* Request to Exit L1 */
++#define PCIE_ASPM_CR_REQ_ENTER_L1            0x00000004  /* Request to Enter L1 */
++
++/* Vendor Message DW0 Register */
++#define PCIE_VM_MSG_DW0(X)                  (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x50)
++#define PCIE_VM_MSG_DW0_TYPE                 0x0000001F  /* Message type */
++#define PCIE_VM_MSG_DW0_TYPE_S               0
++#define PCIE_VM_MSG_DW0_FORMAT               0x00000060  /* Format */
++#define PCIE_VM_MSG_DW0_FORMAT_S             5
++#define PCIE_VM_MSG_DW0_TC                   0x00007000  /* Traffic Class */
++#define PCIE_VM_MSG_DW0_TC_S                 12
++#define PCIE_VM_MSG_DW0_ATTR                 0x000C0000  /* Attributes */
++#define PCIE_VM_MSG_DW0_ATTR_S               18
++#define PCIE_VM_MSG_DW0_EP_TLP               0x00100000  /* Poisoned TLP */
++#define PCIE_VM_MSG_DW0_TD                   0x00200000  /* TLP Digest */
++#define PCIE_VM_MSG_DW0_LEN                  0xFFC00000  /* Length */
++#define PCIE_VM_MSG_DW0_LEN_S                22
++
++/* Format Definition */
++enum {
++    PCIE_VM_MSG_FORMAT_00 = 0,  /* 3DW Hdr, no data*/
++    PCIE_VM_MSG_FORMAT_01,      /* 4DW Hdr, no data */
++    PCIE_VM_MSG_FORMAT_10,      /* 3DW Hdr, with data */
++    PCIE_VM_MSG_FORMAT_11,      /* 4DW Hdr, with data */
++};
++
++/* Traffic Class Definition */
++enum {
++    PCIE_VM_MSG_TC0 = 0,
++    PCIE_VM_MSG_TC1,
++    PCIE_VM_MSG_TC2,
++    PCIE_VM_MSG_TC3,
++    PCIE_VM_MSG_TC4,
++    PCIE_VM_MSG_TC5,
++    PCIE_VM_MSG_TC6,
++    PCIE_VM_MSG_TC7,
++};
++
++/* Attributes Definition */
++enum {
++    PCIE_VM_MSG_ATTR_00 = 0,   /* RO and No Snoop cleared */
++    PCIE_VM_MSG_ATTR_01,       /* RO cleared , No Snoop set */
++    PCIE_VM_MSG_ATTR_10,       /* RO set, No Snoop cleared*/
++    PCIE_VM_MSG_ATTR_11,       /* RO and No Snoop set */
++};
++
++/* Payload Size Definition */
++#define PCIE_VM_MSG_LEN_MIN  0
++#define PCIE_VM_MSG_LEN_MAX  1024
++
++/* Vendor Message DW1 Register */
++#define PCIE_VM_MSG_DW1(X)                 (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x54)
++#define PCIE_VM_MSG_DW1_FUNC_NUM            0x00000070  /* Function Number */
++#define PCIE_VM_MSG_DW1_FUNC_NUM_S          8
++#define PCIE_VM_MSG_DW1_CODE                0x00FF0000  /* Message Code */
++#define PCIE_VM_MSG_DW1_CODE_S              16
++#define PCIE_VM_MSG_DW1_TAG                 0xFF000000  /* Tag */
++#define PCIE_VM_MSG_DW1_TAG_S               24
++
++#define PCIE_VM_MSG_DW2(X)                  (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x58)
++#define PCIE_VM_MSG_DW3(X)                  (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x5C)
++
++/* Vendor Message Request Register */
++#define PCIE_VM_MSG_REQR(X)                 (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x60)
++#define PCIE_VM_MSG_REQR_REQ                 0x00000001  /* Vendor Message Request */
++
++
++/* AHB Slave Side Band Control Register */
++#define PCIE_AHB_SSB(X)                     (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x70)
++#define PCIE_AHB_SSB_REQ_BCM                0x00000001 /* Slave Request BCM field */
++#define PCIE_AHB_SSB_REQ_EP                 0x00000002 /* Slave Request EP field */
++#define PCIE_AHB_SSB_REQ_TD                 0x00000004 /* Slave Request TD field */
++#define PCIE_AHB_SSB_REQ_ATTR               0x00000018 /* Slave Request Attribute number */
++#define PCIE_AHB_SSB_REQ_ATTR_S             3
++#define PCIE_AHB_SSB_REQ_TC                 0x000000E0 /* Slave Request TC Field */
++#define PCIE_AHB_SSB_REQ_TC_S               5
++
++/* AHB Master SideBand Ctrl Register */
++#define PCIE_AHB_MSB(X)                     (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x74)
++#define PCIE_AHB_MSB_RESP_ATTR               0x00000003 /* Master Response Attribute number */
++#define PCIE_AHB_MSB_RESP_ATTR_S             0
++#define PCIE_AHB_MSB_RESP_BAD_EOT            0x00000004 /* Master Response Bad EOT field */
++#define PCIE_AHB_MSB_RESP_BCM                0x00000008 /* Master Response BCM field */
++#define PCIE_AHB_MSB_RESP_EP                 0x00000010 /* Master Response EP field */
++#define PCIE_AHB_MSB_RESP_TD                 0x00000020 /* Master Response TD field */
++#define PCIE_AHB_MSB_RESP_FUN_NUM            0x000003C0 /* Master Response Function number */
++#define PCIE_AHB_MSB_RESP_FUN_NUM_S          6
++
++/* AHB Control Register, fixed bus enumeration exception */
++#define PCIE_AHB_CTRL(X)                     (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x78)
++#define PCIE_AHB_CTRL_BUS_ERROR_SUPPRESS     0x00000001 
++
++/* Interrupt Enable Register */
++#define PCIE_IRNEN(X)                        (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0xF4)
++#define PCIE_IRNCR(X)                        (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0xF8)
++#define PCIE_IRNICR(X)                       (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0xFC)
++
++/* PCIe interrupt enable/control/capture register definition */
++#define PCIE_IRN_AER_REPORT                 0x00000001  /* AER Interrupt */
++#define PCIE_IRN_AER_MSIX                   0x00000002  /* Advanced Error MSI-X Interrupt */
++#define PCIE_IRN_PME                        0x00000004  /* PME Interrupt */
++#define PCIE_IRN_HOTPLUG                    0x00000008  /* Hotplug Interrupt */
++#define PCIE_IRN_RX_VDM_MSG                 0x00000010  /* Vendor-Defined Message Interrupt */
++#define PCIE_IRN_RX_CORRECTABLE_ERR_MSG     0x00000020  /* Correctable Error Message Interrupt */
++#define PCIE_IRN_RX_NON_FATAL_ERR_MSG       0x00000040  /* Non-fatal Error Message */
++#define PCIE_IRN_RX_FATAL_ERR_MSG           0x00000080  /* Fatal Error Message */
++#define PCIE_IRN_RX_PME_MSG                 0x00000100  /* PME Message Interrupt */
++#define PCIE_IRN_RX_PME_TURNOFF_ACK         0x00000200  /* PME Turnoff Ack Message Interrupt */
++#define PCIE_IRN_AHB_BR_FATAL_ERR           0x00000400  /* AHB Fatal Error Interrupt */
++#define PCIE_IRN_LINK_AUTO_BW_STATUS        0x00000800  /* Link Auto Bandwidth Status Interrupt */
++#define PCIE_IRN_BW_MGT                     0x00001000  /* Bandwidth Management Interrupt */
++#define PCIE_IRN_INTA                       0x00002000  /* INTA */
++#define PCIE_IRN_INTB                       0x00004000  /* INTB */
++#define PCIE_IRN_INTC                       0x00008000  /* INTC */
++#define PCIE_IRN_INTD                       0x00010000  /* INTD */
++#define PCIE_IRN_WAKEUP                     0x00020000  /* Wake up Interrupt */
++
++#define PCIE_RC_CORE_COMBINED_INT    (PCIE_IRN_AER_REPORT |  PCIE_IRN_AER_MSIX | PCIE_IRN_PME | \
++                                      PCIE_IRN_HOTPLUG | PCIE_IRN_RX_VDM_MSG | PCIE_IRN_RX_CORRECTABLE_ERR_MSG |\
++                                      PCIE_IRN_RX_NON_FATAL_ERR_MSG | PCIE_IRN_RX_FATAL_ERR_MSG | \
++                                      PCIE_IRN_RX_PME_MSG | PCIE_IRN_RX_PME_TURNOFF_ACK | PCIE_IRN_AHB_BR_FATAL_ERR | \
++                                      PCIE_IRN_LINK_AUTO_BW_STATUS | PCIE_IRN_BW_MGT)
++/* PCIe RC Configuration Register */
++#define PCIE_VDID(X)                (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x00)
++
++/* Bit definition from pci_reg.h */
++#define PCIE_PCICMDSTS(X)           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x04)
++#define PCIE_CCRID(X)               (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x08)
++#define PCIE_CLSLTHTBR(X)           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x0C) /* EP only */
++/* BAR0, BAR1: only necessary if the bridge implements a device-specific register set or memory buffer */
++#define PCIE_BAR0(X)                (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x10) /* Not used*/
++#define PCIE_BAR1(X)                (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x14) /* Not used */
++
++#define PCIE_BNR(X)                 (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x18) /* Mandatory */
++/* Bus Number Register bits */
++#define PCIE_BNR_PRIMARY_BUS_NUM             0x000000FF
++#define PCIE_BNR_PRIMARY_BUS_NUM_S           0
++#define PCIE_PNR_SECONDARY_BUS_NUM           0x0000FF00
++#define PCIE_PNR_SECONDARY_BUS_NUM_S         8
++#define PCIE_PNR_SUB_BUS_NUM                 0x00FF0000
++#define PCIE_PNR_SUB_BUS_NUM_S               16
++
++/* IO Base/Limit Register bits */
++#define PCIE_IOBLSECS(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x1C)  /* RC only */
++#define PCIE_IOBLSECS_32BIT_IO_ADDR             0x00000001
++#define PCIE_IOBLSECS_IO_BASE_ADDR              0x000000F0
++#define PCIE_IOBLSECS_IO_BASE_ADDR_S            4
++#define PCIE_IOBLSECS_32BIT_IOLIMT              0x00000100
++#define PCIE_IOBLSECS_IO_LIMIT_ADDR             0x0000F000
++#define PCIE_IOBLSECS_IO_LIMIT_ADDR_S           12
++
++/* Non-prefetchable Memory Base/Limit Register bit */
++#define PCIE_MBML(X)                           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x20)  /* RC only */
++#define PCIE_MBML_MEM_BASE_ADDR                 0x0000FFF0
++#define PCIE_MBML_MEM_BASE_ADDR_S               4
++#define PCIE_MBML_MEM_LIMIT_ADDR                0xFFF00000
++#define PCIE_MBML_MEM_LIMIT_ADDR_S              20
++
++/* Prefetchable Memory Base/Limit Register bit */
++#define PCIE_PMBL(X)                           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x24)  /* RC only */
++#define PCIE_PMBL_64BIT_ADDR                    0x00000001
++#define PCIE_PMBL_UPPER_12BIT                   0x0000FFF0
++#define PCIE_PMBL_UPPER_12BIT_S                 4
++#define PCIE_PMBL_E64MA                         0x00010000
++#define PCIE_PMBL_END_ADDR                      0xFFF00000
++#define PCIE_PMBL_END_ADDR_S                    20
++#define PCIE_PMBU32(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x28)  /* RC only */
++#define PCIE_PMLU32(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x2C)  /* RC only */
++
++/* I/O Base/Limit Upper 16 bits register */
++#define PCIE_IO_BANDL(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x30)  /* RC only */
++#define PCIE_IO_BANDL_UPPER_16BIT_IO_BASE        0x0000FFFF
++#define PCIE_IO_BANDL_UPPER_16BIT_IO_BASE_S      0
++#define PCIE_IO_BANDL_UPPER_16BIT_IO_LIMIT       0xFFFF0000
++#define PCIE_IO_BANDL_UPPER_16BIT_IO_LIMIT_S     16
++
++#define PCIE_CPR(X)                            (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x34)
++#define PCIE_EBBAR(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x38)
++
++/* Interrupt and Secondary Bridge Control Register */
++#define PCIE_INTRBCTRL(X)                      (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x3C)
++
++#define PCIE_INTRBCTRL_INT_LINE                 0x000000FF
++#define PCIE_INTRBCTRL_INT_LINE_S               0
++#define PCIE_INTRBCTRL_INT_PIN                  0x0000FF00
++#define PCIE_INTRBCTRL_INT_PIN_S                8
++#define PCIE_INTRBCTRL_PARITY_ERR_RESP_ENABLE   0x00010000    /* #PERR */
++#define PCIE_INTRBCTRL_SERR_ENABLE              0x00020000    /* #SERR */
++#define PCIE_INTRBCTRL_ISA_ENABLE               0x00040000    /* ISA enable, IO 64KB only */
++#define PCIE_INTRBCTRL_VGA_ENABLE               0x00080000    /* VGA enable */
++#define PCIE_INTRBCTRL_VGA_16BIT_DECODE         0x00100000    /* VGA 16bit decode */
++#define PCIE_INTRBCTRL_RST_SECONDARY_BUS        0x00400000    /* Secondary bus reset, hot reset, 1ms */
++/* Others are read only */
++enum {
++    PCIE_INTRBCTRL_INT_NON = 0,
++    PCIE_INTRBCTRL_INTA,
++    PCIE_INTRBCTRL_INTB,
++    PCIE_INTRBCTRL_INTC,
++    PCIE_INTRBCTRL_INTD,
++};
++
++#define PCIE_PM_CAPR(X)                  (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x40)
++
++/* Power Management Control and Status Register */
++#define PCIE_PM_CSR(X)                   (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x44)
++
++#define PCIE_PM_CSR_POWER_STATE           0x00000003   /* Power State */
++#define PCIE_PM_CSR_POWER_STATE_S         0
++#define PCIE_PM_CSR_SW_RST                0x00000008   /* Soft Reset Enabled */
++#define PCIE_PM_CSR_PME_ENABLE            0x00000100   /* PME Enable */
++#define PCIE_PM_CSR_PME_STATUS            0x00008000   /* PME status */
++
++/* MSI Capability Register for EP */
++#define PCIE_MCAPR(X)                    (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x50)
++
++#define PCIE_MCAPR_MSI_CAP_ID             0x000000FF  /* MSI Capability ID */
++#define PCIE_MCAPR_MSI_CAP_ID_S           0
++#define PCIE_MCAPR_MSI_NEXT_CAP_PTR       0x0000FF00  /* Next Capability Pointer */
++#define PCIE_MCAPR_MSI_NEXT_CAP_PTR_S     8
++#define PCIE_MCAPR_MSI_ENABLE             0x00010000  /* MSI Enable */
++#define PCIE_MCAPR_MULTI_MSG_CAP          0x000E0000  /* Multiple Message Capable */
++#define PCIE_MCAPR_MULTI_MSG_CAP_S        17
++#define PCIE_MCAPR_MULTI_MSG_ENABLE       0x00700000  /* Multiple Message Enable */
++#define PCIE_MCAPR_MULTI_MSG_ENABLE_S     20
++#define PCIE_MCAPR_ADDR64_CAP             0x00800000  /* 64-bit Address Capable */
++
++/* MSI Message Address Register */
++#define PCIE_MA(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x54)
++
++#define PCIE_MA_ADDR_MASK                 0xFFFFFFFC  /* Message Address */
++
++/* MSI Message Upper Address Register */
++#define PCIE_MUA(X)                      (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x58)
++
++/* MSI Message Data Register */
++#define PCIE_MD(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x5C)
++
++#define PCIE_MD_DATA                      0x0000FFFF  /* Message Data */
++#define PCIE_MD_DATA_S                    0
++
++/* PCI Express Capability Register */
++#define PCIE_XCAP(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x70)
++
++#define PCIE_XCAP_ID                      0x000000FF  /* PCI Express Capability ID */
++#define PCIE_XCAP_ID_S                    0
++#define PCIE_XCAP_NEXT_CAP                0x0000FF00  /* Next Capability Pointer */
++#define PCIE_XCAP_NEXT_CAP_S              8
++#define PCIE_XCAP_VER                     0x000F0000  /* PCI Express Capability Version */
++#define PCIE_XCAP_VER_S                   16
++#define PCIE_XCAP_DEV_PORT_TYPE           0x00F00000  /* Device Port Type */
++#define PCIE_XCAP_DEV_PORT_TYPE_S         20
++#define PCIE_XCAP_SLOT_IMPLEMENTED        0x01000000  /* Slot Implemented */
++#define PCIE_XCAP_MSG_INT_NUM             0x3E000000  /* Interrupt Message Number */
++#define PCIE_XCAP_MSG_INT_NUM_S           25
++
++/* Device Capability Register */
++#define PCIE_DCAP(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x74)
++
++#define PCIE_DCAP_MAX_PAYLOAD_SIZE        0x00000007   /* Max Payload size */
++#define PCIE_DCAP_MAX_PAYLOAD_SIZE_S      0
++#define PCIE_DCAP_PHANTOM_FUNC            0x00000018   /* Phantom Function, not supported */
++#define PCIE_DCAP_PHANTOM_FUNC_S          3
++#define PCIE_DCAP_EXT_TAG                 0x00000020   /* Extended Tag Field */
++#define PCIE_DCAP_EP_L0S_LATENCY          0x000001C0   /* EP L0s latency only */
++#define PCIE_DCAP_EP_L0S_LATENCY_S        6
++#define PCIE_DCAP_EP_L1_LATENCY           0x00000E00   /* EP L1 latency only */
++#define PCIE_DCAP_EP_L1_LATENCY_S         9
++#define PCIE_DCAP_ROLE_BASE_ERR_REPORT    0x00008000   /* Role Based ERR */
++
++/* Maximum payload size supported */
++enum {
++    PCIE_MAX_PAYLOAD_128 = 0,
++    PCIE_MAX_PAYLOAD_256,
++    PCIE_MAX_PAYLOAD_512,
++    PCIE_MAX_PAYLOAD_1024,
++    PCIE_MAX_PAYLOAD_2048,
++    PCIE_MAX_PAYLOAD_4096,
++};
++
++/* Device Control and Status Register */
++#define PCIE_DCTLSTS(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x78)
++
++#define PCIE_DCTLSTS_CORRECTABLE_ERR_EN        0x00000001   /* COR-ERR */
++#define PCIE_DCTLSTS_NONFATAL_ERR_EN           0x00000002   /* Non-fatal ERR */
++#define PCIE_DCTLSTS_FATAL_ERR_EN              0x00000004   /* Fatal ERR */
++#define PCIE_DCTLSYS_UR_REQ_EN                 0x00000008   /* UR ERR */
++#define PCIE_DCTLSTS_RELAXED_ORDERING_EN       0x00000010   /* Enable relaxing ordering */
++#define PCIE_DCTLSTS_MAX_PAYLOAD_SIZE          0x000000E0   /* Max payload mask */
++#define PCIE_DCTLSTS_MAX_PAYLOAD_SIZE_S        5
++#define PCIE_DCTLSTS_EXT_TAG_EN                0x00000100   /* Extended tag field */
++#define PCIE_DCTLSTS_PHANTOM_FUNC_EN           0x00000200   /* Phantom Function Enable */
++#define PCIE_DCTLSTS_AUX_PM_EN                 0x00000400   /* AUX Power PM Enable */
++#define PCIE_DCTLSTS_NO_SNOOP_EN               0x00000800   /* Enable no snoop, except root port*/
++#define PCIE_DCTLSTS_MAX_READ_SIZE             0x00007000   /* Max Read Request size*/
++#define PCIE_DCTLSTS_MAX_READ_SIZE_S           12
++#define PCIE_DCTLSTS_CORRECTABLE_ERR           0x00010000   /* COR-ERR Detected */
++#define PCIE_DCTLSTS_NONFATAL_ERR              0x00020000   /* Non-Fatal ERR Detected */
++#define PCIE_DCTLSTS_FATAL_ER                  0x00040000   /* Fatal ERR Detected */
++#define PCIE_DCTLSTS_UNSUPPORTED_REQ           0x00080000   /* UR Detected */
++#define PCIE_DCTLSTS_AUX_POWER                 0x00100000   /* Aux Power Detected */
++#define PCIE_DCTLSTS_TRANSACT_PENDING          0x00200000   /* Transaction pending */
++
++#define PCIE_DCTLSTS_ERR_EN      (PCIE_DCTLSTS_CORRECTABLE_ERR_EN | \
++                                  PCIE_DCTLSTS_NONFATAL_ERR_EN | PCIE_DCTLSTS_FATAL_ERR_EN | \
++                                  PCIE_DCTLSYS_UR_REQ_EN)
++
++/* Link Capability Register */
++#define PCIE_LCAP(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7C)
++#define PCIE_LCAP_MAX_LINK_SPEED               0x0000000F  /* Max link speed, 0x1 by default */
++#define PCIE_LCAP_MAX_LINK_SPEED_S             0
++#define PCIE_LCAP_MAX_LENGTH_WIDTH             0x000003F0  /* Maximum Length Width */
++#define PCIE_LCAP_MAX_LENGTH_WIDTH_S           4
++#define PCIE_LCAP_ASPM_LEVEL                   0x00000C00  /* Active State Link PM Support */
++#define PCIE_LCAP_ASPM_LEVEL_S                 10
++#define PCIE_LCAP_L0S_EIXT_LATENCY             0x00007000  /* L0s Exit Latency */
++#define PCIE_LCAP_L0S_EIXT_LATENCY_S           12
++#define PCIE_LCAP_L1_EXIT_LATENCY              0x00038000  /* L1 Exit Latency */
++#define PCIE_LCAP_L1_EXIT_LATENCY_S            15
++#define PCIE_LCAP_CLK_PM                       0x00040000  /* Clock Power Management */
++#define PCIE_LCAP_SDER                         0x00080000  /* Surprise Down Error Reporting */
++#define PCIE_LCAP_DLL_ACTIVE_REPROT            0x00100000  /* Data Link Layer Active Reporting Capable */
++#define PCIE_LCAP_PORT_NUM                     0xFF000000   /* Port number */
++#define PCIE_LCAP_PORT_NUM_S                   24
++
++/* Maximum Length width definition */
++#define PCIE_MAX_LENGTH_WIDTH_RES  0x00
++#define PCIE_MAX_LENGTH_WIDTH_X1   0x01  /* Default */
++#define PCIE_MAX_LENGTH_WIDTH_X2   0x02
++#define PCIE_MAX_LENGTH_WIDTH_X4   0x04
++#define PCIE_MAX_LENGTH_WIDTH_X8   0x08
++#define PCIE_MAX_LENGTH_WIDTH_X12  0x0C
++#define PCIE_MAX_LENGTH_WIDTH_X16  0x10
++#define PCIE_MAX_LENGTH_WIDTH_X32  0x20
++
++/* Active State Link PM definition */
++enum {
++    PCIE_ASPM_RES0                = 0,
++    PCIE_ASPM_L0S_ENTRY_SUPPORT,        /* L0s */
++    PCIE_ASPM_RES1,
++    PCIE_ASPM_L0S_L1_ENTRY_SUPPORT,     /* L0s and L1, default */
++};
++
++/* L0s Exit Latency definition */
++enum {
++    PCIE_L0S_EIXT_LATENCY_L64NS    = 0, /* < 64 ns */
++    PCIE_L0S_EIXT_LATENCY_B64A128,      /* > 64 ns < 128 ns */
++    PCIE_L0S_EIXT_LATENCY_B128A256,     /* > 128 ns < 256 ns */
++    PCIE_L0S_EIXT_LATENCY_B256A512,     /* > 256 ns < 512 ns */
++    PCIE_L0S_EIXT_LATENCY_B512TO1U,     /* > 512 ns < 1 us */
++    PCIE_L0S_EIXT_LATENCY_B1A2U,        /* > 1 us < 2 us */
++    PCIE_L0S_EIXT_LATENCY_B2A4U,        /* > 2 us < 4 us */
++    PCIE_L0S_EIXT_LATENCY_M4US,         /* > 4 us  */
++};
++
++/* L1 Exit Latency definition */
++enum {
++    PCIE_L1_EXIT_LATENCY_L1US  = 0,  /* < 1 us */
++    PCIE_L1_EXIT_LATENCY_B1A2,       /* > 1 us < 2 us */
++    PCIE_L1_EXIT_LATENCY_B2A4,       /* > 2 us < 4 us */
++    PCIE_L1_EXIT_LATENCY_B4A8,       /* > 4 us < 8 us */
++    PCIE_L1_EXIT_LATENCY_B8A16,      /* > 8 us < 16 us */
++    PCIE_L1_EXIT_LATENCY_B16A32,     /* > 16 us < 32 us */
++    PCIE_L1_EXIT_LATENCY_B32A64,     /* > 32 us < 64 us */
++    PCIE_L1_EXIT_LATENCY_M64US,      /* > 64 us */
++};
++
++/* Link Control and Status Register */
++#define PCIE_LCTLSTS(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x80)
++#define PCIE_LCTLSTS_ASPM_ENABLE            0x00000003  /* Active State Link PM Control */
++#define PCIE_LCTLSTS_ASPM_ENABLE_S          0
++#define PCIE_LCTLSTS_RCB128                 0x00000008  /* Read Completion Boundary 128*/
++#define PCIE_LCTLSTS_LINK_DISABLE           0x00000010  /* Link Disable */
++#define PCIE_LCTLSTS_RETRIAN_LINK           0x00000020  /* Retrain Link */
++#define PCIE_LCTLSTS_COM_CLK_CFG            0x00000040  /* Common Clock Configuration */
++#define PCIE_LCTLSTS_EXT_SYNC               0x00000080  /* Extended Synch */
++#define PCIE_LCTLSTS_CLK_PM_EN              0x00000100  /* Enable Clock Power Management */
++#define PCIE_LCTLSTS_LINK_SPEED             0x000F0000  /* Link Speed */
++#define PCIE_LCTLSTS_LINK_SPEED_S           16
++#define PCIE_LCTLSTS_NEGOTIATED_LINK_WIDTH  0x03F00000  /* Negotiated Link Width */
++#define PCIE_LCTLSTS_NEGOTIATED_LINK_WIDTH_S 20
++#define PCIE_LCTLSTS_RETRAIN_PENDING        0x08000000  /* Link training is ongoing */
++#define PCIE_LCTLSTS_SLOT_CLK_CFG           0x10000000  /* Slot Clock Configuration */
++#define PCIE_LCTLSTS_DLL_ACTIVE             0x20000000  /* Data Link Layer Active */
++
++/* Slot Capabilities Register */
++#define PCIE_SLCAP(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x84)
++
++/* Slot Capabilities */
++#define PCIE_SLCTLSTS(X)                    (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x88)
++
++/* Root Control and Capability Register */
++#define PCIE_RCTLCAP(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x8C)
++#define PCIE_RCTLCAP_SERR_ON_CORRECTABLE_ERR  0x00000001   /* #SERR on COR-ERR */
++#define PCIE_RCTLCAP_SERR_ON_NONFATAL_ERR     0x00000002   /* #SERR on Non-Fatal ERR */
++#define PCIE_RCTLCAP_SERR_ON_FATAL_ERR        0x00000004   /* #SERR on Fatal ERR */
++#define PCIE_RCTLCAP_PME_INT_EN               0x00000008   /* PME Interrupt Enable */
++#define PCIE_RCTLCAP_SERR_ENABLE    (PCIE_RCTLCAP_SERR_ON_CORRECTABLE_ERR | \
++                                     PCIE_RCTLCAP_SERR_ON_NONFATAL_ERR | PCIE_RCTLCAP_SERR_ON_FATAL_ERR)
++/* Root Status Register */
++#define PCIE_RSTS(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x90)
++#define PCIE_RSTS_PME_REQ_ID                   0x0000FFFF   /* PME Request ID */
++#define PCIE_RSTS_PME_REQ_ID_S                 0
++#define PCIE_RSTS_PME_STATUS                   0x00010000   /* PME Status */
++#define PCIE_RSTS_PME_PENDING                  0x00020000   /* PME Pending */
++
++/* PCI Express Enhanced Capability Header */
++#define PCIE_ENHANCED_CAP(X)                (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x100)
++#define PCIE_ENHANCED_CAP_ID                 0x0000FFFF  /* PCI Express Extended Capability ID */
++#define PCIE_ENHANCED_CAP_ID_S               0
++#define PCIE_ENHANCED_CAP_VER                0x000F0000  /* Capability Version */
++#define PCIE_ENHANCED_CAP_VER_S              16
++#define PCIE_ENHANCED_CAP_NEXT_OFFSET        0xFFF00000  /* Next Capability Offset */
++#define PCIE_ENHANCED_CAP_NEXT_OFFSET_S      20
++
++/* Uncorrectable Error Status Register */
++#define PCIE_UES_R(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x104)
++#define PCIE_DATA_LINK_PROTOCOL_ERR          0x00000010  /* Data Link Protocol Error Status */
++#define PCIE_SURPRISE_DOWN_ERROR             0x00000020  /* Surprise Down Error Status */
++#define PCIE_POISONED_TLP                    0x00001000  /* Poisoned TLP Status */
++#define PCIE_FC_PROTOCOL_ERR                 0x00002000  /* Flow Control Protocol Error Status */
++#define PCIE_COMPLETION_TIMEOUT              0x00004000  /* Completion Timeout Status */
++#define PCIE_COMPLETOR_ABORT                 0x00008000  /* Completer Abort Error */
++#define PCIE_UNEXPECTED_COMPLETION           0x00010000  /* Unexpected Completion Status */
++#define PCIE_RECEIVER_OVERFLOW               0x00020000  /* Receive Overflow Status */
++#define PCIE_MALFORNED_TLP                   0x00040000  /* Malformed TLP Status */
++#define PCIE_ECRC_ERR                        0x00080000  /* ECRC Error Status */
++#define PCIE_UR_REQ                          0x00100000  /* Unsupported Request Error Status */
++#define PCIE_ALL_UNCORRECTABLE_ERR    (PCIE_DATA_LINK_PROTOCOL_ERR | PCIE_SURPRISE_DOWN_ERROR | \
++                         PCIE_POISONED_TLP | PCIE_FC_PROTOCOL_ERR | PCIE_COMPLETION_TIMEOUT |   \
++                         PCIE_COMPLETOR_ABORT | PCIE_UNEXPECTED_COMPLETION | PCIE_RECEIVER_OVERFLOW |\
++                         PCIE_MALFORNED_TLP | PCIE_ECRC_ERR | PCIE_UR_REQ)
++
++/* Uncorrectable Error Mask Register, Mask means no report */
++#define PCIE_UEMR(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x108)
++
++/* Uncorrectable Error Severity Register */
++#define PCIE_UESR(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x10C)
++
++/* Correctable Error Status Register */
++#define PCIE_CESR(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x110)
++#define PCIE_RX_ERR                          0x00000001  /* Receive Error Status */
++#define PCIE_BAD_TLP                         0x00000040  /* Bad TLP Status */
++#define PCIE_BAD_DLLP                        0x00000080  /* Bad DLLP Status */
++#define PCIE_REPLAY_NUM_ROLLOVER             0x00000100  /* Replay Number Rollover Status */
++#define PCIE_REPLAY_TIMER_TIMEOUT_ERR        0x00001000  /* Replay Timer Timeout Status */
++#define PCIE_ADVISORY_NONFTAL_ERR            0x00002000  /* Advisory Non-Fatal Error Status */
++#define PCIE_CORRECTABLE_ERR        (PCIE_RX_ERR | PCIE_BAD_TLP | PCIE_BAD_DLLP | PCIE_REPLAY_NUM_ROLLOVER |\
++                                     PCIE_REPLAY_TIMER_TIMEOUT_ERR | PCIE_ADVISORY_NONFTAL_ERR)
++
++/* Correctable Error Mask Register */
++#define PCIE_CEMR(X)                        (volatile u32*)(PCIE_RC_CFG_BASE + 0x114)
++
++/* Advanced Error Capabilities and Control Register */
++#define PCIE_AECCR(X)                       (volatile u32*)(PCIE_RC_CFG_BASE + 0x118)
++#define PCIE_AECCR_FIRST_ERR_PTR            0x0000001F  /* First Error Pointer */
++#define PCIE_AECCR_FIRST_ERR_PTR_S          0
++#define PCIE_AECCR_ECRC_GEN_CAP             0x00000020  /* ECRC Generation Capable */
++#define PCIE_AECCR_ECRC_GEN_EN              0x00000040  /* ECRC Generation Enable */
++#define PCIE_AECCR_ECRC_CHECK_CAP           0x00000080  /* ECRC Check Capable */
++#define PCIE_AECCR_ECRC_CHECK_EN            0x00000100  /* ECRC Check Enable */
++
++/* Header Log Register 1 */
++#define PCIE_HLR1(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x11C)
++
++/* Header Log Register 2 */
++#define PCIE_HLR2(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x120)
++
++/* Header Log Register 3 */
++#define PCIE_HLR3(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x124)
++
++/* Header Log Register 4 */
++#define PCIE_HLR4(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x128)
++
++/* Root Error Command Register */
++#define PCIE_RECR(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x12C)
++#define PCIE_RECR_CORRECTABLE_ERR_REPORT_EN  0x00000001 /* COR-ERR */
++#define PCIE_RECR_NONFATAL_ERR_REPORT_EN     0x00000002 /* Non-Fatal ERR */
++#define PCIE_RECR_FATAL_ERR_REPORT_EN        0x00000004 /* Fatal ERR */
++#define PCIE_RECR_ERR_REPORT_EN  (PCIE_RECR_CORRECTABLE_ERR_REPORT_EN | \
++                PCIE_RECR_NONFATAL_ERR_REPORT_EN | PCIE_RECR_FATAL_ERR_REPORT_EN)
++
++/* Root Error Status Register */
++#define PCIE_RESR(X)                            (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x130)
++#define PCIE_RESR_CORRECTABLE_ERR                0x00000001   /* COR-ERR Received */
++#define PCIE_RESR_MULTI_CORRECTABLE_ERR          0x00000002   /* Multiple COR-ERR Received */
++#define PCIE_RESR_FATAL_NOFATAL_ERR              0x00000004   /* ERR Fatal/Non-Fatal Received */
++#define PCIE_RESR_MULTI_FATAL_NOFATAL_ERR        0x00000008   /* Multiple ERR Fatal/Non-Fatal Received */
++#define PCIE_RESR_FIRST_UNCORRECTABLE_FATAL_ERR  0x00000010   /* First UN-COR Fatal */
++#define PCIR_RESR_NON_FATAL_ERR                  0x00000020   /* Non-Fatal Error Message Received */
++#define PCIE_RESR_FATAL_ERR                      0x00000040   /* Fatal Message Received */
++#define PCIE_RESR_AER_INT_MSG_NUM                0xF8000000   /* Advanced Error Interrupt Message Number */
++#define PCIE_RESR_AER_INT_MSG_NUM_S              27
++
++/* Error Source Identification Register */
++#define PCIE_ESIR(X)                            (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x134)
++#define PCIE_ESIR_CORRECTABLE_ERR_SRC_ID         0x0000FFFF
++#define PCIE_ESIR_CORRECTABLE_ERR_SRC_ID_S       0
++#define PCIE_ESIR_FATAL_NON_FATAL_SRC_ID         0xFFFF0000
++#define PCIE_ESIR_FATAL_NON_FATAL_SRC_ID_S       16
++
++/* VC Enhanced Capability Header */
++#define PCIE_VC_ECH(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x140)
++
++/* Port VC Capability Register */
++#define PCIE_PVC1(X)                            (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x144)
++#define PCIE_PVC1_EXT_VC_CNT                    0x00000007  /* Extended VC Count */
++#define PCIE_PVC1_EXT_VC_CNT_S                  0
++#define PCIE_PVC1_LOW_PRI_EXT_VC_CNT            0x00000070  /* Low Priority Extended VC Count */
++#define PCIE_PVC1_LOW_PRI_EXT_VC_CNT_S          4
++#define PCIE_PVC1_REF_CLK                       0x00000300  /* Reference Clock */
++#define PCIE_PVC1_REF_CLK_S                     8
++#define PCIE_PVC1_PORT_ARB_TAB_ENTRY_SIZE       0x00000C00  /* Port Arbitration Table Entry Size */
++#define PCIE_PVC1_PORT_ARB_TAB_ENTRY_SIZE_S     10
++
++/* Extended Virtual Channel Count Definition */
++#define PCIE_EXT_VC_CNT_MIN   0
++#define PCIE_EXT_VC_CNT_MAX   7
++
++/* Port Arbitration Table Entry Size Definition */
++enum {
++    PCIE_PORT_ARB_TAB_ENTRY_SIZE_S1BIT = 0,
++    PCIE_PORT_ARB_TAB_ENTRY_SIZE_S2BIT,
++    PCIE_PORT_ARB_TAB_ENTRY_SIZE_S4BIT,
++    PCIE_PORT_ARB_TAB_ENTRY_SIZE_S8BIT,
++};
++
++/* Port VC Capability Register 2 */
++#define PCIE_PVC2(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x148)
++#define PCIE_PVC2_VC_ARB_16P_FIXED_WRR      0x00000001  /* HW Fixed arbitration, 16 phase WRR */
++#define PCIE_PVC2_VC_ARB_32P_WRR            0x00000002  /* 32 phase WRR */
++#define PCIE_PVC2_VC_ARB_64P_WRR            0x00000004  /* 64 phase WRR */
++#define PCIE_PVC2_VC_ARB_128P_WRR           0x00000008  /* 128 phase WRR */
++#define PCIE_PVC2_VC_ARB_WRR                0x0000000F
++#define PCIE_PVC2_VC_ARB_TAB_OFFSET         0xFF000000  /* VC arbitration table offset, not supported */
++#define PCIE_PVC2_VC_ARB_TAB_OFFSET_S       24
++
++/* Port VC Control and Status Register */     
++#define PCIE_PVCCRSR(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x14C)
++#define PCIE_PVCCRSR_LOAD_VC_ARB_TAB         0x00000001  /* Load VC Arbitration Table */
++#define PCIE_PVCCRSR_VC_ARB_SEL              0x0000000E  /* VC Arbitration Select */
++#define PCIE_PVCCRSR_VC_ARB_SEL_S            1
++#define PCIE_PVCCRSR_VC_ARB_TAB_STATUS       0x00010000  /* Arbitration Status */
++
++/* VC0 Resource Capability Register */
++#define PCIE_VC0_RC(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x150)
++#define PCIE_VC0_RC_PORT_ARB_HW_FIXED        0x00000001  /* HW Fixed arbitration */
++#define PCIE_VC0_RC_PORT_ARB_32P_WRR         0x00000002  /* 32 phase WRR */
++#define PCIE_VC0_RC_PORT_ARB_64P_WRR         0x00000004  /* 64 phase WRR */
++#define PCIE_VC0_RC_PORT_ARB_128P_WRR        0x00000008  /* 128 phase WRR */
++#define PCIE_VC0_RC_PORT_ARB_TM_128P_WRR     0x00000010  /* Time-based 128 phase WRR */
++#define PCIE_VC0_RC_PORT_ARB_TM_256P_WRR     0x00000020  /* Time-based 256 phase WRR */
++#define PCIE_VC0_RC_PORT_ARB          (PCIE_VC0_RC_PORT_ARB_HW_FIXED | PCIE_VC0_RC_PORT_ARB_32P_WRR |\
++                        PCIE_VC0_RC_PORT_ARB_64P_WRR | PCIE_VC0_RC_PORT_ARB_128P_WRR | \
++                        PCIE_VC0_RC_PORT_ARB_TM_128P_WRR | PCIE_VC0_RC_PORT_ARB_TM_256P_WRR)
++
++#define PCIE_VC0_RC_REJECT_SNOOP             0x00008000  /* Reject Snoop Transaction */
++#define PCIE_VC0_RC_MAX_TIMESLOTS            0x007F0000  /* Maximum time Slots */
++#define PCIE_VC0_RC_MAX_TIMESLOTS_S          16
++#define PCIE_VC0_RC_PORT_ARB_TAB_OFFSET      0xFF000000  /* Port Arbitration Table Offset */
++#define PCIE_VC0_RC_PORT_ARB_TAB_OFFSET_S    24
++
++/* VC0 Resource Control Register */
++#define PCIE_VC0_RC0(X)                      (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x154)
++#define PCIE_VC0_RC0_TVM0                    0x00000001  /* TC0 and VC0 */
++#define PCIE_VC0_RC0_TVM1                    0x00000002  /* TC1 and VC1 */
++#define PCIE_VC0_RC0_TVM2                    0x00000004  /* TC2 and VC2 */
++#define PCIE_VC0_RC0_TVM3                    0x00000008  /* TC3 and VC3 */
++#define PCIE_VC0_RC0_TVM4                    0x00000010  /* TC4 and VC4 */
++#define PCIE_VC0_RC0_TVM5                    0x00000020  /* TC5 and VC5 */
++#define PCIE_VC0_RC0_TVM6                    0x00000040  /* TC6 and VC6 */
++#define PCIE_VC0_RC0_TVM7                    0x00000080  /* TC7 and VC7 */
++#define PCIE_VC0_RC0_TC_VC                   0x000000FF  /* TC/VC mask */
++
++#define PCIE_VC0_RC0_LOAD_PORT_ARB_TAB       0x00010000  /* Load Port Arbitration Table */
++#define PCIE_VC0_RC0_PORT_ARB_SEL            0x000E0000  /* Port Arbitration Select */
++#define PCIE_VC0_RC0_PORT_ARB_SEL_S          17
++#define PCIE_VC0_RC0_VC_ID                   0x07000000  /* VC ID */
++#define PCIE_VC0_RC0_VC_ID_S                 24
++#define PCIE_VC0_RC0_VC_EN                   0x80000000  /* VC Enable */
++
++/* VC0 Resource Status Register */
++#define PCIE_VC0_RSR0(X)                     (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x158)
++#define PCIE_VC0_RSR0_PORT_ARB_TAB_STATUS    0x00010000  /* Port Arbitration Table Status, not used */
++#define PCIE_VC0_RSR0_VC_NEG_PENDING         0x00020000  /* VC Negotiation Pending */
++
++/* Ack Latency Timer and Replay Timer Register */
++#define PCIE_ALTRT(X)                         (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x700)
++#define PCIE_ALTRT_ROUND_TRIP_LATENCY_LIMIT   0x0000FFFF  /* Round Trip Latency Time Limit */
++#define PCIE_ALTRT_ROUND_TRIP_LATENCY_LIMIT_S 0
++#define PCIE_ALTRT_REPLAY_TIME_LIMIT          0xFFFF0000  /* Replay Time Limit */
++#define PCIE_ALTRT_REPLAY_TIME_LIMIT_S        16
++
++/* Other Message Register */
++#define PCIE_OMR(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x704)
++
++/* Port Force Link Register */
++#define PCIE_PFLR(X)                         (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x708)
++#define PCIE_PFLR_LINK_NUM                   0x000000FF  /* Link Number */
++#define PCIE_PFLR_LINK_NUM_S                 0
++#define PCIE_PFLR_FORCE_LINK                 0x00008000  /* Force link */
++#define PCIE_PFLR_LINK_STATE                 0x003F0000  /* Link State */
++#define PCIE_PFLR_LINK_STATE_S               16
++#define PCIE_PFLR_LOW_POWER_ENTRY_CNT        0xFF000000  /* Low Power Entrance Count, only for EP */
++#define PCIE_PFLR_LOW_POWER_ENTRY_CNT_S      24
++
++/* Ack Frequency Register */
++#define PCIE_AFR(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x70C)
++#define PCIE_AFR_AF                          0x000000FF  /* Ack Frequency */
++#define PCIE_AFR_AF_S                        0
++#define PCIE_AFR_FTS_NUM                     0x0000FF00  /* The number of Fast Training Sequence from L0S to L0 */
++#define PCIE_AFR_FTS_NUM_S                   8
++#define PCIE_AFR_COM_FTS_NUM                 0x00FF0000  /* N_FTS; when common clock is used*/
++#define PCIE_AFR_COM_FTS_NUM_S               16
++#define PCIE_AFR_L0S_ENTRY_LATENCY           0x07000000  /* L0s Entrance Latency */
++#define PCIE_AFR_L0S_ENTRY_LATENCY_S         24
++#define PCIE_AFR_L1_ENTRY_LATENCY            0x38000000  /* L1 Entrance Latency */
++#define PCIE_AFR_L1_ENTRY_LATENCY_S          27
++#define PCIE_AFR_FTS_NUM_DEFAULT             32
++#define PCIE_AFR_L0S_ENTRY_LATENCY_DEFAULT   7
++#define PCIE_AFR_L1_ENTRY_LATENCY_DEFAULT    5
++
++/* Port Link Control Register */
++#define PCIE_PLCR(X)                         (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x710)
++#define PCIE_PLCR_OTHER_MSG_REQ              0x00000001  /* Other Message Request */
++#define PCIE_PLCR_SCRAMBLE_DISABLE           0x00000002  /* Scramble Disable */  
++#define PCIE_PLCR_LOOPBACK_EN                0x00000004  /* Loopback Enable */
++#define PCIE_PLCR_LTSSM_HOT_RST              0x00000008  /* Force LTSSM to the hot reset */
++#define PCIE_PLCR_DLL_LINK_EN                0x00000020  /* Enable Link initialization */
++#define PCIE_PLCR_FAST_LINK_SIM_EN           0x00000080  /* Sets all internal timers to fast mode for simulation purposes */
++#define PCIE_PLCR_LINK_MODE                  0x003F0000  /* Link Mode Enable Mask */
++#define PCIE_PLCR_LINK_MODE_S                16
++#define PCIE_PLCR_CORRUPTED_CRC_EN           0x02000000  /* Enabled Corrupt CRC */
++
++/* Lane Skew Register */
++#define PCIE_LSR(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x714)
++#define PCIE_LSR_LANE_SKEW_NUM               0x00FFFFFF  /* Insert Lane Skew for Transmit, not applicable */
++#define PCIE_LSR_LANE_SKEW_NUM_S             0
++#define PCIE_LSR_FC_DISABLE                  0x01000000  /* Disable of Flow Control */
++#define PCIE_LSR_ACKNAK_DISABLE              0x02000000  /* Disable of Ack/Nak */
++#define PCIE_LSR_LANE_DESKEW_DISABLE         0x80000000  /* Disable of Lane-to-Lane Skew */
++
++/* Symbol Number Register */
++#define PCIE_SNR(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x718)
++#define PCIE_SNR_TS                          0x0000000F  /* Number of TS Symbol */
++#define PCIE_SNR_TS_S                        0
++#define PCIE_SNR_SKP                         0x00000700  /* Number of SKP Symbol */
++#define PCIE_SNR_SKP_S                       8
++#define PCIE_SNR_REPLAY_TIMER                0x0007C000  /* Timer Modifier for Replay Timer */
++#define PCIE_SNR_REPLAY_TIMER_S              14
++#define PCIE_SNR_ACKNAK_LATENCY_TIMER        0x00F80000  /* Timer Modifier for Ack/Nak Latency Timer */
++#define PCIE_SNR_ACKNAK_LATENCY_TIMER_S      19
++#define PCIE_SNR_FC_TIMER                    0x1F000000  /* Timer Modifier for Flow Control Watchdog Timer */
++#define PCIE_SNR_FC_TIMER_S                  28
++
++/* Symbol Timer Register and Filter Mask Register 1 */
++#define PCIE_STRFMR(X)                      (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x71C)
++#define PCIE_STRFMR_SKP_INTERVAL            0x000007FF  /* SKP Interval Value */
++#define PCIE_STRFMR_SKP_INTERVAL_S          0
++#define PCIE_STRFMR_FC_WDT_DISABLE          0x00008000  /* Disable of FC Watchdog Timer */
++#define PCIE_STRFMR_TLP_FUNC_MISMATCH_OK    0x00010000  /* Mask Function Mismatch Filtering for Incoming Requests */
++#define PCIE_STRFMR_POISONED_TLP_OK         0x00020000  /* Mask Poisoned TLP Filtering */
++#define PCIE_STRFMR_BAR_MATCH_OK            0x00040000  /* Mask BAR Match Filtering */
++#define PCIE_STRFMR_TYPE1_CFG_REQ_OK        0x00080000  /* Mask Type 1 Configuration Request Filtering */
++#define PCIE_STRFMR_LOCKED_REQ_OK           0x00100000  /* Mask Locked Request Filtering */
++#define PCIE_STRFMR_CPL_TAG_ERR_RULES_OK    0x00200000  /* Mask Tag Error Rules for Received Completions */
++#define PCIE_STRFMR_CPL_REQUESTOR_ID_MISMATCH_OK 0x00400000  /* Mask Requester ID Mismatch Error for Received Completions */
++#define PCIE_STRFMR_CPL_FUNC_MISMATCH_OK         0x00800000  /* Mask Function Mismatch Error for Received Completions */
++#define PCIE_STRFMR_CPL_TC_MISMATCH_OK           0x01000000  /* Mask Traffic Class Mismatch Error for Received Completions */
++#define PCIE_STRFMR_CPL_ATTR_MISMATCH_OK         0x02000000  /* Mask Attribute Mismatch Error for Received Completions */
++#define PCIE_STRFMR_CPL_LENGTH_MISMATCH_OK       0x04000000  /* Mask Length Mismatch Error for Received Completions */
++#define PCIE_STRFMR_TLP_ECRC_ERR_OK              0x08000000  /* Mask ECRC Error Filtering */
++#define PCIE_STRFMR_CPL_TLP_ECRC_OK              0x10000000  /* Mask ECRC Error Filtering for Completions */
++#define PCIE_STRFMR_RX_TLP_MSG_NO_DROP           0x20000000  /* Send Message TLPs */
++#define PCIE_STRFMR_RX_IO_TRANS_ENABLE           0x40000000  /* Mask Filtering of received I/O Requests */
++#define PCIE_STRFMR_RX_CFG_TRANS_ENABLE          0x80000000  /* Mask Filtering of Received Configuration Requests */
++
++#define PCIE_DEF_SKP_INTERVAL    700             /* 1180 ~1538 , 125MHz * 2, 250MHz * 1 */
++
++/* Filter Masker Register 2 */
++#define PCIE_FMR2(X)                             (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x720)
++#define PCIE_FMR2_VENDOR_MSG0_PASSED_TO_TRGT1    0x00000001  /* Mask RADM Filtering and Error Handling Rules */
++#define PCIE_FMR2_VENDOR_MSG1_PASSED_TO_TRGT1    0x00000002  /* Mask RADM Filtering and Error Handling Rules */
++
++/* Debug Register 0 */
++#define PCIE_DBR0(X)                              (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x728)
++
++/* Debug Register 1 */
++#define PCIE_DBR1(X)                              (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x72C)
++
++/* Transmit Posted FC Credit Status Register */
++#define PCIE_TPFCS(X)                             (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x730)
++#define PCIE_TPFCS_TX_P_DATA_FC_CREDITS           0x00000FFF /* Transmit Posted Data FC Credits */
++#define PCIE_TPFCS_TX_P_DATA_FC_CREDITS_S         0
++#define PCIE_TPFCS_TX_P_HDR_FC_CREDITS            0x000FF000 /* Transmit Posted Header FC Credits */
++#define PCIE_TPFCS_TX_P_HDR_FC_CREDITS_S          12
++
++/* Transmit Non-Posted FC Credit Status */
++#define PCIE_TNPFCS(X)                            (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x734)
++#define PCIE_TNPFCS_TX_NP_DATA_FC_CREDITS         0x00000FFF /* Transmit Non-Posted Data FC Credits */
++#define PCIE_TNPFCS_TX_NP_DATA_FC_CREDITS_S       0
++#define PCIE_TNPFCS_TX_NP_HDR_FC_CREDITS          0x000FF000 /* Transmit Non-Posted Header FC Credits */
++#define PCIE_TNPFCS_TX_NP_HDR_FC_CREDITS_S        12
++
++/* Transmit Complete FC Credit Status Register */
++#define PCIE_TCFCS(X)                             (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x738)
++#define PCIE_TCFCS_TX_CPL_DATA_FC_CREDITS         0x00000FFF /* Transmit Completion Data FC Credits */
++#define PCIE_TCFCS_TX_CPL_DATA_FC_CREDITS_S       0
++#define PCIE_TCFCS_TX_CPL_HDR_FC_CREDITS          0x000FF000 /* Transmit Completion Header FC Credits */
++#define PCIE_TCFCS_TX_CPL_HDR_FC_CREDITS_S        12
++
++/* Queue Status Register */
++#define PCIE_QSR(X)                              (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x73C)
++#define PCIE_QSR_WAIT_UPDATE_FC_DLL               0x00000001 /* Received TLP FC Credits Not Returned */
++#define PCIE_QSR_TX_RETRY_BUF_NOT_EMPTY           0x00000002 /* Transmit Retry Buffer Not Empty */
++#define PCIE_QSR_RX_QUEUE_NOT_EMPTY               0x00000004 /* Received Queue Not Empty */
++
++/* VC Transmit Arbitration Register 1 */
++#define PCIE_VCTAR1(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x740)
++#define PCIE_VCTAR1_WRR_WEIGHT_VC0               0x000000FF /* WRR Weight for VC0 */
++#define PCIE_VCTAR1_WRR_WEIGHT_VC1               0x0000FF00 /* WRR Weight for VC1 */
++#define PCIE_VCTAR1_WRR_WEIGHT_VC2               0x00FF0000 /* WRR Weight for VC2 */
++#define PCIE_VCTAR1_WRR_WEIGHT_VC3               0xFF000000 /* WRR Weight for VC3 */
++
++/* VC Transmit Arbitration Register 2 */
++#define PCIE_VCTAR2(X)                          (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x744)
++#define PCIE_VCTAR2_WRR_WEIGHT_VC4               0x000000FF /* WRR Weight for VC4 */
++#define PCIE_VCTAR2_WRR_WEIGHT_VC5               0x0000FF00 /* WRR Weight for VC5 */
++#define PCIE_VCTAR2_WRR_WEIGHT_VC6               0x00FF0000 /* WRR Weight for VC6 */
++#define PCIE_VCTAR2_WRR_WEIGHT_VC7               0xFF000000 /* WRR Weight for VC7 */
++
++/* VC0 Posted Receive Queue Control Register */
++#define PCIE_VC0_PRQCR(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x748)
++#define PCIE_VC0_PRQCR_P_DATA_CREDITS            0x00000FFF /* VC0 Posted Data Credits */
++#define PCIE_VC0_PRQCR_P_DATA_CREDITS_S          0
++#define PCIE_VC0_PRQCR_P_HDR_CREDITS             0x000FF000 /* VC0 Posted Header Credits */
++#define PCIE_VC0_PRQCR_P_HDR_CREDITS_S           12
++#define PCIE_VC0_PRQCR_P_TLP_QUEUE_MODE          0x00E00000 /* VC0 Posted TLP Queue Mode */
++#define PCIE_VC0_PRQCR_P_TLP_QUEUE_MODE_S        20
++#define PCIE_VC0_PRQCR_TLP_RELAX_ORDER           0x40000000 /* TLP Type Ordering for VC0 */    
++#define PCIE_VC0_PRQCR_VC_STRICT_ORDER           0x80000000 /* VC0 Ordering for Receive Queues */
++
++/* VC0 Non-Posted Receive Queue Control */
++#define PCIE_VC0_NPRQCR(X)                      (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x74C)
++#define PCIE_VC0_NPRQCR_NP_DATA_CREDITS          0x00000FFF /* VC0 Non-Posted Data Credits */
++#define PCIE_VC0_NPRQCR_NP_DATA_CREDITS_S        0
++#define PCIE_VC0_NPRQCR_NP_HDR_CREDITS           0x000FF000 /* VC0 Non-Posted Header Credits */
++#define PCIE_VC0_NPRQCR_NP_HDR_CREDITS_S         12
++#define PCIE_VC0_NPRQCR_NP_TLP_QUEUE_MODE        0x00E00000 /* VC0 Non-Posted TLP Queue Mode */
++#define PCIE_VC0_NPRQCR_NP_TLP_QUEUE_MODE_S      20
++
++/* VC0 Completion Receive Queue Control */
++#define PCIE_VC0_CRQCR(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x750)
++#define PCIE_VC0_CRQCR_CPL_DATA_CREDITS          0x00000FFF /* VC0 Completion Data Credits */
++#define PCIE_VC0_CRQCR_CPL_DATA_CREDITS_S        0
++#define PCIE_VC0_CRQCR_CPL_HDR_CREDITS           0x000FF000 /* VC0 Completion Header Credits */
++#define PCIE_VC0_CRQCR_CPL_HDR_CREDITS_S         12
++#define PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE        0x00E00000 /* VC0 Completion TLP Queue Mode */
++#define PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE_S      21
++
++/* Applicable to the above three registers */
++enum {
++    PCIE_VC0_TLP_QUEUE_MODE_STORE_FORWARD = 1,
++    PCIE_VC0_TLP_QUEUE_MODE_CUT_THROUGH   = 2,
++    PCIE_VC0_TLP_QUEUE_MODE_BYPASS        = 4,
++};
++
++/* VC0 Posted Buffer Depth Register */
++#define PCIE_VC0_PBD(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7A8)
++#define PCIE_VC0_PBD_P_DATA_QUEUE_ENTRIES       0x00003FFF /* VC0 Posted Data Queue Depth */
++#define PCIE_VC0_PBD_P_DATA_QUEUE_ENTRIES_S     0
++#define PCIE_VC0_PBD_P_HDR_QUEUE_ENTRIES        0x03FF0000 /* VC0 Posted Header Queue Depth */
++#define PCIE_VC0_PBD_P_HDR_QUEUE_ENTRIES_S      16
++
++/* VC0 Non-Posted Buffer Depth Register */
++#define PCIE_VC0_NPBD(X)                       (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7AC)
++#define PCIE_VC0_NPBD_NP_DATA_QUEUE_ENTRIES     0x00003FFF /* VC0 Non-Posted Data Queue Depth */
++#define PCIE_VC0_NPBD_NP_DATA_QUEUE_ENTRIES_S   0
++#define PCIE_VC0_NPBD_NP_HDR_QUEUE_ENTRIES      0x03FF0000 /* VC0 Non-Posted Header Queue Depth */
++#define PCIE_VC0_NPBD_NP_HDR_QUEUE_ENTRIES_S    16
++
++/* VC0 Completion Buffer Depth Register */
++#define PCIE_VC0_CBD(X)                        (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7B0)
++#define PCIE_VC0_CBD_CPL_DATA_QUEUE_ENTRIES     0x00003FFF /* VC0 Completion Data Queue Depth */
++#define PCIE_VC0_CBD_CPL_DATA_QUEUE_ENTRIES_S   0
++#define PCIE_VC0_CBD_CPL_HDR_QUEUE_ENTRIES      0x03FF0000 /* VC0 Completion Header Queue Depth */
++#define PCIE_VC0_CBD_CPL_HDR_QUEUE_ENTRIES_S    16
++
++/* PHY Status Register, all zeros in VR9 */
++#define PCIE_PHYSR(X)                           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x810)
++
++/* PHY Control Register, all zeros in VR9 */
++#define PCIE_PHYCR(X)                           (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x814)
++
++/*
++ * PCIe PDI PHY register definitions. All of the following registers
++ * are assumed to be confidential.
++ * XXX, detailed bit definitions
++ */
++#define	PCIE_PHY_PLL_CTRL1(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x22 << 1))
++#define	PCIE_PHY_PLL_CTRL2(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x23 << 1))
++#define	PCIE_PHY_PLL_CTRL3(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x24 << 1))
++#define	PCIE_PHY_PLL_CTRL4(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x25 << 1))
++#define	PCIE_PHY_PLL_CTRL5(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x26 << 1))
++#define	PCIE_PHY_PLL_CTRL6(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x27 << 1))
++#define	PCIE_PHY_PLL_CTRL7(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x28 << 1))
++#define	PCIE_PHY_PLL_A_CTRL1(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x29 << 1))
++#define	PCIE_PHY_PLL_A_CTRL2(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x2A << 1))
++#define	PCIE_PHY_PLL_A_CTRL3(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x2B << 1))
++#define	PCIE_PHY_PLL_STATUS(X)      (PCIE_PHY_PORT_TO_BASE(X) + (0x2C << 1))
++ 
++#define PCIE_PHY_TX1_CTRL1(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x30 << 1))
++#define PCIE_PHY_TX1_CTRL2(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x31 << 1))
++#define PCIE_PHY_TX1_CTRL3(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x32 << 1))
++#define PCIE_PHY_TX1_A_CTRL1(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x33 << 1))
++#define PCIE_PHY_TX1_A_CTRL2(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x34 << 1))
++#define PCIE_PHY_TX1_MOD1(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x35 << 1))
++#define PCIE_PHY_TX1_MOD2(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x36 << 1))
++#define PCIE_PHY_TX1_MOD3(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x37 << 1))
++
++#define PCIE_PHY_TX2_CTRL1(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x38 << 1))
++#define PCIE_PHY_TX2_CTRL2(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x39 << 1))
++#define PCIE_PHY_TX2_A_CTRL1(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x3B << 1))
++#define PCIE_PHY_TX2_A_CTRL2(X)     (PCIE_PHY_PORT_TO_BASE(X) + (0x3C << 1))
++#define PCIE_PHY_TX2_MOD1(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x3D << 1))
++#define PCIE_PHY_TX2_MOD2(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x3E << 1))
++#define PCIE_PHY_TX2_MOD3(X)        (PCIE_PHY_PORT_TO_BASE(X) + (0x3F << 1))
++
++#define PCIE_PHY_RX1_CTRL1(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x50 << 1))
++#define PCIE_PHY_RX1_CTRL2(X)       (PCIE_PHY_PORT_TO_BASE(X) + (0x51 << 1))
++#define PCIE_PHY_RX1_CDR(X)         (PCIE_PHY_PORT_TO_BASE(X) + (0x52 << 1))
++#define PCIE_PHY_RX1_EI(X)          (PCIE_PHY_PORT_TO_BASE(X) + (0x53 << 1))
++#define PCIE_PHY_RX1_A_CTRL(X)      (PCIE_PHY_PORT_TO_BASE(X) + (0x55 << 1))
++
++/* Interrupt related stuff */
++#define PCIE_LEGACY_DISABLE 0
++#define PCIE_LEGACY_INTA  1
++#define PCIE_LEGACY_INTB  2
++#define PCIE_LEGACY_INTC  3
++#define PCIE_LEGACY_INTD  4
++#define PCIE_LEGACY_INT_MAX PCIE_LEGACY_INTD
++
++#define PCIE_IRQ_LOCK(lock) do {             \
++    unsigned long flags;                     \
++    spin_lock_irqsave(&(lock), flags);
++#define PCIE_IRQ_UNLOCK(lock)                \
++    spin_unlock_irqrestore(&(lock), flags);  \
++} while (0)
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
++#define IRQF_SHARED SA_SHIRQ
++#endif
++
++#define PCIE_MSG_MSI        0x00000001
++#define PCIE_MSG_ISR        0x00000002
++#define PCIE_MSG_FIXUP      0x00000004
++#define PCIE_MSG_READ_CFG   0x00000008
++#define PCIE_MSG_WRITE_CFG  0x00000010
++#define PCIE_MSG_CFG        (PCIE_MSG_READ_CFG | PCIE_MSG_WRITE_CFG)
++#define PCIE_MSG_REG        0x00000020
++#define PCIE_MSG_INIT       0x00000040
++#define PCIE_MSG_ERR        0x00000080
++#define PCIE_MSG_PHY        0x00000100
++#define PCIE_MSG_ANY        0x000001ff
++
++#define IFX_PCIE_PORT0      0
++#define IFX_PCIE_PORT1      1
++
++#ifdef CONFIG_IFX_PCIE_2ND_CORE
++#define IFX_PCIE_CORE_NR    2
++#else
++#define IFX_PCIE_CORE_NR    1
++#endif
++
++//#define IFX_PCIE_ERROR_INT
++
++//#define IFX_PCIE_DBG
++
++#if defined(IFX_PCIE_DBG)
++#define IFX_PCIE_PRINT(_m, _fmt, args...) do {   \
++    if (g_pcie_debug_flag & (_m)) {              \
++        ifx_pcie_debug((_fmt), ##args);          \
++    }                                            \
++} while (0)
++
++#define INLINE 
++#else
++#define IFX_PCIE_PRINT(_m, _fmt, args...)   \
++    do {} while(0)
++#define INLINE inline
++#endif
++
++struct ifx_pci_controller {
++	struct pci_controller   pcic;
++    
++	/* RC specific, per host bus information */
++	u32   port;  /* Port index, 0 -- 1st core, 1 -- 2nd core */
++};
++
++typedef struct ifx_pcie_ir_irq {
++    const unsigned int irq;
++    const char name[16];
++}ifx_pcie_ir_irq_t;
++
++typedef struct ifx_pcie_legacy_irq{
++    const u32 irq_bit;
++    const int irq;
++}ifx_pcie_legacy_irq_t;
++
++typedef struct ifx_pcie_irq {
++    ifx_pcie_ir_irq_t ir_irq;
++    ifx_pcie_legacy_irq_t legacy_irq[PCIE_LEGACY_INT_MAX];
++}ifx_pcie_irq_t;
++
++extern u32 g_pcie_debug_flag;
++extern void ifx_pcie_debug(const char *fmt, ...);
++extern void pcie_phy_clock_mode_setup(int pcie_port);
++extern void pcie_msi_pic_init(int pcie_port);
++extern u32 ifx_pcie_bus_enum_read_hack(int where, u32 value);
++extern u32 ifx_pcie_bus_enum_write_hack(int where, u32 value);
++
++
++#include <linux/types.h>
++#include <linux/delay.h>
++#include <linux/gpio.h>
++#include <linux/clk.h>
++
++#include <lantiq_soc.h>
++
++#define IFX_PCIE_GPIO_RESET  38
++#define IFX_REG_R32	ltq_r32
++#define IFX_REG_W32	ltq_w32
++#define CONFIG_IFX_PCIE_HW_SWAP
++#define IFX_RCU_AHB_ENDIAN                      ((volatile u32*)(IFX_RCU + 0x004C))
++#define IFX_RCU_RST_REQ                         ((volatile u32*)(IFX_RCU + 0x0010))
++#define IFX_RCU_AHB_BE_PCIE_PDI                  0x00000080  /* Configure PCIE PDI module in big endian */
++
++#define IFX_RCU                                 (KSEG1 | 0x1F203000)
++#define IFX_RCU_AHB_BE_PCIE_M                    0x00000001  /* Configure AHB master port that connects to PCIe RC in big endian */
++#define IFX_RCU_AHB_BE_PCIE_S                    0x00000010  /* Configure AHB slave port that connects to PCIe RC in little endian */
++#define IFX_RCU_AHB_BE_XBAR_M                    0x00000002  /* Configure AHB master port that connects to XBAR in big endian */
++#define CONFIG_IFX_PCIE_PHY_36MHZ_MODE
++
++#define IFX_PMU1_MODULE_PCIE_PHY   (0)
++#define IFX_PMU1_MODULE_PCIE_CTRL  (1)
++#define IFX_PMU1_MODULE_PDI        (4)
++#define IFX_PMU1_MODULE_MSI        (5)
++
++#define IFX_PMU_MODULE_PCIE_L0_CLK (31)
++
++
++static inline void pcie_ep_gpio_rst_init(int pcie_port)
++{
++}
++
++static inline void pcie_ahb_pmu_setup(void)
++{
++	struct clk *clk;
++	clk = clk_get_sys("ltq_pcie", "ahb");
++	clk_enable(clk);
++	//ltq_pmu_enable(PMU_AHBM | PMU_AHBS);
++}
++
++static inline void pcie_rcu_endian_setup(int pcie_port)
++{
++    u32 reg;
++
++    reg = IFX_REG_R32(IFX_RCU_AHB_ENDIAN);
++#ifdef CONFIG_IFX_PCIE_HW_SWAP
++    reg |= IFX_RCU_AHB_BE_PCIE_M;
++    reg |= IFX_RCU_AHB_BE_PCIE_S;
++    reg &= ~IFX_RCU_AHB_BE_XBAR_M;
++#else 
++    reg |= IFX_RCU_AHB_BE_PCIE_M;
++    reg &= ~IFX_RCU_AHB_BE_PCIE_S;
++    reg &= ~IFX_RCU_AHB_BE_XBAR_M;
++#endif /* CONFIG_IFX_PCIE_HW_SWAP */
++    IFX_REG_W32(reg, IFX_RCU_AHB_ENDIAN);
++    IFX_PCIE_PRINT(PCIE_MSG_REG, "%s IFX_RCU_AHB_ENDIAN: 0x%08x\n", __func__, IFX_REG_R32(IFX_RCU_AHB_ENDIAN));
++}
++
++static inline void pcie_phy_pmu_enable(int pcie_port)
++{
++	struct clk *clk;
++	clk = clk_get_sys("ltq_pcie", "phy");
++	clk_enable(clk);
++	//ltq_pmu1_enable(1<<IFX_PMU1_MODULE_PCIE_PHY);
++}
++
++static inline void pcie_phy_pmu_disable(int pcie_port)
++{
++	struct clk *clk;
++	clk = clk_get_sys("ltq_pcie", "phy");
++	clk_disable(clk);
++	//ltq_pmu1_disable(1<<IFX_PMU1_MODULE_PCIE_PHY);
++}
++
++static inline void pcie_pdi_big_endian(int pcie_port)
++{
++    u32 reg;
++
++    /* SRAM2PDI endianness control. */
++    reg = IFX_REG_R32(IFX_RCU_AHB_ENDIAN);
++    /* Config AHB->PCIe and PDI endianness */
++    reg |= IFX_RCU_AHB_BE_PCIE_PDI;
++    IFX_REG_W32(reg, IFX_RCU_AHB_ENDIAN);
++}
++
++static inline void pcie_pdi_pmu_enable(int pcie_port)
++{
++	struct clk *clk;
++	clk = clk_get_sys("ltq_pcie", "pdi");
++	clk_enable(clk);
++	//ltq_pmu1_enable(1<<IFX_PMU1_MODULE_PDI);
++}
++
++static inline void pcie_core_rst_assert(int pcie_port)
++{
++    u32 reg;
++
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++
++    /* Reset PCIe PHY & Core, bit 22, bit 26 may be affected if write it directly  */
++    reg |= 0x00400000;
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_core_rst_deassert(int pcie_port)
++{
++    u32 reg;
++
++    /* Ensure at least a one-microsecond delay */
++    udelay(1);
++
++    /* Reset PCIe PHY & Core, bit 22 */
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++    reg &= ~0x00400000;
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_phy_rst_assert(int pcie_port)
++{
++    u32 reg;
++
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++    reg |= 0x00001000; /* Bit 12 */
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_phy_rst_deassert(int pcie_port)
++{
++    u32 reg;
++
++    /* Ensure at least a one-microsecond delay */
++    udelay(1);
++
++    reg = IFX_REG_R32(IFX_RCU_RST_REQ);
++    reg &= ~0x00001000; /* Bit 12 */
++    IFX_REG_W32(reg, IFX_RCU_RST_REQ);
++}
++
++static inline void pcie_device_rst_assert(int pcie_port)
++{
++	gpio_set_value(IFX_PCIE_GPIO_RESET, 0);
++  //  ifx_gpio_output_clear(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
++}
++
++static inline void pcie_device_rst_deassert(int pcie_port)
++{
++    mdelay(100);
++	gpio_set_value(IFX_PCIE_GPIO_RESET, 1);
++//    ifx_gpio_output_set(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
++}
++
++static inline void pcie_core_pmu_setup(int pcie_port)
++{
++	struct clk *clk;
++	clk = clk_get_sys("ltq_pcie", "ctl");
++	clk_enable(clk);
++	clk = clk_get_sys("ltq_pcie", "bus");
++	clk_enable(clk);
++
++	//ltq_pmu1_enable(1 << IFX_PMU1_MODULE_PCIE_CTRL);
++	//ltq_pmu_enable(1 << IFX_PMU_MODULE_PCIE_L0_CLK);
++}
++
++static inline void pcie_msi_init(int pcie_port)
++{
++	struct clk *clk;
++    pcie_msi_pic_init(pcie_port);
++	clk = clk_get_sys("ltq_pcie", "msi");
++	clk_enable(clk);
++	//ltq_pmu1_enable(1 << IFX_PMU1_MODULE_MSI);
++}
++
++static inline u32
++ifx_pcie_bus_nr_deduct(u32 bus_number, int pcie_port)
++{
++    u32 tbus_number = bus_number;
++
++#ifdef CONFIG_PCI_LANTIQ
++    if (pcibios_host_nr() > 1) {
++        tbus_number -= pcibios_1st_host_bus_nr();
++    }
++#endif /* CONFIG_PCI_LANTIQ */
++    return tbus_number;
++}
++
++static inline u32
++ifx_pcie_bus_enum_hack(struct pci_bus *bus, u32 devfn, int where, u32 value, int pcie_port, int read)
++{
++    struct pci_dev *pdev;
++    u32 tvalue = value;
++
++    /* Sanity check */
++    pdev = pci_get_slot(bus, devfn);
++    if (pdev == NULL) {
++        return tvalue;
++    }
++
++    /* Only care about PCI bridge */
++    if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
++        return tvalue;
++    }
++
++    if (read) { /* Read hack */
++    #ifdef CONFIG_PCI_LANTIQ
++        if (pcibios_host_nr() > 1) {
++            tvalue = ifx_pcie_bus_enum_read_hack(where, tvalue);
++        }
++    #endif /* CONFIG_PCI_LANTIQ */
++    }
++    else { /* Write hack */
++    #ifdef CONFIG_PCI_LANTIQ
++        if (pcibios_host_nr() > 1) {
++            tvalue = ifx_pcie_bus_enum_write_hack(where, tvalue);
++        }
++    #endif
++    }
++    return tvalue;
++}
++
++#endif /* IFXMIPS_PCIE_VR9_H */
++
+--- a/drivers/pci/pcie/aer/Kconfig
++++ b/drivers/pci/pcie/aer/Kconfig
+@@ -19,6 +19,7 @@ config PCIEAER
+ config PCIE_ECRC
+ 	bool "PCI Express ECRC settings control"
+ 	depends on PCIEAER
++	default n
+ 	help
+ 	  Used to override firmware/bios settings for PCI Express ECRC
+ 	  (transaction layer end-to-end CRC checking).
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1228,6 +1228,8 @@ void pci_walk_bus(struct pci_bus *top, i
+ 		  void *userdata);
+ int pci_cfg_space_size(struct pci_dev *dev);
+ unsigned char pci_bus_max_busnr(struct pci_bus *bus);
++int pcibios_host_nr(void);
++int pcibios_1st_host_bus_nr(void);
+ void pci_setup_bridge(struct pci_bus *bus);
+ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
+ 					 unsigned long type);
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -1054,6 +1054,12 @@
+ #define PCI_DEVICE_ID_SGI_LITHIUM	0x1002
+ #define PCI_DEVICE_ID_SGI_IOC4		0x100a
+ 
++#define PCI_VENDOR_ID_INFINEON		0x15D1
++#define PCI_DEVICE_ID_INFINEON_DANUBE	0x000F
++#define PCI_DEVICE_ID_INFINEON_PCIE	0x0011
++#define PCI_VENDOR_ID_LANTIQ		0x1BEF
++#define PCI_DEVICE_ID_LANTIQ_PCIE	0x0011
++
+ #define PCI_VENDOR_ID_WINBOND		0x10ad
+ #define PCI_DEVICE_ID_WINBOND_82C105	0x0105
+ #define PCI_DEVICE_ID_WINBOND_83C553	0x0565
diff --git a/target/linux/lantiq/patches-4.4/0002-MIPS-lantiq-dtb-image-hack.patch b/target/linux/lantiq/patches-4.4/0002-MIPS-lantiq-dtb-image-hack.patch
new file mode 100644
index 0000000..89a498d
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0002-MIPS-lantiq-dtb-image-hack.patch
@@ -0,0 +1,31 @@
+From 17348293f7f8103c97c8d2a6b0ef36eae06ec371 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Wed, 13 Mar 2013 09:36:16 +0100
+Subject: [PATCH 02/36] MIPS: lantiq: dtb image hack
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ arch/mips/lantiq/Makefile |    2 --
+ arch/mips/lantiq/prom.c   |    4 +++-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/mips/lantiq/prom.c
++++ b/arch/mips/lantiq/prom.c
+@@ -63,6 +63,8 @@ static void __init prom_init_cmdline(voi
+ 	}
+ }
+ 
++extern struct boot_param_header __image_dtb;
++
+ void __init plat_mem_setup(void)
+ {
+ 	ioport_resource.start = IOPORT_RESOURCE_START;
+@@ -76,7 +78,7 @@ void __init plat_mem_setup(void)
+ 	 * Load the builtin devicetree. This causes the chosen node to be
+ 	 * parsed resulting in our memory appearing
+ 	 */
+-	__dt_setup_arch(__dtb_start);
++	__dt_setup_arch(&__image_dtb);
+ }
+ 
+ void __init device_tree_init(void)
diff --git a/target/linux/lantiq/patches-4.4/0004-MIPS-lantiq-add-atm-hack.patch b/target/linux/lantiq/patches-4.4/0004-MIPS-lantiq-add-atm-hack.patch
new file mode 100644
index 0000000..53809d0
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0004-MIPS-lantiq-add-atm-hack.patch
@@ -0,0 +1,500 @@
+From 9afadf01b1be371ee88491819aa67364684461f9 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Fri, 3 Aug 2012 10:27:25 +0200
+Subject: [PATCH 04/36] MIPS: lantiq: add atm hack
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ arch/mips/include/asm/mach-lantiq/lantiq_atm.h |  196 +++++++++++++++++++++++
+ arch/mips/include/asm/mach-lantiq/lantiq_ptm.h |  203 ++++++++++++++++++++++++
+ arch/mips/lantiq/irq.c                         |    2 +
+ arch/mips/mm/cache.c                           |    2 +
+ include/uapi/linux/atm.h                       |    6 +
+ net/atm/common.c                               |    6 +
+ net/atm/proc.c                                 |    2 +-
+ 7 files changed, 416 insertions(+), 1 deletion(-)
+ create mode 100644 arch/mips/include/asm/mach-lantiq/lantiq_atm.h
+ create mode 100644 arch/mips/include/asm/mach-lantiq/lantiq_ptm.h
+
+--- /dev/null
++++ b/arch/mips/include/asm/mach-lantiq/lantiq_atm.h
+@@ -0,0 +1,196 @@
++/******************************************************************************
++**
++** FILE NAME    : ifx_atm.h
++** PROJECT      : UEIP
++** MODULES      : ATM
++**
++** DATE         : 17 Jun 2009
++** AUTHOR       : Xu Liang
++** DESCRIPTION  : Global ATM driver header file
++** COPYRIGHT    :       Copyright (c) 2006
++**                      Infineon Technologies AG
++**                      Am Campeon 1-12, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++**
++** HISTORY
++** $Date        $Author         $Comment
++** 07 JUL 2009  Xu Liang        Init Version
++*******************************************************************************/
++
++#ifndef IFX_ATM_H
++#define IFX_ATM_H
++
++
++
++/*!
++  \defgroup IFX_ATM UEIP Project - ATM driver module
++  \brief UEIP Project - ATM driver module, support Danube, Amazon-SE, AR9, VR9.
++ */
++
++/*!
++  \defgroup IFX_ATM_IOCTL IOCTL Commands
++  \ingroup IFX_ATM
++  \brief IOCTL Commands used by user application.
++ */
++
++/*!
++  \defgroup IFX_ATM_STRUCT Structures
++  \ingroup IFX_ATM
++  \brief Structures used by user application.
++ */
++
++/*!
++  \file ifx_atm.h
++  \ingroup IFX_ATM
++  \brief ATM driver header file
++ */
++
++
++
++/*
++ * ####################################
++ *              Definition
++ * ####################################
++ */
++
++/*!
++  \addtogroup IFX_ATM_STRUCT
++ */
++/*@{*/
++
++/*
++ *  ATM MIB
++ */
++
++/*!
++  \struct atm_cell_ifEntry_t
++  \brief Structure used for Cell Level MIB Counters.
++
++  User applications use this structure to call the IOCTL command "PPE_ATM_MIB_CELL".
++ */
++typedef struct {
++	__u32	ifHCInOctets_h;     /*!< byte counter of ingress cells (upper 32 bits, total 64 bits)   */
++	__u32	ifHCInOctets_l;     /*!< byte counter of ingress cells (lower 32 bits, total 64 bits)   */
++	__u32	ifHCOutOctets_h;    /*!< byte counter of egress cells (upper 32 bits, total 64 bits)    */
++	__u32	ifHCOutOctets_l;    /*!< byte counter of egress cells (lower 32 bits, total 64 bits)    */
++	__u32	ifInErrors;         /*!< counter of error ingress cells     */
++	__u32	ifInUnknownProtos;  /*!< counter of unknown ingress cells   */
++	__u32	ifOutErrors;        /*!< counter of error egress cells      */
++} atm_cell_ifEntry_t;
++
++/*!
++  \struct atm_aal5_ifEntry_t
++  \brief Structure used for AAL5 Frame Level MIB Counters.
++
++  User applications use this structure to call the IOCTL command "PPE_ATM_MIB_AAL5".
++ */
++typedef struct {
++	__u32	ifHCInOctets_h;     /*!< byte counter of ingress packets (upper 32 bits, total 64 bits) */
++	__u32	ifHCInOctets_l;     /*!< byte counter of ingress packets (lower 32 bits, total 64 bits) */
++	__u32	ifHCOutOctets_h;    /*!< byte counter of egress packets (upper 32 bits, total 64 bits)  */
++	__u32	ifHCOutOctets_l;    /*!< byte counter of egress packets (lower 32 bits, total 64 bits)  */
++	__u32	ifInUcastPkts;      /*!< counter of ingress packets         */
++	__u32	ifOutUcastPkts;     /*!< counter of egress packets          */
++	__u32	ifInErrors;         /*!< counter of error ingress packets   */
++	__u32	ifInDiscards;       /*!< counter of dropped ingress packets */
++	__u32	ifOutErros;         /*!< counter of error egress packets    */
++	__u32	ifOutDiscards;      /*!< counter of dropped egress packets  */
++} atm_aal5_ifEntry_t;
++
++/*!
++  \struct atm_aal5_vcc_t
++  \brief Structure used for per PVC AAL5 Frame Level MIB Counters.
++
++  This structure is a part of structure "atm_aal5_vcc_x_t".
++ */
++typedef struct {
++	__u32	aal5VccCrcErrors;       /*!< counter of ingress packets with CRC error  */
++	__u32	aal5VccSarTimeOuts;     /*!< counter of ingress packets with Re-assemble timeout    */  //no timer support yet
++	__u32	aal5VccOverSizedSDUs;   /*!< counter of oversized ingress packets       */
++} atm_aal5_vcc_t;
++
++/*!
++  \struct atm_aal5_vcc_x_t
++  \brief Structure used for per PVC AAL5 Frame Level MIB Counters.
++
++  User applications use this structure to call the IOCTL command "PPE_ATM_MIB_VCC".
++ */
++typedef struct {
++	int             vpi;        /*!< VPI of the VCC to get MIB counters */
++	int             vci;        /*!< VCI of the VCC to get MIB counters */
++	atm_aal5_vcc_t  mib_vcc;    /*!< structure to get MIB counters      */
++} atm_aal5_vcc_x_t;
++
++/*@}*/
++
++
++
++/*
++ * ####################################
++ *                IOCTL
++ * ####################################
++ */
++
++/*!
++  \addtogroup IFX_ATM_IOCTL
++ */
++/*@{*/
++
++/*
++ *  ioctl Command
++ */
++/*!
++  \brief ATM IOCTL Magic Number
++ */
++#define PPE_ATM_IOC_MAGIC       'o'
++/*!
++  \brief ATM IOCTL Command - Get Cell Level MIB Counters
++
++   This command is obsolete. Users can get cell-level MIB counters from the DSL API.
++   This command uses structure "atm_cell_ifEntry_t" as parameter for output of MIB counters.
++ */
++#define PPE_ATM_MIB_CELL        _IOW(PPE_ATM_IOC_MAGIC,  0, atm_cell_ifEntry_t)
++/*!
++  \brief ATM IOCTL Command - Get AAL5 Level MIB Counters
++
++   Get AAL5 packet counters.
++   This command uses structure "atm_aal5_ifEntry_t" as parameter for output of MIB counters.
++ */
++#define PPE_ATM_MIB_AAL5        _IOW(PPE_ATM_IOC_MAGIC,  1, atm_aal5_ifEntry_t)
++/*!
++  \brief ATM IOCTL Command - Get Per PVC MIB Counters
++
++   Get AAL5 packet counters for each PVC.
++   This command uses structure "atm_aal5_vcc_x_t" as parameter for input of VPI/VCI information and output of MIB counters.
++ */
++#define PPE_ATM_MIB_VCC         _IOWR(PPE_ATM_IOC_MAGIC, 2, atm_aal5_vcc_x_t)
++/*!
++  \brief Total Number of ATM IOCTL Commands
++ */
++#define PPE_ATM_IOC_MAXNR       3
++
++/*@}*/
++
++
++
++/*
++ * ####################################
++ *                 API
++ * ####################################
++ */
++
++#ifdef __KERNEL__
++struct port_cell_info {
++    unsigned int    port_num;
++    unsigned int    tx_link_rate[2];
++};
++#endif
++
++
++
++#endif  //  IFX_ATM_H
++
+--- /dev/null
++++ b/arch/mips/include/asm/mach-lantiq/lantiq_ptm.h
+@@ -0,0 +1,203 @@
++/******************************************************************************
++**
++** FILE NAME    : ifx_ptm.h
++** PROJECT      : UEIP
++** MODULES      : PTM
++**
++** DATE         : 17 Jun 2009
++** AUTHOR       : Xu Liang
++** DESCRIPTION  : Global PTM driver header file
++** COPYRIGHT    :       Copyright (c) 2006
++**                      Infineon Technologies AG
++**                      Am Campeon 1-12, 85579 Neubiberg, Germany
++**
++**    This program is free software; you can redistribute it and/or modify
++**    it under the terms of the GNU General Public License as published by
++**    the Free Software Foundation; either version 2 of the License, or
++**    (at your option) any later version.
++**
++** HISTORY
++** $Date        $Author         $Comment
++** 07 JUL 2009  Xu Liang        Init Version
++*******************************************************************************/
++
++#ifndef IFX_PTM_H
++#define IFX_PTM_H
++
++
++
++/*!
++  \defgroup IFX_PTM UEIP Project - PTM driver module
++  \brief UEIP Project - PTM driver module, support Danube, Amazon-SE, AR9, VR9.
++ */
++
++/*!
++  \defgroup IFX_PTM_IOCTL IOCTL Commands
++  \ingroup IFX_PTM
++  \brief IOCTL Commands used by user application.
++ */
++
++/*!
++  \defgroup IFX_PTM_STRUCT Structures
++  \ingroup IFX_PTM
++  \brief Structures used by user application.
++ */
++
++/*!
++  \file ifx_ptm.h
++  \ingroup IFX_PTM
++  \brief PTM driver header file
++ */
++
++
++
++/*
++ * ####################################
++ *              Definition
++ * ####################################
++ */
++
++
++
++/*
++ * ####################################
++ *                IOCTL
++ * ####################################
++ */
++
++/*!
++  \addtogroup IFX_PTM_IOCTL
++ */
++/*@{*/
++
++/*
++ *  ioctl Command
++ */
++/*!
++  \brief PTM IOCTL Command - Get codeword MIB counters.
++
++  This command uses structure "PTM_CW_IF_ENTRY_T" to get codeword level MIB counters.
++ */
++#define IFX_PTM_MIB_CW_GET              SIOCDEVPRIVATE + 1
++/*!
++  \brief PTM IOCTL Command - Get packet MIB counters.
++
++  This command uses structure "PTM_FRAME_MIB_T" to get packet level MIB counters.
++ */
++#define IFX_PTM_MIB_FRAME_GET           SIOCDEVPRIVATE + 2
++/*!
++  \brief PTM IOCTL Command - Get firmware configuration (CRC).
++
++  This command uses structure "IFX_PTM_CFG_T" to get firmware configuration (CRC).
++ */
++#define IFX_PTM_CFG_GET                 SIOCDEVPRIVATE + 3
++/*!
++  \brief PTM IOCTL Command - Set firmware configuration (CRC).
++
++  This command uses structure "IFX_PTM_CFG_T" to set firmware configuration (CRC).
++ */
++#define IFX_PTM_CFG_SET                 SIOCDEVPRIVATE + 4
++/*!
++  \brief PTM IOCTL Command - Program priority value to TX queue mapping.
++
++  This command uses structure "IFX_PTM_PRIO_Q_MAP_T" to program priority value to TX queue mapping.
++ */
++#define IFX_PTM_MAP_PKT_PRIO_TO_Q       SIOCDEVPRIVATE + 14
++
++/*@}*/
++
++
++/*!
++  \addtogroup IFX_PTM_STRUCT
++ */
++/*@{*/
++
++/*
++ *  ioctl Data Type
++ */
++
++/*!
++  \typedef PTM_CW_IF_ENTRY_T
++  \brief Wrapping of structure "ptm_cw_ifEntry_t".
++ */
++/*!
++  \struct ptm_cw_ifEntry_t
++  \brief Structure used for CodeWord level MIB counters.
++ */
++typedef struct ptm_cw_ifEntry_t {
++    uint32_t    ifRxNoIdleCodewords;    /*!< output, number of ingress user codeword */
++    uint32_t    ifRxIdleCodewords;      /*!< output, number of ingress idle codeword */
++    uint32_t    ifRxCodingViolation;    /*!< output, number of error ingress codeword */
++    uint32_t    ifTxNoIdleCodewords;    /*!< output, number of egress user codeword */
++    uint32_t    ifTxIdleCodewords;      /*!< output, number of egress idle codeword */
++} PTM_CW_IF_ENTRY_T;
++
++/*!
++  \typedef PTM_FRAME_MIB_T
++  \brief Wrapping of structure "ptm_frame_mib_t".
++ */
++/*!
++  \struct ptm_frame_mib_t
++  \brief Structure used for packet level MIB counters.
++ */
++typedef struct ptm_frame_mib_t {
++    uint32_t    RxCorrect;      /*!< output, number of ingress packet */
++    uint32_t    TC_CrcError;    /*!< output, number of egress packet with CRC error */
++    uint32_t    RxDropped;      /*!< output, number of dropped ingress packet */
++    uint32_t    TxSend;         /*!< output, number of egress packet */
++} PTM_FRAME_MIB_T;
++
++/*!
++  \typedef IFX_PTM_CFG_T
++  \brief Wrapping of structure "ptm_cfg_t".
++ */
++/*!
++  \struct ptm_cfg_t
++  \brief Structure used for ETH/TC CRC configuration.
++ */
++typedef struct ptm_cfg_t {
++    uint32_t    RxEthCrcPresent;    /*!< input/output, ingress packet has ETH CRC */
++    uint32_t    RxEthCrcCheck;      /*!< input/output, check ETH CRC of ingress packet */
++    uint32_t    RxTcCrcCheck;       /*!< input/output, check TC CRC of ingress codeword */
++    uint32_t    RxTcCrcLen;         /*!< input/output, length of TC CRC of ingress codeword */
++    uint32_t    TxEthCrcGen;        /*!< input/output, generate ETH CRC for egress packet */
++    uint32_t    TxTcCrcGen;         /*!< input/output, generate TC CRC for egress codeword */
++    uint32_t    TxTcCrcLen;         /*!< input/output, length of TC CRC of egress codeword */
++} IFX_PTM_CFG_T;
++
++/*!
++  \typedef IFX_PTM_PRIO_Q_MAP_T
++  \brief Wrapping of structure "ppe_prio_q_map".
++ */
++/*!
++  \struct ppe_prio_q_map
++  \brief Structure used for Priority Value to TX Queue mapping.
++ */
++typedef struct ppe_prio_q_map {
++    int             pkt_prio;
++    int             qid;
++    int             vpi;    //  ignored in eth interface
++    int             vci;    //  ignored in eth interface
++} IFX_PTM_PRIO_Q_MAP_T;
++
++/*@}*/
++
++
++
++/*
++ * ####################################
++ *                 API
++ * ####################################
++ */
++
++#ifdef __KERNEL__
++struct port_cell_info {
++    unsigned int    port_num;
++    unsigned int    tx_link_rate[2];
++};
++#endif
++
++
++
++#endif  //  IFX_PTM_H
++
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -14,6 +14,7 @@
+ #include <linux/of_platform.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
++#include <linux/module.h>
+ 
+ #include <asm/bootinfo.h>
+ #include <asm/irq_cpu.h>
+@@ -100,6 +101,7 @@ void ltq_mask_and_ack_irq(struct irq_dat
+ 	ltq_icu_w32(im, ltq_icu_r32(im, ier) & ~BIT(offset), ier);
+ 	ltq_icu_w32(im, BIT(offset), isr);
+ }
++EXPORT_SYMBOL(ltq_mask_and_ack_irq);
+ 
+ static void ltq_ack_irq(struct irq_data *d)
+ {
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -59,6 +59,8 @@ void (*_dma_cache_wback)(unsigned long s
+ void (*_dma_cache_inv)(unsigned long start, unsigned long size);
+ 
+ EXPORT_SYMBOL(_dma_cache_wback_inv);
++EXPORT_SYMBOL(_dma_cache_wback);
++EXPORT_SYMBOL(_dma_cache_inv);
+ 
+ #endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+ 
+--- a/include/uapi/linux/atm.h
++++ b/include/uapi/linux/atm.h
+@@ -130,8 +130,14 @@
+ #define ATM_ABR		4
+ #define ATM_ANYCLASS	5		/* compatible with everything */
+ 
++#define ATM_VBR_NRT     ATM_VBR
++#define ATM_VBR_RT      6
++#define ATM_UBR_PLUS    7
++#define ATM_GFR         8
++
+ #define ATM_MAX_PCR	-1		/* maximum available PCR */
+ 
++
+ struct atm_trafprm {
+ 	unsigned char	traffic_class;	/* traffic class (ATM_UBR, ...) */
+ 	int		max_pcr;	/* maximum PCR in cells per second */
+--- a/net/atm/common.c
++++ b/net/atm/common.c
+@@ -62,11 +62,17 @@ static void vcc_remove_socket(struct soc
+ 	write_unlock_irq(&vcc_sklist_lock);
+ }
+ 
++struct sk_buff* (*ifx_atm_alloc_tx)(struct atm_vcc *, unsigned int) = NULL;
++EXPORT_SYMBOL(ifx_atm_alloc_tx);
++
+ static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
+ {
+ 	struct sk_buff *skb;
+ 	struct sock *sk = sk_atm(vcc);
+ 
++	if (ifx_atm_alloc_tx != NULL)
++		return ifx_atm_alloc_tx(vcc, size);
++
+ 	if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
+ 		pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
+ 			 sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
+--- a/net/atm/proc.c
++++ b/net/atm/proc.c
+@@ -154,7 +154,7 @@ static void *vcc_seq_next(struct seq_fil
+ static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
+ {
+ 	static const char *const class_name[] = {
+-		"off", "UBR", "CBR", "VBR", "ABR"};
++		"off","UBR","CBR","NRT-VBR","ABR","ANY","RT-VBR","UBR+","GFR"};
+ 	static const char *const aal_name[] = {
+ 		"---",	"1",	"2",	"3/4",	/*  0- 3 */
+ 		"???",	"5",	"???",	"???",	/*  4- 7 */
diff --git a/target/linux/lantiq/patches-4.4/0007-MIPS-lantiq-add-basic-tffs-driver.patch b/target/linux/lantiq/patches-4.4/0007-MIPS-lantiq-add-basic-tffs-driver.patch
new file mode 100644
index 0000000..7081373
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0007-MIPS-lantiq-add-basic-tffs-driver.patch
@@ -0,0 +1,111 @@
+From d27ec8bb97db0f60d81ab255d51ac4e967362067 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Thu, 7 Aug 2014 18:34:19 +0200
+Subject: [PATCH 07/36] MIPS: lantiq: add basic tffs driver
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ arch/mips/lantiq/xway/Makefile |    2 +-
+ arch/mips/lantiq/xway/tffs.c   |   87 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 88 insertions(+), 1 deletion(-)
+ create mode 100644 arch/mips/lantiq/xway/tffs.c
+
+--- a/arch/mips/lantiq/xway/Makefile
++++ b/arch/mips/lantiq/xway/Makefile
+@@ -1,5 +1,5 @@
+ obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o dcdc.o
+ 
+-obj-y += vmmc.o
++obj-y += vmmc.o tffs.o
+ 
+ obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o
+--- /dev/null
++++ b/arch/mips/lantiq/xway/tffs.c
+@@ -0,0 +1,87 @@
++#include <linux/module.h>
++#include <linux/mtd/mtd.h>
++#include <linux/errno.h>
++#include <linux/slab.h>
++
++struct tffs_entry {
++    uint16_t id;
++    uint16_t len;
++};
++
++static struct tffs_id {
++	uint32_t id;
++	char *name;
++	unsigned char *val;
++	uint32_t offset;
++	uint32_t len;
++} ids[] = {
++	{ 0x01A9, "annex" },
++	{ 0x0188, "maca" },
++	{ 0x0189, "macb" },
++	{ 0x018a, "macwlan" },
++	{ 0x0195, "macwlan2" },
++	{ 0x018b, "macdsl" },
++	{ 0x01C2, "webgui_pass" },
++	{ 0x01AB, "wlan_key" },
++};
++
++static struct mtd_info *tffs1, *tffs2;
++
++static struct tffs_id* tffs_find_id(int id)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(ids); i++)
++		if (id == ids[i].id)
++			return &ids[i];
++
++	return NULL;
++}
++
++static void tffs_index(void)
++{
++	struct tffs_entry *E = NULL;
++	struct tffs_entry entry;
++	int ret, retlen;
++
++	while ((unsigned int) E + sizeof(struct tffs_entry) < tffs2->size) {
++		struct tffs_id *id;
++		int len;
++
++		ret = mtd_read(tffs2, (unsigned int) E, sizeof(struct tffs_entry), &retlen, (unsigned char *)&entry);
++		if (ret)
++			return;
++
++		if (entry.id == 0xffff)
++			return;
++
++		id = tffs_find_id(entry.id);
++		if (id) {
++			id->offset = (uint32_t) E;
++			id->len = entry.len;
++			id->val = kzalloc(entry.len + 1, GFP_KERNEL);
++			mtd_read(tffs2, ((unsigned int) E) + sizeof(struct tffs_entry), entry.len, &retlen, id->val);
++
++		}
++		//printk(KERN_INFO "found entry at 0x%08X-> [<0x%x> %u bytes]\n", (uint32_t) E, entry.id, entry.len);
++		if (id && id->name)
++			printk(KERN_INFO "found entry name -> %s=%s\n", id->name, id->val);
++
++		len = (entry.len + 3) & ~0x03;
++		E = (struct tffs_entry *)(((unsigned int)E) + sizeof(struct tffs_entry) + len);
++	}
++}
++
++static int __init tffs_init(void)
++{
++	tffs1 = get_mtd_device_nm("tffs (1)");
++	tffs2 = get_mtd_device_nm("tffs (2)");
++	if (IS_ERR(tffs1) || IS_ERR(tffs2))
++		return -1;
++
++	tffs_index();
++
++	return 0;
++}
++late_initcall(tffs_init);
++
diff --git a/target/linux/lantiq/patches-4.4/0008-MIPS-lantiq-backport-old-timer-code.patch b/target/linux/lantiq/patches-4.4/0008-MIPS-lantiq-backport-old-timer-code.patch
new file mode 100644
index 0000000..5525503
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0008-MIPS-lantiq-backport-old-timer-code.patch
@@ -0,0 +1,1028 @@
+From 94800350cb8d2f29dda2206b5e9a3772024ee168 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Thu, 7 Aug 2014 18:30:56 +0200
+Subject: [PATCH 08/36] MIPS: lantiq: backport old timer code
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ arch/mips/include/asm/mach-lantiq/lantiq_timer.h |  155 ++++
+ arch/mips/lantiq/xway/Makefile                   |    2 +-
+ arch/mips/lantiq/xway/timer.c                    |  845 ++++++++++++++++++++++
+ 3 files changed, 1001 insertions(+), 1 deletion(-)
+ create mode 100644 arch/mips/include/asm/mach-lantiq/lantiq_timer.h
+ create mode 100644 arch/mips/lantiq/xway/timer.c
+
+--- /dev/null
++++ b/arch/mips/include/asm/mach-lantiq/lantiq_timer.h
+@@ -0,0 +1,155 @@
++#ifndef __DANUBE_GPTU_DEV_H__2005_07_26__10_19__
++#define __DANUBE_GPTU_DEV_H__2005_07_26__10_19__
++
++
++/******************************************************************************
++       Copyright (c) 2002, Infineon Technologies.  All rights reserved.
++
++                               No Warranty
++   Because the program is licensed free of charge, there is no warranty for
++   the program, to the extent permitted by applicable law.  Except when
++   otherwise stated in writing the copyright holders and/or other parties
++   provide the program "as is" without warranty of any kind, either
++   expressed or implied, including, but not limited to, the implied
++   warranties of merchantability and fitness for a particular purpose. The
++   entire risk as to the quality and performance of the program is with
++   you.  should the program prove defective, you assume the cost of all
++   necessary servicing, repair or correction.
++
++   In no event unless required by applicable law or agreed to in writing
++   will any copyright holder, or any other party who may modify and/or
++   redistribute the program as permitted above, be liable to you for
++   damages, including any general, special, incidental or consequential
++   damages arising out of the use or inability to use the program
++   (including but not limited to loss of data or data being rendered
++   inaccurate or losses sustained by you or third parties or a failure of
++   the program to operate with any other programs), even if such holder or
++   other party has been advised of the possibility of such damages.
++******************************************************************************/
++
++
++/*
++ * ####################################
++ *              Definition
++ * ####################################
++ */
++
++/*
++ *  Available Timer/Counter Index
++ */
++#define TIMER(n, X)                     (n * 2 + (X ? 1 : 0))
++#define TIMER_ANY                       0x00
++#define TIMER1A                         TIMER(1, 0)
++#define TIMER1B                         TIMER(1, 1)
++#define TIMER2A                         TIMER(2, 0)
++#define TIMER2B                         TIMER(2, 1)
++#define TIMER3A                         TIMER(3, 0)
++#define TIMER3B                         TIMER(3, 1)
++
++/*
++ *  Flag of Timer/Counter
++ *  These flags specify the way in which timer is configured.
++ */
++/*  Bit size of timer/counter.                      */
++#define TIMER_FLAG_16BIT                0x0000
++#define TIMER_FLAG_32BIT                0x0001
++/*  Switch between timer and counter.               */
++#define TIMER_FLAG_TIMER                0x0000
++#define TIMER_FLAG_COUNTER              0x0002
++/*  Stop or continue when overflowing/underflowing. */
++#define TIMER_FLAG_ONCE                 0x0000
++#define TIMER_FLAG_CYCLIC               0x0004
++/*  Count up or count down.                         */
++#define TIMER_FLAG_UP                   0x0000
++#define TIMER_FLAG_DOWN                 0x0008
++/*  Count on specific level or edge.                */
++#define TIMER_FLAG_HIGH_LEVEL_SENSITIVE 0x0000
++#define TIMER_FLAG_LOW_LEVEL_SENSITIVE  0x0040
++#define TIMER_FLAG_RISE_EDGE            0x0010
++#define TIMER_FLAG_FALL_EDGE            0x0020
++#define TIMER_FLAG_ANY_EDGE             0x0030
++/*  Signal is synchronous to module clock or not.   */
++#define TIMER_FLAG_UNSYNC               0x0000
++#define TIMER_FLAG_SYNC                 0x0080
++/*  Different interrupt handle type.                */
++#define TIMER_FLAG_NO_HANDLE            0x0000
++#if defined(__KERNEL__)
++    #define TIMER_FLAG_CALLBACK_IN_IRQ  0x0100
++#endif  //  defined(__KERNEL__)
++#define TIMER_FLAG_SIGNAL               0x0300
++/*  Internal clock source or external clock source  */
++#define TIMER_FLAG_INT_SRC              0x0000
++#define TIMER_FLAG_EXT_SRC              0x1000
++
++
++/*
++ *  ioctl Command
++ */
++#define GPTU_REQUEST_TIMER              0x01    /*  General method to setup timer/counter.  */
++#define GPTU_FREE_TIMER                 0x02    /*  Free timer/counter.                     */
++#define GPTU_START_TIMER                0x03    /*  Start or resume timer/counter.          */
++#define GPTU_STOP_TIMER                 0x04    /*  Suspend timer/counter.                  */
++#define GPTU_GET_COUNT_VALUE            0x05    /*  Get current count value.                */
++#define GPTU_CALCULATE_DIVIDER          0x06    /*  Calculate timer divider from given freq.*/
++#define GPTU_SET_TIMER                  0x07    /*  Simplified method to setup timer.       */
++#define GPTU_SET_COUNTER                0x08    /*  Simplified method to setup counter.     */
++
++/*
++ *  Data Type Used to Call ioctl
++ */
++struct gptu_ioctl_param {
++    unsigned int                        timer;  /*  In command GPTU_REQUEST_TIMER, GPTU_SET_TIMER, and  *
++                                                 *  GPTU_SET_COUNTER, this field is ID of expected      *
++                                                 *  timer/counter. If it's zero, a timer/counter would  *
++                                                 *  be dynamically allocated and ID would be stored in  *
++                                                 *  this field.                                         *
++                                                 *  In command GPTU_GET_COUNT_VALUE, this field is      *
++                                                 *  ignored.                                            *
++                                                 *  In other command, this field is ID of timer/counter *
++                                                 *  allocated.                                          */
++    unsigned int                        flag;   /*  In command GPTU_REQUEST_TIMER, GPTU_SET_TIMER, and  *
++                                                 *  GPTU_SET_COUNTER, this field contains flags to      *
++                                                 *  specify how to configure timer/counter.             *
++                                                 *  In command GPTU_START_TIMER, zero indicate start    *
++                                                 *  and non-zero indicate resume timer/counter.         *
++                                                 *  In other command, this field is ignored.            */
++    unsigned long                       value;  /*  In command GPTU_REQUEST_TIMER, this field contains  *
++                                                 *  init/reload value.                                  *
++                                                 *  In command GPTU_SET_TIMER, this field contains      *
++                                                 *  frequency (0.001Hz) of timer.                       *
++                                                 *  In command GPTU_GET_COUNT_VALUE, current count      *
++                                                 *  value would be stored in this field.                *
++                                                 *  In command GPTU_CALCULATE_DIVIDER, this field       *
++                                                 *  contains frequency wanted, and after calculation,   *
++                                                 *  divider would be stored in this field to overwrite  *
++                                                 *  the frequency.                                      *
++                                                 *  In other command, this field is ignored.            */
++    int                                 pid;    /*  In command GPTU_REQUEST_TIMER and GPTU_SET_TIMER,   *
++                                                 *  if signal is required, this field contains process  *
++                                                 *  ID to which signal would be sent.                   *
++                                                 *  In other command, this field is ignored.            */
++    int                                 sig;    /*  In command GPTU_REQUEST_TIMER and GPTU_SET_TIMER,   *
++                                                 *  if signal is required, this field contains signal   *
++                                                 *  number which would be sent.                         *
++                                                 *  In other command, this field is ignored.            */
++};
++
++/*
++ * ####################################
++ *              Data Type
++ * ####################################
++ */
++typedef void (*timer_callback)(unsigned long arg);
++
++extern int lq_request_timer(unsigned int, unsigned int, unsigned long, unsigned long, unsigned long);
++extern int lq_free_timer(unsigned int);
++extern int lq_start_timer(unsigned int, int);
++extern int lq_stop_timer(unsigned int);
++extern int lq_reset_counter_flags(u32 timer, u32 flags);
++extern int lq_get_count_value(unsigned int, unsigned long *);
++extern u32 lq_cal_divider(unsigned long);
++extern int lq_set_timer(unsigned int, unsigned int, int, int, unsigned int, unsigned long, unsigned long);
++extern int lq_set_counter(unsigned int timer, unsigned int flag,
++	u32 reload, unsigned long arg1, unsigned long arg2);
++
++#endif /* __DANUBE_GPTU_DEV_H__2005_07_26__10_19__ */
+--- a/arch/mips/lantiq/xway/Makefile
++++ b/arch/mips/lantiq/xway/Makefile
+@@ -1,4 +1,4 @@
+-obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o dcdc.o
++obj-y := prom.o sysctrl.o clk.o reset.o dma.o timer.o dcdc.o
+ 
+ obj-y += vmmc.o tffs.o
+ 
+--- /dev/null
++++ b/arch/mips/lantiq/xway/timer.c
+@@ -0,0 +1,845 @@
++#ifndef CONFIG_SOC_AMAZON_SE
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/init.h>
++#include <linux/uaccess.h>
++#include <linux/unistd.h>
++#include <linux/errno.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++
++#include <asm/irq.h>
++#include <asm/div64.h>
++#include "../clk.h"
++
++#include <lantiq_soc.h>
++#include <lantiq_irq.h>
++#include <lantiq_timer.h>
++
++#define MAX_NUM_OF_32BIT_TIMER_BLOCKS	6
++
++#ifdef TIMER1A
++#define FIRST_TIMER			TIMER1A
++#else
++#define FIRST_TIMER			2
++#endif
++
++/*
++ *  GPTC divider is set or not.
++ */
++#define GPTU_CLC_RMC_IS_SET		0
++
++/*
++ *  Timer Interrupt (IRQ)
++ */
++/*  Must be adjusted when ICU driver is available */
++#define TIMER_INTERRUPT			(INT_NUM_IM3_IRL0 + 22)
++
++/*
++ *  Bits Operation
++ */
++#define GET_BITS(x, msb, lsb)		\
++	(((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
++#define SET_BITS(x, msb, lsb, value)	\
++	(((x) & ~(((1 << ((msb) + 1)) - 1) ^ ((1 << (lsb)) - 1))) | \
++	(((value) & ((1 << (1 + (msb) - (lsb))) - 1)) << (lsb)))
++
++/*
++ *  GPTU Register Mapping
++ */
++#define LQ_GPTU			(KSEG1 + 0x1E100A00)
++#define LQ_GPTU_CLC		((volatile u32 *)(LQ_GPTU + 0x0000))
++#define LQ_GPTU_ID			((volatile u32 *)(LQ_GPTU + 0x0008))
++#define LQ_GPTU_CON(n, X)		((volatile u32 *)(LQ_GPTU + 0x0010 + ((X) * 4) + ((n) - 1) * 0x0020))	/* X must be either A or B */
++#define LQ_GPTU_RUN(n, X)		((volatile u32 *)(LQ_GPTU + 0x0018 + ((X) * 4) + ((n) - 1) * 0x0020))	/* X must be either A or B */
++#define LQ_GPTU_RELOAD(n, X)	((volatile u32 *)(LQ_GPTU + 0x0020 + ((X) * 4) + ((n) - 1) * 0x0020))	/* X must be either A or B */
++#define LQ_GPTU_COUNT(n, X)	((volatile u32 *)(LQ_GPTU + 0x0028 + ((X) * 4) + ((n) - 1) * 0x0020))	/* X must be either A or B */
++#define LQ_GPTU_IRNEN		((volatile u32 *)(LQ_GPTU + 0x00F4))
++#define LQ_GPTU_IRNICR		((volatile u32 *)(LQ_GPTU + 0x00F8))
++#define LQ_GPTU_IRNCR		((volatile u32 *)(LQ_GPTU + 0x00FC))
++
++/*
++ *  Clock Control Register
++ */
++#define GPTU_CLC_SMC			GET_BITS(*LQ_GPTU_CLC, 23, 16)
++#define GPTU_CLC_RMC			GET_BITS(*LQ_GPTU_CLC, 15, 8)
++#define GPTU_CLC_FSOE			(*LQ_GPTU_CLC & (1 << 5))
++#define GPTU_CLC_EDIS			(*LQ_GPTU_CLC & (1 << 3))
++#define GPTU_CLC_SPEN			(*LQ_GPTU_CLC & (1 << 2))
++#define GPTU_CLC_DISS			(*LQ_GPTU_CLC & (1 << 1))
++#define GPTU_CLC_DISR			(*LQ_GPTU_CLC & (1 << 0))
++
++#define GPTU_CLC_SMC_SET(value)		SET_BITS(0, 23, 16, (value))
++#define GPTU_CLC_RMC_SET(value)		SET_BITS(0, 15, 8, (value))
++#define GPTU_CLC_FSOE_SET(value)	((value) ? (1 << 5) : 0)
++#define GPTU_CLC_SBWE_SET(value)	((value) ? (1 << 4) : 0)
++#define GPTU_CLC_EDIS_SET(value)	((value) ? (1 << 3) : 0)
++#define GPTU_CLC_SPEN_SET(value)	((value) ? (1 << 2) : 0)
++#define GPTU_CLC_DISR_SET(value)	((value) ? (1 << 0) : 0)
++
++/*
++ *  ID Register
++ */
++#define GPTU_ID_ID			GET_BITS(*LQ_GPTU_ID, 15, 8)
++#define GPTU_ID_CFG			GET_BITS(*LQ_GPTU_ID, 7, 5)
++#define GPTU_ID_REV			GET_BITS(*LQ_GPTU_ID, 4, 0)
++
++/*
++ *  Control Register of Timer/Counter nX
++ *    n is the index of block (1 based index)
++ *    X is either A or B
++ */
++#define GPTU_CON_SRC_EG(n, X)		(*LQ_GPTU_CON(n, X) & (1 << 10))
++#define GPTU_CON_SRC_EXT(n, X)		(*LQ_GPTU_CON(n, X) & (1 << 9))
++#define GPTU_CON_SYNC(n, X)		(*LQ_GPTU_CON(n, X) & (1 << 8))
++#define GPTU_CON_EDGE(n, X)		GET_BITS(*LQ_GPTU_CON(n, X), 7, 6)
++#define GPTU_CON_INV(n, X)		(*LQ_GPTU_CON(n, X) & (1 << 5))
++#define GPTU_CON_EXT(n, X)		(*LQ_GPTU_CON(n, A) & (1 << 4))	/* Timer/Counter B does not have this bit */
++#define GPTU_CON_STP(n, X)		(*LQ_GPTU_CON(n, X) & (1 << 3))
++#define GPTU_CON_CNT(n, X)		(*LQ_GPTU_CON(n, X) & (1 << 2))
++#define GPTU_CON_DIR(n, X)		(*LQ_GPTU_CON(n, X) & (1 << 1))
++#define GPTU_CON_EN(n, X)		(*LQ_GPTU_CON(n, X) & (1 << 0))
++
++#define GPTU_CON_SRC_EG_SET(value)	((value) ? 0 : (1 << 10))
++#define GPTU_CON_SRC_EXT_SET(value)	((value) ? (1 << 9) : 0)
++#define GPTU_CON_SYNC_SET(value)	((value) ? (1 << 8) : 0)
++#define GPTU_CON_EDGE_SET(value)	SET_BITS(0, 7, 6, (value))
++#define GPTU_CON_INV_SET(value)		((value) ? (1 << 5) : 0)
++#define GPTU_CON_EXT_SET(value)		((value) ? (1 << 4) : 0)
++#define GPTU_CON_STP_SET(value)		((value) ? (1 << 3) : 0)
++#define GPTU_CON_CNT_SET(value)		((value) ? (1 << 2) : 0)
++#define GPTU_CON_DIR_SET(value)		((value) ? (1 << 1) : 0)
++
++#define GPTU_RUN_RL_SET(value)		((value) ? (1 << 2) : 0)
++#define GPTU_RUN_CEN_SET(value)		((value) ? (1 << 1) : 0)
++#define GPTU_RUN_SEN_SET(value)		((value) ? (1 << 0) : 0)
++
++#define GPTU_IRNEN_TC_SET(n, X, value)	((value) ? (1 << (((n) - 1) * 2 + (X))) : 0)
++#define GPTU_IRNCR_TC_SET(n, X, value)	((value) ? (1 << (((n) - 1) * 2 + (X))) : 0)
++
++#define TIMER_FLAG_MASK_SIZE(x)		(x & 0x0001)
++#define TIMER_FLAG_MASK_TYPE(x)		(x & 0x0002)
++#define TIMER_FLAG_MASK_STOP(x)		(x & 0x0004)
++#define TIMER_FLAG_MASK_DIR(x)		(x & 0x0008)
++#define TIMER_FLAG_NONE_EDGE		0x0000
++#define TIMER_FLAG_MASK_EDGE(x)		(x & 0x0030)
++#define TIMER_FLAG_REAL			0x0000
++#define TIMER_FLAG_INVERT		0x0040
++#define TIMER_FLAG_MASK_INVERT(x)	(x & 0x0040)
++#define TIMER_FLAG_MASK_TRIGGER(x)	(x & 0x0070)
++#define TIMER_FLAG_MASK_SYNC(x)		(x & 0x0080)
++#define TIMER_FLAG_CALLBACK_IN_HB	0x0200
++#define TIMER_FLAG_MASK_HANDLE(x)	(x & 0x0300)
++#define TIMER_FLAG_MASK_SRC(x)		(x & 0x1000)
++
++struct timer_dev_timer {
++	unsigned int f_irq_on;
++	unsigned int irq;
++	unsigned int flag;
++	unsigned long arg1;
++	unsigned long arg2;
++};
++
++struct timer_dev {
++	struct mutex gptu_mutex;
++	unsigned int number_of_timers;
++	unsigned int occupation;
++	unsigned int f_gptu_on;
++	struct timer_dev_timer timer[MAX_NUM_OF_32BIT_TIMER_BLOCKS * 2];
++};
++
++
++unsigned int ltq_get_fpi_bus_clock(int fpi) {
++	struct clk *clk = clk_get_fpi();
++	return clk_get_rate(clk);
++}
++
++
++static long gptu_ioctl(struct file *, unsigned int, unsigned long);
++static int gptu_open(struct inode *, struct file *);
++static int gptu_release(struct inode *, struct file *);
++
++static struct file_operations gptu_fops = {
++	.owner = THIS_MODULE,
++	.unlocked_ioctl = gptu_ioctl,
++	.open = gptu_open,
++	.release = gptu_release
++};
++
++static struct miscdevice gptu_miscdev = {
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = "gptu",
++	.fops = &gptu_fops,
++};
++
++static struct timer_dev timer_dev;
++
++static irqreturn_t timer_irq_handler(int irq, void *p)
++{
++	unsigned int timer;
++	unsigned int flag;
++	struct timer_dev_timer *dev_timer = (struct timer_dev_timer *)p;
++
++	timer = irq - TIMER_INTERRUPT;
++	if (timer < timer_dev.number_of_timers
++		&& dev_timer == &timer_dev.timer[timer]) {
++		/*  Clear interrupt.    */
++		ltq_w32(1 << timer, LQ_GPTU_IRNCR);
++
++		/*  Call user handler or signal. */
++		flag = dev_timer->flag;
++		if (!(timer & 0x01)
++			|| TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT) {
++			/* 16-bit timer or timer A of 32-bit timer  */
++			switch (TIMER_FLAG_MASK_HANDLE(flag)) {
++			case TIMER_FLAG_CALLBACK_IN_IRQ:
++			case TIMER_FLAG_CALLBACK_IN_HB:
++				if (dev_timer->arg1)
++					(*(timer_callback)dev_timer->arg1)(dev_timer->arg2);
++				break;
++			case TIMER_FLAG_SIGNAL:
++				send_sig((int)dev_timer->arg2, (struct task_struct *)dev_timer->arg1, 0);
++				break;
++			}
++		}
++	}
++	return IRQ_HANDLED;
++}
++
++static inline void lq_enable_gptu(void)
++{
++	struct clk *clk = clk_get_sys("1e100a00.gptu", NULL);
++	clk_enable(clk);
++
++	//ltq_pmu_enable(PMU_GPT);
++
++	/*  Set divider as 1, disable write protection for SPEN, enable module. */
++	*LQ_GPTU_CLC =
++		GPTU_CLC_SMC_SET(0x00) |
++		GPTU_CLC_RMC_SET(0x01) |
++		GPTU_CLC_FSOE_SET(0) |
++		GPTU_CLC_SBWE_SET(1) |
++		GPTU_CLC_EDIS_SET(0) |
++		GPTU_CLC_SPEN_SET(0) |
++		GPTU_CLC_DISR_SET(0);
++}
++
++static inline void lq_disable_gptu(void)
++{
++	struct clk *clk = clk_get_sys("1e100a00.gptu", NULL);
++	ltq_w32(0x00, LQ_GPTU_IRNEN);
++	ltq_w32(0xfff, LQ_GPTU_IRNCR);
++
++	/*  Set divider as 0, enable write protection for SPEN, disable module. */
++	*LQ_GPTU_CLC =
++		GPTU_CLC_SMC_SET(0x00) |
++		GPTU_CLC_RMC_SET(0x00) |
++		GPTU_CLC_FSOE_SET(0) |
++		GPTU_CLC_SBWE_SET(0) |
++		GPTU_CLC_EDIS_SET(0) |
++		GPTU_CLC_SPEN_SET(0) |
++		GPTU_CLC_DISR_SET(1);
++
++	clk_disable(clk);
++}
++
++int lq_request_timer(unsigned int timer, unsigned int flag,
++	unsigned long value, unsigned long arg1, unsigned long arg2)
++{
++	int ret = 0;
++	unsigned int con_reg, irnen_reg;
++	int n, X;
++
++	if (timer >= FIRST_TIMER + timer_dev.number_of_timers)
++		return -EINVAL;
++
++	printk(KERN_INFO "request_timer(%d, 0x%08X, %lu)...",
++		timer, flag, value);
++
++	if (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT)
++		value &= 0xFFFF;
++	else
++		timer &= ~0x01;
++
++	mutex_lock(&timer_dev.gptu_mutex);
++
++	/*
++	 *  Allocate timer.
++	 */
++	if (timer < FIRST_TIMER) {
++		unsigned int mask;
++		unsigned int shift;
++		/* This takes care of TIMER1B which is the only choice for Voice TAPI system */
++		unsigned int offset = TIMER2A;
++
++		/*
++		 *  Pick up a free timer.
++		 */
++		if (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT) {
++			mask = 1 << offset;
++			shift = 1;
++		} else {
++			mask = 3 << offset;
++			shift = 2;
++		}
++		for (timer = offset;
++		     timer < offset + timer_dev.number_of_timers;
++		     timer += shift, mask <<= shift)
++			if (!(timer_dev.occupation & mask)) {
++				timer_dev.occupation |= mask;
++				break;
++			}
++		if (timer >= offset + timer_dev.number_of_timers) {
++			printk("failed![%d]\n", __LINE__);
++			mutex_unlock(&timer_dev.gptu_mutex);
++			return -EINVAL;
++		} else
++			ret = timer;
++	} else {
++		register unsigned int mask;
++
++		/*
++		 *  Check if the requested timer is free.
++		 */
++		mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer;
++		if ((timer_dev.occupation & mask)) {
++			printk("failed![%d] mask %#x, timer_dev.occupation %#x\n",
++				__LINE__, mask, timer_dev.occupation);
++			mutex_unlock(&timer_dev.gptu_mutex);
++			return -EBUSY;
++		} else {
++			timer_dev.occupation |= mask;
++			ret = 0;
++		}
++	}
++
++	/*
++	 *  Prepare control register value.
++	 */
++	switch (TIMER_FLAG_MASK_EDGE(flag)) {
++	default:
++	case TIMER_FLAG_NONE_EDGE:
++		con_reg = GPTU_CON_EDGE_SET(0x00);
++		break;
++	case TIMER_FLAG_RISE_EDGE:
++		con_reg = GPTU_CON_EDGE_SET(0x01);
++		break;
++	case TIMER_FLAG_FALL_EDGE:
++		con_reg = GPTU_CON_EDGE_SET(0x02);
++		break;
++	case TIMER_FLAG_ANY_EDGE:
++		con_reg = GPTU_CON_EDGE_SET(0x03);
++		break;
++	}
++	if (TIMER_FLAG_MASK_TYPE(flag) == TIMER_FLAG_TIMER)
++		con_reg |=
++			TIMER_FLAG_MASK_SRC(flag) ==
++			TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EXT_SET(1) :
++			GPTU_CON_SRC_EXT_SET(0);
++	else
++		con_reg |=
++			TIMER_FLAG_MASK_SRC(flag) ==
++			TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EG_SET(1) :
++			GPTU_CON_SRC_EG_SET(0);
++	con_reg |=
++		TIMER_FLAG_MASK_SYNC(flag) ==
++		TIMER_FLAG_UNSYNC ? GPTU_CON_SYNC_SET(0) :
++		GPTU_CON_SYNC_SET(1);
++	con_reg |=
++		TIMER_FLAG_MASK_INVERT(flag) ==
++		TIMER_FLAG_REAL ? GPTU_CON_INV_SET(0) : GPTU_CON_INV_SET(1);
++	con_reg |=
++		TIMER_FLAG_MASK_SIZE(flag) ==
++		TIMER_FLAG_16BIT ? GPTU_CON_EXT_SET(0) :
++		GPTU_CON_EXT_SET(1);
++	con_reg |=
++		TIMER_FLAG_MASK_STOP(flag) ==
++		TIMER_FLAG_ONCE ? GPTU_CON_STP_SET(1) : GPTU_CON_STP_SET(0);
++	con_reg |=
++		TIMER_FLAG_MASK_TYPE(flag) ==
++		TIMER_FLAG_TIMER ? GPTU_CON_CNT_SET(0) :
++		GPTU_CON_CNT_SET(1);
++	con_reg |=
++		TIMER_FLAG_MASK_DIR(flag) ==
++		TIMER_FLAG_UP ? GPTU_CON_DIR_SET(1) : GPTU_CON_DIR_SET(0);
++
++	/*
++	 *  Fill up running data.
++	 */
++	timer_dev.timer[timer - FIRST_TIMER].flag = flag;
++	timer_dev.timer[timer - FIRST_TIMER].arg1 = arg1;
++	timer_dev.timer[timer - FIRST_TIMER].arg2 = arg2;
++	if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT)
++		timer_dev.timer[timer - FIRST_TIMER + 1].flag = flag;
++
++	/*
++	 *  Enable GPTU module.
++	 */
++	if (!timer_dev.f_gptu_on) {
++		lq_enable_gptu();
++		timer_dev.f_gptu_on = 1;
++	}
++
++	/*
++	 *  Enable IRQ.
++	 */
++	if (TIMER_FLAG_MASK_HANDLE(flag) != TIMER_FLAG_NO_HANDLE) {
++		if (TIMER_FLAG_MASK_HANDLE(flag) == TIMER_FLAG_SIGNAL)
++			timer_dev.timer[timer - FIRST_TIMER].arg1 =
++				(unsigned long) find_task_by_vpid((int) arg1);
++
++		irnen_reg = 1 << (timer - FIRST_TIMER);
++
++		if (TIMER_FLAG_MASK_HANDLE(flag) == TIMER_FLAG_SIGNAL
++		    || (TIMER_FLAG_MASK_HANDLE(flag) ==
++			TIMER_FLAG_CALLBACK_IN_IRQ
++			&& timer_dev.timer[timer - FIRST_TIMER].arg1)) {
++			enable_irq(timer_dev.timer[timer - FIRST_TIMER].irq);
++			timer_dev.timer[timer - FIRST_TIMER].f_irq_on = 1;
++		}
++	} else
++		irnen_reg = 0;
++
++	/*
++	 *  Write config register, reload value and enable interrupt.
++	 */
++	n = timer >> 1;
++	X = timer & 0x01;
++	*LQ_GPTU_CON(n, X) = con_reg;
++	*LQ_GPTU_RELOAD(n, X) = value;
++	/* printk("reload value = %d\n", (u32)value); */
++	*LQ_GPTU_IRNEN |= irnen_reg;
++
++	mutex_unlock(&timer_dev.gptu_mutex);
++	printk("successful!\n");
++	return ret;
++}
++EXPORT_SYMBOL(lq_request_timer);
++
++int lq_free_timer(unsigned int timer)
++{
++	unsigned int flag;
++	unsigned int mask;
++	int n, X;
++
++	if (!timer_dev.f_gptu_on)
++		return -EINVAL;
++
++	if (timer < FIRST_TIMER || timer >= FIRST_TIMER + timer_dev.number_of_timers)
++		return -EINVAL;
++
++	mutex_lock(&timer_dev.gptu_mutex);
++
++	flag = timer_dev.timer[timer - FIRST_TIMER].flag;
++	if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT)
++		timer &= ~0x01;
++
++	mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer;
++	if (((timer_dev.occupation & mask) ^ mask)) {
++		mutex_unlock(&timer_dev.gptu_mutex);
++		return -EINVAL;
++	}
++
++	n = timer >> 1;
++	X = timer & 0x01;
++
++	if (GPTU_CON_EN(n, X))
++		*LQ_GPTU_RUN(n, X) = GPTU_RUN_CEN_SET(1);
++
++	*LQ_GPTU_IRNEN &= ~GPTU_IRNEN_TC_SET(n, X, 1);
++	*LQ_GPTU_IRNCR |= GPTU_IRNCR_TC_SET(n, X, 1);
++
++	if (timer_dev.timer[timer - FIRST_TIMER].f_irq_on) {
++		disable_irq(timer_dev.timer[timer - FIRST_TIMER].irq);
++		timer_dev.timer[timer - FIRST_TIMER].f_irq_on = 0;
++	}
++
++	timer_dev.occupation &= ~mask;
++	if (!timer_dev.occupation && timer_dev.f_gptu_on) {
++		lq_disable_gptu();
++		timer_dev.f_gptu_on = 0;
++	}
++
++	mutex_unlock(&timer_dev.gptu_mutex);
++
++	return 0;
++}
++EXPORT_SYMBOL(lq_free_timer);
++
++int lq_start_timer(unsigned int timer, int is_resume)
++{
++	unsigned int flag;
++	unsigned int mask;
++	int n, X;
++
++	if (!timer_dev.f_gptu_on)
++		return -EINVAL;
++
++	if (timer < FIRST_TIMER || timer >= FIRST_TIMER + timer_dev.number_of_timers)
++		return -EINVAL;
++
++	mutex_lock(&timer_dev.gptu_mutex);
++
++	flag = timer_dev.timer[timer - FIRST_TIMER].flag;
++	if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT)
++		timer &= ~0x01;
++
++	mask = (TIMER_FLAG_MASK_SIZE(flag) ==
++	TIMER_FLAG_16BIT ? 1 : 3) << timer;
++	if (((timer_dev.occupation & mask) ^ mask)) {
++		mutex_unlock(&timer_dev.gptu_mutex);
++		return -EINVAL;
++	}
++
++	n = timer >> 1;
++	X = timer & 0x01;
++
++	*LQ_GPTU_RUN(n, X) = GPTU_RUN_RL_SET(!is_resume) | GPTU_RUN_SEN_SET(1);
++
++
++	mutex_unlock(&timer_dev.gptu_mutex);
++
++	return 0;
++}
++EXPORT_SYMBOL(lq_start_timer);
++
++int lq_stop_timer(unsigned int timer)
++{
++	unsigned int flag;
++	unsigned int mask;
++	int n, X;
++
++	if (!timer_dev.f_gptu_on)
++		return -EINVAL;
++
++	if (timer < FIRST_TIMER
++	    || timer >= FIRST_TIMER + timer_dev.number_of_timers)
++		return -EINVAL;
++
++	mutex_lock(&timer_dev.gptu_mutex);
++
++	flag = timer_dev.timer[timer - FIRST_TIMER].flag;
++	if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT)
++		timer &= ~0x01;
++
++	mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer;
++	if (((timer_dev.occupation & mask) ^ mask)) {
++		mutex_unlock(&timer_dev.gptu_mutex);
++		return -EINVAL;
++	}
++
++	n = timer >> 1;
++	X = timer & 0x01;
++
++	*LQ_GPTU_RUN(n, X) = GPTU_RUN_CEN_SET(1);
++
++	mutex_unlock(&timer_dev.gptu_mutex);
++
++	return 0;
++}
++EXPORT_SYMBOL(lq_stop_timer);
++
++int lq_reset_counter_flags(u32 timer, u32 flags)
++{
++	unsigned int oflag;
++	unsigned int mask, con_reg;
++	int n, X;
++
++	if (!timer_dev.f_gptu_on)
++		return -EINVAL;
++
++	if (timer < FIRST_TIMER || timer >= FIRST_TIMER + timer_dev.number_of_timers)
++		return -EINVAL;
++
++	mutex_lock(&timer_dev.gptu_mutex);
++
++	oflag = timer_dev.timer[timer - FIRST_TIMER].flag;
++	if (TIMER_FLAG_MASK_SIZE(oflag) != TIMER_FLAG_16BIT)
++		timer &= ~0x01;
++
++	mask = (TIMER_FLAG_MASK_SIZE(oflag) == TIMER_FLAG_16BIT ? 1 : 3) << timer;
++	if (((timer_dev.occupation & mask) ^ mask)) {
++		mutex_unlock(&timer_dev.gptu_mutex);
++		return -EINVAL;
++	}
++
++	switch (TIMER_FLAG_MASK_EDGE(flags)) {
++	default:
++	case TIMER_FLAG_NONE_EDGE:
++		con_reg = GPTU_CON_EDGE_SET(0x00);
++		break;
++	case TIMER_FLAG_RISE_EDGE:
++		con_reg = GPTU_CON_EDGE_SET(0x01);
++		break;
++	case TIMER_FLAG_FALL_EDGE:
++		con_reg = GPTU_CON_EDGE_SET(0x02);
++		break;
++	case TIMER_FLAG_ANY_EDGE:
++		con_reg = GPTU_CON_EDGE_SET(0x03);
++		break;
++	}
++	if (TIMER_FLAG_MASK_TYPE(flags) == TIMER_FLAG_TIMER)
++		con_reg |= TIMER_FLAG_MASK_SRC(flags) == TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EXT_SET(1) : GPTU_CON_SRC_EXT_SET(0);
++	else
++		con_reg |= TIMER_FLAG_MASK_SRC(flags) == TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EG_SET(1) : GPTU_CON_SRC_EG_SET(0);
++	con_reg |= TIMER_FLAG_MASK_SYNC(flags) == TIMER_FLAG_UNSYNC ? GPTU_CON_SYNC_SET(0) : GPTU_CON_SYNC_SET(1);
++	con_reg |= TIMER_FLAG_MASK_INVERT(flags) == TIMER_FLAG_REAL ? GPTU_CON_INV_SET(0) : GPTU_CON_INV_SET(1);
++	con_reg |= TIMER_FLAG_MASK_SIZE(flags) == TIMER_FLAG_16BIT ? GPTU_CON_EXT_SET(0) : GPTU_CON_EXT_SET(1);
++	con_reg |= TIMER_FLAG_MASK_STOP(flags) == TIMER_FLAG_ONCE ? GPTU_CON_STP_SET(1) : GPTU_CON_STP_SET(0);
++	con_reg |= TIMER_FLAG_MASK_TYPE(flags) == TIMER_FLAG_TIMER ? GPTU_CON_CNT_SET(0) : GPTU_CON_CNT_SET(1);
++	con_reg |= TIMER_FLAG_MASK_DIR(flags) == TIMER_FLAG_UP ? GPTU_CON_DIR_SET(1) : GPTU_CON_DIR_SET(0);
++
++	timer_dev.timer[timer - FIRST_TIMER].flag = flags;
++	if (TIMER_FLAG_MASK_SIZE(flags) != TIMER_FLAG_16BIT)
++		timer_dev.timer[timer - FIRST_TIMER + 1].flag = flags;
++
++	n = timer >> 1;
++	X = timer & 0x01;
++
++	*LQ_GPTU_CON(n, X) = con_reg;
++	smp_wmb();
++	mutex_unlock(&timer_dev.gptu_mutex);
++	return 0;
++}
++EXPORT_SYMBOL(lq_reset_counter_flags);
++
++int lq_get_count_value(unsigned int timer, unsigned long *value)
++{
++	unsigned int flag;
++	unsigned int mask;
++	int n, X;
++
++	if (!timer_dev.f_gptu_on)
++		return -EINVAL;
++
++	if (timer < FIRST_TIMER
++	    || timer >= FIRST_TIMER + timer_dev.number_of_timers)
++		return -EINVAL;
++
++	mutex_lock(&timer_dev.gptu_mutex);
++
++	flag = timer_dev.timer[timer - FIRST_TIMER].flag;
++	if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT)
++		timer &= ~0x01;
++
++	mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer;
++	if (((timer_dev.occupation & mask) ^ mask)) {
++		mutex_unlock(&timer_dev.gptu_mutex);
++		return -EINVAL;
++	}
++
++	n = timer >> 1;
++	X = timer & 0x01;
++
++	*value = *LQ_GPTU_COUNT(n, X);
++
++
++	mutex_unlock(&timer_dev.gptu_mutex);
++
++	return 0;
++}
++EXPORT_SYMBOL(lq_get_count_value);
++
++u32 lq_cal_divider(unsigned long freq)
++{
++	u64 module_freq, fpi = ltq_get_fpi_bus_clock(2);
++	u32 clock_divider = 1;
++	module_freq = fpi * 1000;
++	do_div(module_freq, clock_divider * freq);
++	return module_freq;
++}
++EXPORT_SYMBOL(lq_cal_divider);
++
++int lq_set_timer(unsigned int timer, unsigned int freq, int is_cyclic,
++	int is_ext_src, unsigned int handle_flag, unsigned long arg1,
++	unsigned long arg2)
++{
++	unsigned long divider;
++	unsigned int flag;
++
++	divider = lq_cal_divider(freq);
++	if (divider == 0)
++		return -EINVAL;
++	flag = ((divider & ~0xFFFF) ? TIMER_FLAG_32BIT : TIMER_FLAG_16BIT)
++		| (is_cyclic ? TIMER_FLAG_CYCLIC : TIMER_FLAG_ONCE)
++		| (is_ext_src ? TIMER_FLAG_EXT_SRC : TIMER_FLAG_INT_SRC)
++		| TIMER_FLAG_TIMER | TIMER_FLAG_DOWN
++		| TIMER_FLAG_MASK_HANDLE(handle_flag);
++
++	printk(KERN_INFO "lq_set_timer(%d, %d), divider = %lu\n",
++		timer, freq, divider);
++	return lq_request_timer(timer, flag, divider, arg1, arg2);
++}
++EXPORT_SYMBOL(lq_set_timer);
++
++int lq_set_counter(unsigned int timer, unsigned int flag, u32 reload,
++	unsigned long arg1, unsigned long arg2)
++{
++	printk(KERN_INFO "lq_set_counter(%d, %#x, %d)\n", timer, flag, reload);
++	return lq_request_timer(timer, flag, reload, arg1, arg2);
++}
++EXPORT_SYMBOL(lq_set_counter);
++
++static long gptu_ioctl(struct file *file, unsigned int cmd,
++	unsigned long arg)
++{
++	int ret;
++	struct gptu_ioctl_param param;
++
++	if (!access_ok(VERIFY_READ, arg, sizeof(struct gptu_ioctl_param)))
++		return -EFAULT;
++	if (copy_from_user(&param, (void *) arg, sizeof(param)))
++		return -EFAULT;
++
++	if ((((cmd == GPTU_REQUEST_TIMER || cmd == GPTU_SET_TIMER
++	       || cmd == GPTU_SET_COUNTER) && param.timer < 2)
++	     || cmd == GPTU_GET_COUNT_VALUE || cmd == GPTU_CALCULATE_DIVIDER)
++	    && !access_ok(VERIFY_WRITE, arg,
++			   sizeof(struct gptu_ioctl_param)))
++		return -EFAULT;
++
++	switch (cmd) {
++	case GPTU_REQUEST_TIMER:
++		ret = lq_request_timer(param.timer, param.flag, param.value,
++				     (unsigned long) param.pid,
++				     (unsigned long) param.sig);
++		if (ret > 0) {
++			copy_to_user(&((struct gptu_ioctl_param *) arg)->
++				      timer, &ret, sizeof(ret));
++			ret = 0;
++		}
++		break;
++	case GPTU_FREE_TIMER:
++		ret = lq_free_timer(param.timer);
++		break;
++	case GPTU_START_TIMER:
++		ret = lq_start_timer(param.timer, param.flag);
++		break;
++	case GPTU_STOP_TIMER:
++		ret = lq_stop_timer(param.timer);
++		break;
++	case GPTU_GET_COUNT_VALUE:
++		ret = lq_get_count_value(param.timer, &param.value);
++		if (!ret)
++			copy_to_user(&((struct gptu_ioctl_param *) arg)->
++				      value, &param.value,
++				      sizeof(param.value));
++		break;
++	case GPTU_CALCULATE_DIVIDER:
++		param.value = lq_cal_divider(param.value);
++		if (param.value == 0)
++			ret = -EINVAL;
++		else {
++			copy_to_user(&((struct gptu_ioctl_param *) arg)->
++				      value, &param.value,
++				      sizeof(param.value));
++			ret = 0;
++		}
++		break;
++	case GPTU_SET_TIMER:
++		ret = lq_set_timer(param.timer, param.value,
++				 TIMER_FLAG_MASK_STOP(param.flag) !=
++				 TIMER_FLAG_ONCE ? 1 : 0,
++				 TIMER_FLAG_MASK_SRC(param.flag) ==
++				 TIMER_FLAG_EXT_SRC ? 1 : 0,
++				 TIMER_FLAG_MASK_HANDLE(param.flag) ==
++				 TIMER_FLAG_SIGNAL ? TIMER_FLAG_SIGNAL :
++				 TIMER_FLAG_NO_HANDLE,
++				 (unsigned long) param.pid,
++				 (unsigned long) param.sig);
++		if (ret > 0) {
++			copy_to_user(&((struct gptu_ioctl_param *) arg)->
++				      timer, &ret, sizeof(ret));
++			ret = 0;
++		}
++		break;
++	case GPTU_SET_COUNTER:
++		ret = lq_set_counter(param.timer, param.flag, param.value, 0, 0);
++		if (ret > 0) {
++			copy_to_user(&((struct gptu_ioctl_param *) arg)->
++				      timer, &ret, sizeof(ret));
++			ret = 0;
++		}
++		break;
++	default:
++		ret = -ENOTTY;
++	}
++
++	return ret;
++}
++
++static int gptu_open(struct inode *inode, struct file *file)
++{
++	return 0;
++}
++
++static int gptu_release(struct inode *inode, struct file *file)
++{
++	return 0;
++}
++
++int __init lq_gptu_init(void)
++{
++	int ret;
++	unsigned int i;
++
++	ltq_w32(0, LQ_GPTU_IRNEN);
++	ltq_w32(0xfff, LQ_GPTU_IRNCR);
++
++	memset(&timer_dev, 0, sizeof(timer_dev));
++	mutex_init(&timer_dev.gptu_mutex);
++
++	lq_enable_gptu();
++	timer_dev.number_of_timers = GPTU_ID_CFG * 2;
++	lq_disable_gptu();
++	if (timer_dev.number_of_timers > MAX_NUM_OF_32BIT_TIMER_BLOCKS * 2)
++		timer_dev.number_of_timers = MAX_NUM_OF_32BIT_TIMER_BLOCKS * 2;
++	printk(KERN_INFO "gptu: totally %d 16-bit timers/counters\n", timer_dev.number_of_timers);
++
++	ret = misc_register(&gptu_miscdev);
++	if (ret) {
++		printk(KERN_ERR "gptu: can't misc_register, get error %d\n", -ret);
++		return ret;
++	} else {
++		printk(KERN_INFO "gptu: misc_register on minor %d\n", gptu_miscdev.minor);
++	}
++
++	for (i = 0; i < timer_dev.number_of_timers; i++) {
++		ret = request_irq(TIMER_INTERRUPT + i, timer_irq_handler, IRQF_TIMER, gptu_miscdev.name, &timer_dev.timer[i]);
++		if (ret) {
++			while (i-- > 0)
++				free_irq(TIMER_INTERRUPT + i, &timer_dev.timer[i]);
++			misc_deregister(&gptu_miscdev);
++			printk(KERN_ERR "gptu: failed in requesting irq (%d), get error %d\n", i, -ret);
++			return ret;
++		} else {
++			timer_dev.timer[i].irq = TIMER_INTERRUPT + i;
++			disable_irq(timer_dev.timer[i].irq);
++			printk(KERN_INFO "gptu: succeeded to request irq %d\n", timer_dev.timer[i].irq);
++		}
++	}
++
++	return 0;
++}
++
++void __exit lq_gptu_exit(void)
++{
++	unsigned int i;
++
++	for (i = 0; i < timer_dev.number_of_timers; i++) {
++		if (timer_dev.timer[i].f_irq_on)
++			disable_irq(timer_dev.timer[i].irq);
++		free_irq(timer_dev.timer[i].irq, &timer_dev.timer[i]);
++	}
++	lq_disable_gptu();
++	misc_deregister(&gptu_miscdev);
++}
++
++module_init(lq_gptu_init);
++module_exit(lq_gptu_exit);
++
++#endif
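
For reference, here is a minimal sketch of how an in-kernel consumer could use
the GPTU API exported above (lq_set_timer/lq_start_timer/lq_stop_timer/
lq_free_timer). It is illustrative only: the header providing TIMER2A, the
TIMER_FLAG_* constants and the callback prototype is not part of this hunk, so
those names are assumptions taken from the code above.

/*
 * Illustrative sketch, not part of the patch: drive a 100 Hz cyclic
 * timer with a callback in IRQ context. TIMER2A and the TIMER_FLAG_*
 * handle constants come from the GPTU header used by the code above
 * (not shown here), so their exact names/values are assumptions.
 */
#include <linux/kernel.h>
#include <linux/module.h>

static void example_tick(unsigned long cookie)
{
	pr_info("gptu example: tick, cookie=%lu\n", cookie);
}

static int __init gptu_example_init(void)
{
	int ret;

	/* timer TIMER2A, 100 Hz, cyclic, internal (FPI) clock source */
	ret = lq_set_timer(TIMER2A, 100, 1, 0, TIMER_FLAG_CALLBACK_IN_IRQ,
			   (unsigned long)example_tick, 0);
	if (ret < 0)
		return ret;

	return lq_start_timer(TIMER2A, 0);
}

static void __exit gptu_example_exit(void)
{
	lq_stop_timer(TIMER2A);
	lq_free_timer(TIMER2A);
}

module_init(gptu_example_init);
module_exit(gptu_example_exit);
MODULE_LICENSE("GPL");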
diff --git a/target/linux/lantiq/patches-4.4/0012-pinctrl-lantiq-fix-up-pinmux.patch b/target/linux/lantiq/patches-4.4/0012-pinctrl-lantiq-fix-up-pinmux.patch
new file mode 100644
index 0000000..8ec8f81
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0012-pinctrl-lantiq-fix-up-pinmux.patch
@@ -0,0 +1,78 @@
+From 25494c55a4007a1409f53ddbafd661636e47ea34 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Fri, 9 Aug 2013 20:38:15 +0200
+Subject: [PATCH 12/36] pinctrl/lantiq: fix up pinmux
+
+We found out how to set the GPHY LED pinmuxing.
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/pinctrl/pinctrl-xway.c |   28 ++++++++++++++++++++++++++--
+ 1 file changed, 26 insertions(+), 2 deletions(-)
+
+--- a/drivers/pinctrl/pinctrl-xway.c
++++ b/drivers/pinctrl/pinctrl-xway.c
+@@ -609,10 +609,9 @@ static struct pinctrl_desc xway_pctrl_de
+ 	.confops	= &xway_pinconf_ops,
+ };
+ 
+-static inline int xway_mux_apply(struct pinctrl_dev *pctrldev,
++static int mux_apply(struct ltq_pinmux_info *info,
+ 				int pin, int mux)
+ {
+-	struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
+ 	int port = PORT(pin);
+ 	u32 alt1_reg = GPIO_ALT1(pin);
+ 
+@@ -632,6 +631,14 @@ static inline int xway_mux_apply(struct
+ 	return 0;
+ }
+ 
++static inline int xway_mux_apply(struct pinctrl_dev *pctrldev,
++				int pin, int mux)
++{
++	struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
++
++	return mux_apply(info, pin, mux);
++}
++
+ static const struct ltq_cfg_param xway_cfg_params[] = {
+ 	{"lantiq,pull",		LTQ_PINCONF_PARAM_PULL},
+ 	{"lantiq,open-drain",	LTQ_PINCONF_PARAM_OPEN_DRAIN},
+@@ -676,12 +683,28 @@ static int xway_gpio_dir_out(struct gpio
+ {
+ 	struct ltq_pinmux_info *info = dev_get_drvdata(chip->dev);
+ 
++	if (PORT(pin) == PORT3)
++		gpio_setbit(info->membase[0], GPIO3_OD, PORT_PIN(pin));
++	else
++		gpio_setbit(info->membase[0], GPIO_OD(pin), PORT_PIN(pin));
+ 	gpio_setbit(info->membase[0], GPIO_DIR(pin), PORT_PIN(pin));
+ 	xway_gpio_set(chip, pin, val);
+ 
+ 	return 0;
+ }
+ 
++static int xway_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
++{
++	struct ltq_pinmux_info *info = dev_get_drvdata(chip->dev);
++	int i;
++
++	for (i = 0; i < info->num_exin; i++)
++		if (info->exin[i] == offset)
++			return ltq_eiu_get_irq(i);
++
++	return -1;
++}
++
+ static struct gpio_chip xway_chip = {
+ 	.label = "gpio-xway",
+ 	.direction_input = xway_gpio_dir_in,
+@@ -690,6 +713,7 @@ static struct gpio_chip xway_chip = {
+ 	.set = xway_gpio_set,
+ 	.request = gpiochip_generic_request,
+ 	.free = gpiochip_generic_free,
++	.to_irq = xway_gpio_to_irq,
+ 	.base = -1,
+ };
+ 
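
A short aside on the .to_irq hook added above: it lets generic gpiolib users
resolve the EIU interrupt of an EXIN-capable pad. A hedged sketch follows; the
GPIO number, handler and label are placeholders, not values from the patch.

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_exin_isr(int irq, void *dev_id)
{
	/* handle the external interrupt */
	return IRQ_HANDLED;
}

static int example_hook_exin(unsigned int gpio)
{
	int irq = gpio_to_irq(gpio);	/* ends up in xway_gpio_to_irq() */

	if (irq < 0)
		return irq;		/* pad has no EXIN line mapped */

	return request_irq(irq, example_exin_isr, IRQF_TRIGGER_FALLING,
			   "example-exin", NULL);
}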
diff --git a/target/linux/lantiq/patches-4.4/0013-MTD-lantiq-xway-fix-invalid-operator.patch b/target/linux/lantiq/patches-4.4/0013-MTD-lantiq-xway-fix-invalid-operator.patch
new file mode 100644
index 0000000..c6d3819
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0013-MTD-lantiq-xway-fix-invalid-operator.patch
@@ -0,0 +1,24 @@
+From 8e34da603f442624bb70e887d8f42064bb924224 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Sun, 28 Jul 2013 18:03:54 +0200
+Subject: [PATCH 13/36] MTD: lantiq: xway: fix invalid operator
+
+xway_read_byte should use a logical OR and not an add operator when working out
+the NAND address.
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/mtd/nand/xway_nand.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/mtd/nand/xway_nand.c
++++ b/drivers/mtd/nand/xway_nand.c
+@@ -124,7 +124,7 @@ static unsigned char xway_read_byte(stru
+ 	int ret;
+ 
+ 	spin_lock_irqsave(&ebu_lock, flags);
+-	ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA));
++	ret = ltq_r8((void __iomem *)(nandaddr | NAND_READ_DATA));
+ 	spin_unlock_irqrestore(&ebu_lock, flags);
+ 
+ 	return ret;
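
Worth spelling out why OR is the safe operator here: NAND_READ_DATA is a
single address bit on top of the chip-select base, so OR-ing it in is
idempotent, while adding it can ripple a carry into unrelated address bits if
that bit happens to be set already. A tiny stand-alone illustration with
made-up values (the real NAND_* defines are not shown in this hunk):

#include <stdio.h>

/* Hypothetical values for illustration only, not the driver's defines. */
#define EX_BASE       0xb4000000UL	/* chip-select base address        */
#define EX_READ_DATA  (1UL << 2)	/* address bit that selects "data" */

int main(void)
{
	unsigned long addr = EX_BASE | EX_READ_DATA;	/* bit set once */

	printf("add: %#lx\n", addr + EX_READ_DATA);	/* 0xb4000008, wrong bit */
	printf("or:  %#lx\n", addr | EX_READ_DATA);	/* 0xb4000004, unchanged */
	return 0;
}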
diff --git a/target/linux/lantiq/patches-4.4/0014-MTD-lantiq-xway-the-latched-command-should-be-persis.patch b/target/linux/lantiq/patches-4.4/0014-MTD-lantiq-xway-the-latched-command-should-be-persis.patch
new file mode 100644
index 0000000..6a7785b
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0014-MTD-lantiq-xway-the-latched-command-should-be-persis.patch
@@ -0,0 +1,44 @@
+From b454cefd675fc1bd3d8c690c1bd1d8f4678e9922 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Sun, 28 Jul 2013 18:06:39 +0200
+Subject: [PATCH 14/36] MTD: lantiq: xway: the latched command should be
+ persistent
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/mtd/nand/xway_nand.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/mtd/nand/xway_nand.c
++++ b/drivers/mtd/nand/xway_nand.c
+@@ -54,6 +54,8 @@
+ #define NAND_CON_CSMUX		(1 << 1)
+ #define NAND_CON_NANDM		1
+ 
++static u32 xway_latchcmd;
++
+ static void xway_reset_chip(struct nand_chip *chip)
+ {
+ 	unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
+@@ -94,17 +96,15 @@ static void xway_cmd_ctrl(struct mtd_inf
+ 	unsigned long flags;
+ 
+ 	if (ctrl & NAND_CTRL_CHANGE) {
+-		nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR);
+ 		if (ctrl & NAND_CLE)
+-			nandaddr |= NAND_WRITE_CMD;
+-		else
+-			nandaddr |= NAND_WRITE_ADDR;
+-		this->IO_ADDR_W = (void __iomem *) nandaddr;
++			xway_latchcmd = NAND_WRITE_CMD;
++		else if (ctrl & NAND_ALE)
++			xway_latchcmd = NAND_WRITE_ADDR;
+ 	}
+ 
+ 	if (cmd != NAND_CMD_NONE) {
+ 		spin_lock_irqsave(&ebu_lock, flags);
+-		writeb(cmd, this->IO_ADDR_W);
++		writeb(cmd, (void __iomem *) (nandaddr | xway_latchcmd));
+ 		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ 			;
+ 		spin_unlock_irqrestore(&ebu_lock, flags);
diff --git a/target/linux/lantiq/patches-4.4/0015-MTD-lantiq-xway-remove-endless-loop.patch b/target/linux/lantiq/patches-4.4/0015-MTD-lantiq-xway-remove-endless-loop.patch
new file mode 100644
index 0000000..4bd1668
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0015-MTD-lantiq-xway-remove-endless-loop.patch
@@ -0,0 +1,41 @@
+From 76e153079f02d26e3357302d2886a0c8aaaec64d Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Sun, 28 Jul 2013 18:02:06 +0200
+Subject: [PATCH 15/36] MTD: lantiq: xway: remove endless loop
+
+The reset loop logic could run into an endless loop. Let's fix it as requested.
+
+--> http://lists.infradead.org/pipermail/linux-mtd/2012-September/044240.html
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/mtd/nand/xway_nand.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/mtd/nand/xway_nand.c
++++ b/drivers/mtd/nand/xway_nand.c
+@@ -59,16 +59,22 @@ static u32 xway_latchcmd;
+ static void xway_reset_chip(struct nand_chip *chip)
+ {
+ 	unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
++	unsigned long timeout;
+ 	unsigned long flags;
+ 
+ 	nandaddr &= ~NAND_WRITE_ADDR;
+ 	nandaddr |= NAND_WRITE_CMD;
+ 
+ 	/* finish with a reset */
++	timeout = jiffies + msecs_to_jiffies(20);
++
+ 	spin_lock_irqsave(&ebu_lock, flags);
+ 	writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr);
+-	while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+-		;
++	do {
++		if ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
++			break;
++		cond_resched();
++	} while (!time_after_eq(jiffies, timeout));
+ 	spin_unlock_irqrestore(&ebu_lock, flags);
+ }
+ 
diff --git a/target/linux/lantiq/patches-4.4/0016-MTD-lantiq-xway-add-missing-write_buf-and-read_buf-t.patch b/target/linux/lantiq/patches-4.4/0016-MTD-lantiq-xway-add-missing-write_buf-and-read_buf-t.patch
new file mode 100644
index 0000000..072caf6
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0016-MTD-lantiq-xway-add-missing-write_buf-and-read_buf-t.patch
@@ -0,0 +1,55 @@
+From 65df9d63eaee02c25e879b33dd42aceb78e57842 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Sun, 28 Jul 2013 17:59:51 +0200
+Subject: [PATCH 16/36] MTD: lantiq: xway: add missing write_buf and read_buf
+ to nand driver
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/mtd/nand/xway_nand.c |   28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/drivers/mtd/nand/xway_nand.c
++++ b/drivers/mtd/nand/xway_nand.c
+@@ -136,6 +136,32 @@ static unsigned char xway_read_byte(stru
+ 	return ret;
+ }
+ 
++static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len)
++{
++	struct nand_chip *this = mtd->priv;
++	unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
++	unsigned long flags;
++	int i;
++
++	spin_lock_irqsave(&ebu_lock, flags);
++	for (i = 0; i < len; i++)
++		buf[i] = ltq_r8((void __iomem *)(nandaddr | NAND_READ_DATA));
++	spin_unlock_irqrestore(&ebu_lock, flags);
++}
++
++static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
++{
++	struct nand_chip *this = mtd->priv;
++	unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
++	unsigned long flags;
++	int i;
++
++	spin_lock_irqsave(&ebu_lock, flags);
++	for (i = 0; i < len; i++)
++		ltq_w8(buf[i], (void __iomem *)(nandaddr | NAND_WRITE_DATA));
++	spin_unlock_irqrestore(&ebu_lock, flags);
++}
++
+ static int xway_nand_probe(struct platform_device *pdev)
+ {
+ 	struct nand_chip *this = platform_get_drvdata(pdev);
+@@ -177,6 +203,8 @@ static struct platform_nand_data xway_na
+ 		.dev_ready	= xway_dev_ready,
+ 		.select_chip	= xway_select_chip,
+ 		.read_byte	= xway_read_byte,
++		.read_buf	= xway_read_buf,
++		.write_buf	= xway_write_buf,
+ 	}
+ };
+ 
diff --git a/target/linux/lantiq/patches-4.4/0017-MTD-xway-fix-nand-locking.patch b/target/linux/lantiq/patches-4.4/0017-MTD-xway-fix-nand-locking.patch
new file mode 100644
index 0000000..737469a
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0017-MTD-xway-fix-nand-locking.patch
@@ -0,0 +1,89 @@
+From aa705c1b0860da91f2ed1a4c0b57337e6de689e1 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Thu, 7 Aug 2014 18:55:31 +0200
+Subject: [PATCH 17/36] MTD: xway: fix nand locking
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/mtd/nand/xway_nand.c |   15 +++------------
+ 1 file changed, 3 insertions(+), 12 deletions(-)
+
+--- a/drivers/mtd/nand/xway_nand.c
++++ b/drivers/mtd/nand/xway_nand.c
+@@ -80,13 +80,16 @@ static void xway_reset_chip(struct nand_
+ 
+ static void xway_select_chip(struct mtd_info *mtd, int chip)
+ {
++	static unsigned long csflags;
+ 
+ 	switch (chip) {
+ 	case -1:
+ 		ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
+ 		ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
++		spin_unlock_irqrestore(&ebu_lock, csflags);
+ 		break;
+ 	case 0:
++		spin_lock_irqsave(&ebu_lock, csflags);
+ 		ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
+ 		ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
+ 		break;
+@@ -99,7 +102,6 @@ static void xway_cmd_ctrl(struct mtd_inf
+ {
+ 	struct nand_chip *this = mtd->priv;
+ 	unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+-	unsigned long flags;
+ 
+ 	if (ctrl & NAND_CTRL_CHANGE) {
+ 		if (ctrl & NAND_CLE)
+@@ -109,11 +111,9 @@ static void xway_cmd_ctrl(struct mtd_inf
+ 	}
+ 
+ 	if (cmd != NAND_CMD_NONE) {
+-		spin_lock_irqsave(&ebu_lock, flags);
+ 		writeb(cmd, (void __iomem *) (nandaddr | xway_latchcmd));
+ 		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ 			;
+-		spin_unlock_irqrestore(&ebu_lock, flags);
+ 	}
+ }
+ 
+@@ -126,12 +126,9 @@ static unsigned char xway_read_byte(stru
+ {
+ 	struct nand_chip *this = mtd->priv;
+ 	unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
+-	unsigned long flags;
+ 	int ret;
+ 
+-	spin_lock_irqsave(&ebu_lock, flags);
+ 	ret = ltq_r8((void __iomem *)(nandaddr | NAND_READ_DATA));
+-	spin_unlock_irqrestore(&ebu_lock, flags);
+ 
+ 	return ret;
+ }
+@@ -140,26 +137,20 @@ static void xway_read_buf(struct mtd_inf
+ {
+ 	struct nand_chip *this = mtd->priv;
+ 	unsigned long nandaddr = (unsigned long) this->IO_ADDR_R;
+-	unsigned long flags;
+ 	int i;
+ 
+-	spin_lock_irqsave(&ebu_lock, flags);
+ 	for (i = 0; i < len; i++)
+ 		buf[i] = ltq_r8((void __iomem *)(nandaddr | NAND_READ_DATA));
+-	spin_unlock_irqrestore(&ebu_lock, flags);
+ }
+ 
+ static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+ {
+ 	struct nand_chip *this = mtd->priv;
+ 	unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+-	unsigned long flags;
+ 	int i;
+ 
+-	spin_lock_irqsave(&ebu_lock, flags);
+ 	for (i = 0; i < len; i++)
+ 		ltq_w8(buf[i], (void __iomem *)(nandaddr | NAND_WRITE_DATA));
+-	spin_unlock_irqrestore(&ebu_lock, flags);
+ }
+ 
+ static int xway_nand_probe(struct platform_device *pdev)
diff --git a/target/linux/lantiq/patches-4.4/0018-MTD-nand-lots-of-xrx200-fixes.patch b/target/linux/lantiq/patches-4.4/0018-MTD-nand-lots-of-xrx200-fixes.patch
new file mode 100644
index 0000000..5500861
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0018-MTD-nand-lots-of-xrx200-fixes.patch
@@ -0,0 +1,125 @@
+From 997a8965db8417266bea3fbdcfa3e5655a1b52fa Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Tue, 9 Sep 2014 23:12:15 +0200
+Subject: [PATCH 18/36] MTD: nand: lots of xrx200 fixes
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/mtd/nand/xway_nand.c |   63 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 63 insertions(+)
+
+--- a/drivers/mtd/nand/xway_nand.c
++++ b/drivers/mtd/nand/xway_nand.c
+@@ -54,8 +54,27 @@
+ #define NAND_CON_CSMUX		(1 << 1)
+ #define NAND_CON_NANDM		1
+ 
++#define DANUBE_PCI_REG32( addr )    (*(volatile u32 *)(addr))
++#define PCI_CR_PR_OFFSET	    (KSEG1+0x1E105400)
++#define PCI_CR_PC_ARB		    (PCI_CR_PR_OFFSET + 0x0080)
++
+ static u32 xway_latchcmd;
+ 
++/*
++ * req_mask provides a mechanism to prevent interference between
++ * nand and pci (probably only relevant for the BT Home Hub 2B).
++ * Setting it causes the corresponding pci req pins to be masked
++ * during nand access, and also moves ebu locking from the read/write
++ * functions to the chip select function to ensure that the whole
++ * operation runs with interrupts disabled.
++ * In addition it switches on some extra waiting in xway_cmd_ctrl().
++ * This seems to be necessary if the ebu_cs1 pin has open-drain disabled,
++ * which in turn seems to be necessary for the nor chip to be recognised
++ * reliably, on a board (Home Hub 2B again) which has both nor and nand.
++ */
++
++static __be32 req_mask = 0;
++
+ static void xway_reset_chip(struct nand_chip *chip)
+ {
+ 	unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W;
+@@ -86,12 +105,24 @@ static void xway_select_chip(struct mtd_
+ 	case -1:
+ 		ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
+ 		ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
++
++		if (req_mask) {
++			/* Unmask all external PCI request */
++			DANUBE_PCI_REG32(PCI_CR_PC_ARB) &= ~(req_mask << 16);
++		}
+ 		spin_unlock_irqrestore(&ebu_lock, csflags);
++
+ 		break;
+ 	case 0:
+ 		spin_lock_irqsave(&ebu_lock, csflags);
++		if (req_mask) {
++			/* Mask all external PCI request */
++			DANUBE_PCI_REG32(PCI_CR_PC_ARB) |= (req_mask << 16);
++		}
++
+ 		ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
+ 		ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
++
+ 		break;
+ 	default:
+ 		BUG();
+@@ -103,6 +134,12 @@ static void xway_cmd_ctrl(struct mtd_inf
+ 	struct nand_chip *this = mtd->priv;
+ 	unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+ 
++	if (req_mask) {
++		if (cmd != NAND_CMD_STATUS)
++			ltq_ebu_w32(EBU_NAND_WAIT, 0); /* Clear nand ready */
++	}
++
++
+ 	if (ctrl & NAND_CTRL_CHANGE) {
+ 		if (ctrl & NAND_CLE)
+ 			xway_latchcmd = NAND_WRITE_CMD;
+@@ -115,6 +152,24 @@ static void xway_cmd_ctrl(struct mtd_inf
+ 		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ 			;
+ 	}
++
++	if (req_mask) {
++	       /*
++		* program and erase have their own busy handlers
++		* status and sequential in needs no delay
++		*/
++		switch (cmd) {
++			case NAND_CMD_ERASE1:
++			case NAND_CMD_SEQIN:
++			case NAND_CMD_STATUS:
++			case NAND_CMD_READID:
++			return;
++		}
++
++		/* wait until command is processed */
++		while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD) == 0)
++			;
++	}
+ }
+ 
+ static int xway_dev_ready(struct mtd_info *mtd)
+@@ -157,6 +212,8 @@ static int xway_nand_probe(struct platfo
+ {
+ 	struct nand_chip *this = platform_get_drvdata(pdev);
+ 	unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
++	const __be32 *req_mask_ptr = of_get_property(pdev->dev.of_node,
++					"req-mask", NULL);
+ 	const __be32 *cs = of_get_property(pdev->dev.of_node,
+ 					"lantiq,cs", NULL);
+ 	u32 cs_flag = 0;
+@@ -165,6 +222,12 @@ static int xway_nand_probe(struct platfo
+ 	if (cs && (*cs == 1))
+ 		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
+ 
++	/*
++	 * Load the PCI req lines to mask from the device tree. If the
++	 * property is not present, setting req_mask to 0 disables masking.
++	 */
++	req_mask = (req_mask_ptr ? *req_mask_ptr : 0);
++
+ 	/* setup the EBU to run in NAND mode on our base addr */
+ 	ltq_ebu_w32(CPHYSADDR(nandaddr)
+ 		| ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
diff --git a/target/linux/lantiq/patches-4.4/0020-MTD-lantiq-handle-NO_XIP-on-cfi0001-flash.patch b/target/linux/lantiq/patches-4.4/0020-MTD-lantiq-handle-NO_XIP-on-cfi0001-flash.patch
new file mode 100644
index 0000000..3b38a95
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0020-MTD-lantiq-handle-NO_XIP-on-cfi0001-flash.patch
@@ -0,0 +1,25 @@
+From e3b20f04e9f9cae1babe091fdc1d08d7703ae344 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Thu, 7 Aug 2014 18:18:00 +0200
+Subject: [PATCH 20/36] MTD: lantiq: handle NO_XIP on cfi0001 flash
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/mtd/maps/lantiq-flash.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/maps/lantiq-flash.c
++++ b/drivers/mtd/maps/lantiq-flash.c
+@@ -138,7 +138,10 @@ ltq_mtd_probe(struct platform_device *pd
+ 	if (!ltq_mtd->map)
+ 		return -ENOMEM;
+ 
+-	ltq_mtd->map->phys = ltq_mtd->res->start;
++	if (of_find_property(pdev->dev.of_node, "lantiq,noxip", NULL))
++		ltq_mtd->map->phys = NO_XIP;
++	else
++		ltq_mtd->map->phys = ltq_mtd->res->start;
+ 	ltq_mtd->map->size = resource_size(ltq_mtd->res);
+ 	ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
+ 	if (IS_ERR(ltq_mtd->map->virt))
diff --git a/target/linux/lantiq/patches-4.4/0022-MTD-m25p80-allow-loading-mtd-name-from-OF.patch b/target/linux/lantiq/patches-4.4/0022-MTD-m25p80-allow-loading-mtd-name-from-OF.patch
new file mode 100644
index 0000000..25f3fff
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0022-MTD-m25p80-allow-loading-mtd-name-from-OF.patch
@@ -0,0 +1,44 @@
+From 4400e1f593ea40a51912128adb4f53d59e62cad8 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Wed, 10 Sep 2014 22:40:18 +0200
+Subject: [PATCH 22/36] MTD: m25p80: allow loading mtd name from OF
+
+In accordance with the physmap flash driver, we should honour the linux,mtd-name
+property when deciding what name the mtd device has.
+
+Signed-off-by: Thomas Langer <thomas.langer at lantiq.com>
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/mtd/devices/m25p80.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/mtd/devices/m25p80.c
++++ b/drivers/mtd/devices/m25p80.c
+@@ -19,6 +19,7 @@
+ #include <linux/errno.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
++#include <linux/of.h>
+ 
+ #include <linux/mtd/mtd.h>
+ #include <linux/mtd/partitions.h>
+@@ -182,6 +183,10 @@ static int m25p_probe(struct spi_device
+ 	enum read_mode mode = SPI_NOR_NORMAL;
+ 	char *flash_name = NULL;
+ 	int ret;
++	const char __maybe_unused	*of_mtd_name = NULL;
++
++	of_property_read_string(spi->dev.of_node,
++		"linux,mtd-name", &of_mtd_name);
+ 
+ 	data = dev_get_platdata(&spi->dev);
+ 
+@@ -212,6 +217,8 @@ static int m25p_probe(struct spi_device
+ 
+ 	if (data && data->name)
+ 		nor->mtd.name = data->name;
++	else if (of_mtd_name)
++		nor->mtd.name = of_mtd_name;
+ 
+ 	/* For some (historical?) reason many platforms provide two different
+ 	 * names in flash_platform_data: "name" and "type". Quite often name is
diff --git a/target/linux/lantiq/patches-4.4/0023-NET-PHY-adds-driver-for-lantiq-PHY11G.patch b/target/linux/lantiq/patches-4.4/0023-NET-PHY-adds-driver-for-lantiq-PHY11G.patch
new file mode 100644
index 0000000..191ccf2
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0023-NET-PHY-adds-driver-for-lantiq-PHY11G.patch
@@ -0,0 +1,537 @@
+From 0a63ab263725c427051a8bbaa0732b749627da27 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Thu, 7 Aug 2014 18:15:36 +0200
+Subject: [PATCH 23/36] NET: PHY: adds driver for lantiq PHY11G
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/net/phy/Kconfig  |    5 +
+ drivers/net/phy/Makefile |    1 +
+ drivers/net/phy/lantiq.c |  231 ++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 237 insertions(+)
+ create mode 100644 drivers/net/phy/lantiq.c
+
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -202,6 +202,11 @@ config RTL8306_PHY
+ 	tristate "Driver for Realtek RTL8306S switches"
+ 	select SWCONFIG
+ 
++config LANTIQ_PHY
++	tristate "Driver for Lantiq PHYs"
++	---help---
++	  Supports the Lantiq PHY11G and PHY22F PHYs.
++
+ config FIXED_PHY
+ 	tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ 	depends on PHYLIB
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -46,6 +46,7 @@ obj-$(CONFIG_DP83848_PHY)	+= dp83848.o
+ obj-$(CONFIG_DP83867_PHY)	+= dp83867.o
+ obj-$(CONFIG_STE10XP)		+= ste10Xp.o
+ obj-$(CONFIG_MICREL_PHY)	+= micrel.o
++obj-$(CONFIG_LANTIQ_PHY)        += lantiq.o
+ obj-$(CONFIG_MDIO_OCTEON)	+= mdio-octeon.o
+ obj-$(CONFIG_MICREL_KS8995MA)	+= spi_ks8995.o
+ obj-$(CONFIG_AT803X_PHY)	+= at803x.o
+--- /dev/null
++++ b/drivers/net/phy/lantiq.c
+@@ -0,0 +1,278 @@
++/*
++ *   This program is free software; you can redistribute it and/or modify
++ *   it under the terms of the GNU General Public License as published by
++ *   the Free Software Foundation; either version 2 of the License, or
++ *   (at your option) any later version.
++ *
++ *   This program is distributed in the hope that it will be useful,
++ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *   GNU General Public License for more details.
++ *
++ *   You should have received a copy of the GNU General Public License
++ *   along with this program; if not, write to the Free Software
++ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ *   Copyright (C) 2012 Daniel Schwierzeck <daniel.schwierzeck at googlemail.com>
++ */
++
++#include <linux/module.h>
++#include <linux/phy.h>
++#include <linux/of.h>
++
++#define MII_MMDCTRL		0x0d
++#define MII_MMDDATA		0x0e
++
++#define MII_VR9_11G_IMASK	0x19	/* interrupt mask */
++#define MII_VR9_11G_ISTAT	0x1a	/* interrupt status */
++
++#define INT_VR9_11G_WOL		BIT(15)	/* Wake-On-LAN */
++#define INT_VR9_11G_ANE		BIT(11)	/* Auto-Neg error */
++#define INT_VR9_11G_ANC		BIT(10)	/* Auto-Neg complete */
++#define INT_VR9_11G_ADSC	BIT(5)	/* Link auto-downspeed detect */
++#define INT_VR9_11G_DXMC	BIT(2)	/* Duplex mode change */
++#define INT_VR9_11G_LSPC	BIT(1)	/* Link speed change */
++#define INT_VR9_11G_LSTC	BIT(0)	/* Link state change */
++#define INT_VR9_11G_MASK	(INT_VR9_11G_LSTC | INT_VR9_11G_ADSC)
++
++#define ADVERTISED_MPD		BIT(10)	/* Multi-port device */
++
++#define MMD_DEVAD		0x1f
++#define MMD_ACTYPE_SHIFT	14
++#define MMD_ACTYPE_ADDRESS	(0 << MMD_ACTYPE_SHIFT)
++#define MMD_ACTYPE_DATA		(1 << MMD_ACTYPE_SHIFT)
++#define MMD_ACTYPE_DATA_PI	(2 << MMD_ACTYPE_SHIFT)
++#define MMD_ACTYPE_DATA_PIWR	(3 << MMD_ACTYPE_SHIFT)
++
++static __maybe_unused int vr9_gphy_mmd_read(struct phy_device *phydev,
++						u16 regnum)
++{
++	phy_write(phydev, MII_MMDCTRL, MMD_ACTYPE_ADDRESS | MMD_DEVAD);
++	phy_write(phydev, MII_MMDDATA, regnum);
++	phy_write(phydev, MII_MMDCTRL, MMD_ACTYPE_DATA | MMD_DEVAD);
++
++	return phy_read(phydev, MII_MMDDATA);
++}
++
++static __maybe_unused int vr9_gphy_mmd_write(struct phy_device *phydev,
++						u16 regnum, u16 val)
++{
++	phy_write(phydev, MII_MMDCTRL, MMD_ACTYPE_ADDRESS | MMD_DEVAD);
++	phy_write(phydev, MII_MMDDATA, regnum);
++	phy_write(phydev, MII_MMDCTRL, MMD_ACTYPE_DATA | MMD_DEVAD);
++	phy_write(phydev, MII_MMDDATA, val);
++
++	return 0;
++}
++
++#if IS_ENABLED(CONFIG_OF_MDIO)
++static int vr9_gphy_of_reg_init(struct phy_device *phydev)
++{
++	u32 tmp;
++
++	/* store the led values if one was passed by the devicetree */
++	if (!of_property_read_u32(phydev->dev.of_node, "lantiq,ledch", &tmp))
++		vr9_gphy_mmd_write(phydev, 0x1e0, tmp);
++
++	if (!of_property_read_u32(phydev->dev.of_node, "lantiq,ledcl", &tmp))
++		vr9_gphy_mmd_write(phydev, 0x1e1, tmp);
++
++	if (!of_property_read_u32(phydev->dev.of_node, "lantiq,led0h", &tmp))
++		vr9_gphy_mmd_write(phydev, 0x1e2, tmp);
++
++	if (!of_property_read_u32(phydev->dev.of_node, "lantiq,led0l", &tmp))
++		vr9_gphy_mmd_write(phydev, 0x1e3, tmp);
++
++	if (!of_property_read_u32(phydev->dev.of_node, "lantiq,led1h", &tmp))
++		vr9_gphy_mmd_write(phydev, 0x1e4, tmp);
++
++	if (!of_property_read_u32(phydev->dev.of_node, "lantiq,led1l", &tmp))
++		vr9_gphy_mmd_write(phydev, 0x1e5, tmp);
++	if (!of_property_read_u32(phydev->dev.of_node, "lantiq,led2h", &tmp))
++		vr9_gphy_mmd_write(phydev, 0x1e6, tmp);
++
++	if (!of_property_read_u32(phydev->dev.of_node, "lantiq,led2l", &tmp))
++		vr9_gphy_mmd_write(phydev, 0x1e7, tmp);
++
++	if (!of_property_read_u32(phydev->dev.of_node, "lantiq,led3h", &tmp))
++		vr9_gphy_mmd_write(phydev, 0x1e8, tmp);
++
++	if (!of_property_read_u32(phydev->dev.of_node, "lantiq,led3l", &tmp))
++		vr9_gphy_mmd_write(phydev, 0x1e9, tmp);
++
++	return 0;
++}
++#else
++static int vr9_gphy_of_reg_init(struct phy_device *phydev)
++{
++	return 0;
++}
++#endif /* CONFIG_OF_MDIO */
++
++static int vr9_gphy_config_init(struct phy_device *phydev)
++{
++	int err;
++
++	dev_dbg(&phydev->dev, "%s\n", __func__);
++
++	/* Mask all interrupts */
++	err = phy_write(phydev, MII_VR9_11G_IMASK, 0);
++	if (err)
++		return err;
++
++	/* Clear all pending interrupts */
++	phy_read(phydev, MII_VR9_11G_ISTAT);
++
++	vr9_gphy_mmd_write(phydev, 0x1e0, 0xc5);
++	vr9_gphy_mmd_write(phydev, 0x1e1, 0x67);
++	vr9_gphy_mmd_write(phydev, 0x1e2, 0x42);
++	vr9_gphy_mmd_write(phydev, 0x1e3, 0x10);
++	vr9_gphy_mmd_write(phydev, 0x1e4, 0x70);
++	vr9_gphy_mmd_write(phydev, 0x1e5, 0x03);
++	vr9_gphy_mmd_write(phydev, 0x1e6, 0x20);
++	vr9_gphy_mmd_write(phydev, 0x1e7, 0x00);
++	vr9_gphy_mmd_write(phydev, 0x1e8, 0x40);
++	vr9_gphy_mmd_write(phydev, 0x1e9, 0x20);
++
++	vr9_gphy_of_reg_init(phydev);
++
++	return 0;
++}
++
++static int vr9_gphy_config_aneg(struct phy_device *phydev)
++{
++	int reg, err;
++
++	/* Advertise as multi-port device */
++	reg = phy_read(phydev, MII_CTRL1000);
++	reg |= ADVERTISED_MPD;
++	err = phy_write(phydev, MII_CTRL1000, reg);
++	if (err)
++		return err;
++
++	return genphy_config_aneg(phydev);
++}
++
++static int vr9_gphy_ack_interrupt(struct phy_device *phydev)
++{
++	int reg;
++
++	/*
++	 * Possible IRQ numbers:
++	 * - IM3_IRL18 for GPHY0
++	 * - IM3_IRL17 for GPHY1
++	 *
++	 * Due to a silicon bug IRQ lines are not really independent from
++	 * each other. Sometimes the two lines are driven at the same time
++	 * if only one GPHY core raises the interrupt.
++	 */
++
++	reg = phy_read(phydev, MII_VR9_11G_ISTAT);
++
++	return (reg < 0) ? reg : 0;
++}
++
++static int vr9_gphy_did_interrupt(struct phy_device *phydev)
++{
++	int reg;
++
++	reg = phy_read(phydev, MII_VR9_11G_ISTAT);
++
++	return reg > 0;
++}
++
++static int vr9_gphy_config_intr(struct phy_device *phydev)
++{
++	int err;
++
++	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
++		err = phy_write(phydev, MII_VR9_11G_IMASK, INT_VR9_11G_MASK);
++	else
++		err = phy_write(phydev, MII_VR9_11G_IMASK, 0);
++
++	return err;
++}
++
++static struct phy_driver lantiq_phy[] = {
++	{
++		.phy_id		= 0xd565a400,
++		.phy_id_mask	= 0xfffffff8,
++		.name		= "Lantiq XWAY PEF7071",
++		.features	= (PHY_GBIT_FEATURES | SUPPORTED_Pause),
++		.flags		= 0, /*PHY_HAS_INTERRUPT,*/
++		.config_init	= vr9_gphy_config_init,
++		.config_aneg	= vr9_gphy_config_aneg,
++		.read_status	= genphy_read_status,
++		.ack_interrupt	= vr9_gphy_ack_interrupt,
++		.did_interrupt	= vr9_gphy_did_interrupt,
++		.config_intr	= vr9_gphy_config_intr,
++		.driver		= { .owner = THIS_MODULE },
++	}, {
++		.phy_id		= 0x030260D0,
++		.phy_id_mask	= 0xfffffff0,
++		.name		= "Lantiq XWAY VR9 GPHY 11G v1.3",
++		.features	= (PHY_GBIT_FEATURES | SUPPORTED_Pause),
++		.flags		= 0, /*PHY_HAS_INTERRUPT,*/
++		.config_init	= vr9_gphy_config_init,
++		.config_aneg	= vr9_gphy_config_aneg,
++		.read_status	= genphy_read_status,
++		.ack_interrupt	= vr9_gphy_ack_interrupt,
++		.did_interrupt	= vr9_gphy_did_interrupt,
++		.config_intr	= vr9_gphy_config_intr,
++		.driver		= { .owner = THIS_MODULE },
++	}, {
++		.phy_id		= 0xd565a408,
++		.phy_id_mask	= 0xfffffff8,
++		.name		= "Lantiq XWAY VR9 GPHY 11G v1.4",
++		.features	= (PHY_GBIT_FEATURES | SUPPORTED_Pause),
++		.flags		= 0, /*PHY_HAS_INTERRUPT,*/
++		.config_init	= vr9_gphy_config_init,
++		.config_aneg	= vr9_gphy_config_aneg,
++		.read_status	= genphy_read_status,
++		.ack_interrupt	= vr9_gphy_ack_interrupt,
++		.did_interrupt	= vr9_gphy_did_interrupt,
++		.config_intr	= vr9_gphy_config_intr,
++		.driver		= { .owner = THIS_MODULE },
++	}, {
++		.phy_id		= 0xd565a418,
++		.phy_id_mask	= 0xfffffff8,
++		.name		= "Lantiq XWAY XRX PHY22F v1.4",
++		.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause),
++		.flags		= 0, /*PHY_HAS_INTERRUPT,*/
++		.config_init	= vr9_gphy_config_init,
++		.config_aneg	= vr9_gphy_config_aneg,
++		.read_status	= genphy_read_status,
++		.ack_interrupt	= vr9_gphy_ack_interrupt,
++		.did_interrupt	= vr9_gphy_did_interrupt,
++		.config_intr	= vr9_gphy_config_intr,
++		.driver		= { .owner = THIS_MODULE },
++	},
++};
++
++static int __init ltq_phy_init(void)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(lantiq_phy); i++) {
++		int err = phy_driver_register(&lantiq_phy[i]);
++		if (err)
++			pr_err("lantiq_phy: failed to load %s\n", lantiq_phy[i].name);
++	}
++
++	return 0;
++}
++
++static void __exit ltq_phy_exit(void)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(lantiq_phy); i++)
++		phy_driver_unregister(&lantiq_phy[i]);
++}
++
++module_init(ltq_phy_init);
++module_exit(ltq_phy_exit);
++
++MODULE_DESCRIPTION("Lantiq PHY drivers");
++MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck at googlemail.com>");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/Documentation/devicetree/bindings/phy/phy-lanitq.txt
+@@ -0,0 +1,216 @@
++Lantiq PHY binding
++============================================
++
++This devicetree binding controls the LED functionality of the Lantiq Ethernet PHYs.
++
++Example:
++	mdio at 0 {
++		#address-cells = <1>;
++		#size-cells = <0>;
++		compatible = "lantiq,xrx200-mdio";
++			phy5: ethernet-phy at 5 {
++			reg = <0x1>;
++			compatible = "lantiq,phy11g", "ethernet-phy-ieee802.3-c22";
++		};
++		phy11: ethernet-phy at 11 {
++			reg = <0x11>;
++			compatible = "lantiq,phy22f", "ethernet-phy-ieee802.3-c22";
++			lantiq,led2h = <0x00>;
++			lantiq,led2l = <0x03>;
++		};
++		phy12: ethernet-phy at 12 {
++			reg = <0x12>;
++			compatible = "lantiq,phy22f", "ethernet-phy-ieee802.3-c22";
++			lantiq,led1h = <0x00>;
++			lantiq,led1l = <0x03>;
++		};
++		phy13: ethernet-phy at 13 {
++			reg = <0x13>;
++			compatible = "lantiq,phy22f", "ethernet-phy-ieee802.3-c22";
++			lantiq,led2h = <0x00>;
++			lantiq,led2l = <0x03>;
++		};
++		phy14: ethernet-phy at 14 {
++			reg = <0x14>;
++			compatible = "lantiq,phy22f", "ethernet-phy-ieee802.3-c22";
++			lantiq,led1h = <0x00>;
++			lantiq,led1l = <0x03>;
++		};
++	};
++
++Register Description
++============================================
++
++LEDCH:
++
++Name	Hardware Reset Value
++LEDCH	0x00C5
++
++| 15 |    |    |    |    |    |    |  8 |
++=========================================
++|		RES			|
++=========================================
++
++|  7 |    |    |    |    |    |    |  0 |
++=========================================
++|   FBF   |   SBF   |RES |     NACS     |
++=========================================
++
++Field	Bits	Type	Description
++FBF	7:6	RW	Fast Blink Frequency
++			---
++			0x0 (00b) F02HZ 2 Hz blinking frequency
++			0x1 (01b) F04HZ 4 Hz blinking frequency
++			0x2 (10b) F08HZ 8 Hz blinking frequency
++			0x3 (11b) F16HZ 16 Hz blinking frequency
++
++SBF	5:4	RW	Slow Blink Frequency
++			---
++			0x0 (00b) F02HZ 2 Hz blinking frequency
++			0x1 (01b) F04HZ 4 Hz blinking frequency
++			0x2 (10b) F08HZ 8 Hz blinking frequency
++			0x3 (11b) F16HZ 16 Hz blinking frequency
++
++NACS	2:0	RW	Inverse of Scan Function
++			---
++			0x0 (000b) NONE No Function
++			0x1 (001b) LINK Complex function enabled when link is up
++			0x2 (010b) PDOWN Complex function enabled when device is powered-down
++			0x3 (011b) EEE Complex function enabled when device is in EEE mode
++			0x4 (100b) ANEG Complex function enabled when auto-negotiation is running
++			0x5 (101b) ABIST Complex function enabled when analog self-test is running
++			0x6 (110b) CDIAG Complex function enabled when cable diagnostics are running
++			0x7 (111b) TEST Complex function enabled when test mode is running
++
++LEDCL:
++
++Name	Hardware Reset Value
++LEDCL	0x0067
++
++| 15 |    |    |    |    |    |    |  8 |
++=========================================
++|		RES			|
++=========================================
++
++|  7 |    |    |    |    |    |    |  0 |
++=========================================
++|RES |     SCAN     |RES |    CBLINK    |
++=========================================
++
++Field	Bits	Type	Description
++SCAN	6:4	RW	Complex Scan Configuration
++			---
++			000 B NONE No Function
++			001 B LINK Complex function enabled when link is up
++			010 B PDOWN Complex function enabled when device is powered-down
++			011 B EEE Complex function enabled when device is in EEE mode
++			100 B ANEG Complex function enabled when auto-negotiation is running
++			101 B ABIST Complex function enabled when analog self-test is running
++			110 B CDIAG Complex function enabled when cable diagnostics are running
++			111 B TEST Complex function enabled when test mode is running
++
++CBLINK	2:0	RW	Complex Blinking Configuration
++			---
++			000 B NONE No Function
++			001 B LINK Complex function enabled when link is up
++			010 B PDOWN Complex function enabled when device is powered-down
++			011 B EEE Complex function enabled when device is in EEE mode
++			100 B ANEG Complex function enabled when auto-negotiation is running
++			101 B ABIST Complex function enabled when analog self-test is running
++			110 B CDIAG Complex function enabled when cable diagnostics are running
++			111 B TEST Complex function enabled when test mode is running
++
++LEDxH:
++
++Name	Hardware Reset Value
++LED0H	0x0070
++LED1H	0x0020
++LED2H	0x0040
++LED3H	0x0040
++
++| 15 |    |    |    |    |    |    |  8 |
++=========================================
++|		RES			|
++=========================================
++
++|  7 |    |    |    |    |    |    |  0 |
++=========================================
++|        CON        |       BLINKF      |
++=========================================
++
++Field	Bits	Type	Description
++CON	7:4	RW	Constant On Configuration
++			---
++			0x0 (0000b) NONE LED does not light up constantly
++			0x1 (0001b) LINK10 LED is on when link is 10 Mbit/s
++			0x2 (0010b) LINK100 LED is on when link is 100 Mbit/s
++			0x3 (0011b) LINK10X LED is on when link is 10/100 Mbit/s
++			0x4 (0100b) LINK1000 LED is on when link is 1000 Mbit/s
++			0x5 (0101b) LINK10_0 LED is on when link is 10/1000 Mbit/s
++			0x6 (0110b) LINK100X LED is on when link is 100/1000 Mbit/s
++			0x7 (0111b) LINK10XX LED is on when link is 10/100/1000 Mbit/s
++			0x8 (1000b) PDOWN LED is on when device is powered-down
++			0x9 (1001b) EEE LED is on when device is in EEE mode
++			0xA (1010b) ANEG LED is on when auto-negotiation is running
++			0xB (1011b) ABIST LED is on when analog self-test is running
++			0xC (1100b) CDIAG LED is on when cable diagnostics are running
++
++BLINKF	3:0	RW	Fast Blinking Configuration
++			---
++			0x0 (0000b) NONE No Blinking
++			0x1 (0001b) LINK10 Blink when link is 10 Mbit/s
++			0x2 (0010b) LINK100 Blink when link is 100 Mbit/s
++			0x3 (0011b) LINK10X Blink when link is 10/100 Mbit/s
++			0x4 (0100b) LINK1000 Blink when link is 1000 Mbit/s
++			0x5 (0101b) LINK10_0 Blink when link is 10/1000 Mbit/s
++			0x6 (0110b) LINK100X Blink when link is 100/1000 Mbit/s
++			0x7 (0111b) LINK10XX Blink when link is 10/100/1000 Mbit/s
++			0x8 (1000b) PDOWN Blink when device is powered-down
++			0x9 (1001b) EEE Blink when device is in EEE mode
++			0xA (1010b) ANEG Blink when auto-negotiation is running
++			0xB (1011b) ABIST Blink when analog self-test is running
++			0xC (1100b) CDIAG Blink when cable diagnostics are running
++
++LEDxL:
++
++Name	Hardware Reset Value
++LED0L	0x0003
++LED1L	0x0000
++LED2L	0x0000
++LED3L	0x0020
++
++| 15 |    |    |    |    |    |    |  8 |
++=========================================
++|		RES			|
++=========================================
++
++|  7 |    |    |    |    |    |    |  0 |
++=========================================
++|      BLINKS       |       PULSE       |
++=========================================
++
++Field	Bits	Type	Description
++BLINKS	7:4	RW	Slow Blinking Configuration
++			---
++			0x0 (0000b) NONE No Blinking
++			0x1 (0001b) LINK10 Blink when link is 10 Mbit/s
++			0x2 (0010b) LINK100 Blink when link is 100 Mbit/s
++			0x3 (0011b) LINK10X Blink when link is 10/100 Mbit/s
++			0x4 (0100b) LINK1000 Blink when link is 1000 Mbit/s
++			0x5 (0101b) LINK10_0 Blink when link is 10/1000 Mbit/s
++			0x6 (0110b) LINK100X Blink when link is 100/1000 Mbit/s
++			0x7 (0111b) LINK10XX Blink when link is 10/100/1000 Mbit/s
++			0x8 (1000b) PDOWN Blink when device is powered-down
++			0x9 (1001b) EEE Blink when device is in EEE mode
++			0xA (1010b) ANEG Blink when auto-negotiation is running
++			0xB (1011b) ABIST Blink when analog self-test is running
++			0xC (1100b) CDIAG Blink when cable diagnostics are running
++
++PULSE	3:0	RW	Pulsing Configuration
++			The PULSE field is a bit mask, so several events can be combined.
++			---
++			0x0 (0000b) NONE No pulsing
++			0x1 (0001b) TXACT Transmit activity
++			0x2 (0010b) RXACT Receive activity
++			0x4 (0100b) COL Collision
++			0x8 (1000b) RES Reserved
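
For illustration only (not part of the patch above): a minimal C sketch of how
the LEDxH/LEDxL values documented in these tables are composed. The bit
positions follow the tables (CON/BLINKS in bits 7:4, BLINKF/PULSE in bits 3:0);
the helper and constant names are made up for this example.

	/* selected field encodings from the tables above */
	#define LED_CON_LINK10XX	0x7	/* constant on for 10/100/1000 Mbit/s link */
	#define LED_BLINKF_NONE		0x0	/* no fast blinking */
	#define LED_BLINKS_NONE		0x0	/* no slow blinking */
	#define LED_PULSE_TXACT		0x1	/* pulse on transmit activity */
	#define LED_PULSE_RXACT		0x2	/* pulse on receive activity */

	/* LEDxH = CON in bits 7:4, BLINKF in bits 3:0 */
	static unsigned short ledxh_val(unsigned char con, unsigned char blinkf)
	{
		return ((con & 0xf) << 4) | (blinkf & 0xf);
	}

	/* LEDxL = BLINKS in bits 7:4, PULSE in bits 3:0; PULSE is a mask, so
	 * several events may be OR-ed together */
	static unsigned short ledxl_val(unsigned char blinks, unsigned char pulse)
	{
		return ((blinks & 0xf) << 4) | (pulse & 0xf);
	}

	/* ledxh_val(LED_CON_LINK10XX, LED_BLINKF_NONE) == 0x0070 and
	 * ledxl_val(LED_BLINKS_NONE, LED_PULSE_TXACT | LED_PULSE_RXACT) == 0x0003,
	 * i.e. the LED0H/LED0L hardware reset values listed above. */
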
diff --git a/target/linux/lantiq/patches-4.4/0024-NET-lantiq-adds-PHY11G-firmware-blobs.patch b/target/linux/lantiq/patches-4.4/0024-NET-lantiq-adds-PHY11G-firmware-blobs.patch
new file mode 100644
index 0000000..b69b2a9
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0024-NET-lantiq-adds-PHY11G-firmware-blobs.patch
@@ -0,0 +1,364 @@
+From 77e89d5a28be35058041c79e9874ab26f222c603 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Mon, 22 Oct 2012 09:26:24 +0200
+Subject: [PATCH 24/36] NET: lantiq: adds PHY11G firmware blobs
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ firmware/Makefile       |    4 +
+ firmware/lantiq/COPYING |  286 +++++++++++++++++++++++++++++++++++++++++++++++
+ firmware/lantiq/README  |   45 ++++++++
+ 3 files changed, 335 insertions(+)
+ create mode 100644 firmware/lantiq/COPYING
+ create mode 100644 firmware/lantiq/README
+
+--- a/firmware/Makefile
++++ b/firmware/Makefile
+@@ -134,6 +134,10 @@ fw-shipped-$(CONFIG_USB_SERIAL_KEYSPAN_P
+ fw-shipped-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda/xircom_pgs.fw
+ fw-shipped-$(CONFIG_USB_VICAM) += vicam/firmware.fw
+ fw-shipped-$(CONFIG_VIDEO_CPIA2) += cpia2/stv0672_vp4.bin
++fw-shipped-$(CONFIG_LANTIQ_XRX200) += lantiq/vr9_phy11g_a1x.bin
++fw-shipped-$(CONFIG_LANTIQ_XRX200) += lantiq/vr9_phy11g_a2x.bin
++fw-shipped-$(CONFIG_LANTIQ_XRX200) += lantiq/vr9_phy22f_a1x.bin
++fw-shipped-$(CONFIG_LANTIQ_XRX200) += lantiq/vr9_phy22f_a2x.bin
+ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin
+ 
+ fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-)
+--- /dev/null
++++ b/firmware/lantiq/COPYING
+@@ -0,0 +1,286 @@
++All firmware files are copyrighted by Lantiq Deutschland GmbH.
++The files have been extracted from header files found in Lantiq BSPs.
++If not stated otherwise all files are licensed under GPL.
++
++=======================================================================
++
++		    GNU GENERAL PUBLIC LICENSE
++		       Version 2, June 1991
++
++ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
++     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
++ Everyone is permitted to copy and distribute verbatim copies
++ of this license document, but changing it is not allowed.
++
++			    Preamble
++
++  The licenses for most software are designed to take away your
++freedom to share and change it.  By contrast, the GNU General Public
++License is intended to guarantee your freedom to share and change free
++software--to make sure the software is free for all its users.  This
++General Public License applies to most of the Free Software
++Foundation's software and to any other program whose authors commit to
++using it.  (Some other Free Software Foundation software is covered by
++the GNU Library General Public License instead.)  You can apply it to
++your programs, too.
++
++  When we speak of free software, we are referring to freedom, not
++price.  Our General Public Licenses are designed to make sure that you
++have the freedom to distribute copies of free software (and charge for
++this service if you wish), that you receive source code or can get it
++if you want it, that you can change the software or use pieces of it
++in new free programs; and that you know you can do these things.
++
++  To protect your rights, we need to make restrictions that forbid
++anyone to deny you these rights or to ask you to surrender the rights.
++These restrictions translate to certain responsibilities for you if you
++distribute copies of the software, or if you modify it.
++
++  For example, if you distribute copies of such a program, whether
++gratis or for a fee, you must give the recipients all the rights that
++you have.  You must make sure that they, too, receive or can get the
++source code.  And you must show them these terms so they know their
++rights.
++
++  We protect your rights with two steps: (1) copyright the software, and
++(2) offer you this license which gives you legal permission to copy,
++distribute and/or modify the software.
++
++  Also, for each author's protection and ours, we want to make certain
++that everyone understands that there is no warranty for this free
++software.  If the software is modified by someone else and passed on, we
++want its recipients to know that what they have is not the original, so
++that any problems introduced by others will not reflect on the original
++authors' reputations.
++
++  Finally, any free program is threatened constantly by software
++patents.  We wish to avoid the danger that redistributors of a free
++program will individually obtain patent licenses, in effect making the
++program proprietary.  To prevent this, we have made it clear that any
++patent must be licensed for everyone's free use or not licensed at all.
++
++  The precise terms and conditions for copying, distribution and
++modification follow.
++
++
++		    GNU GENERAL PUBLIC LICENSE
++   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
++
++  0. This License applies to any program or other work which contains
++a notice placed by the copyright holder saying it may be distributed
++under the terms of this General Public License.  The "Program", below,
++refers to any such program or work, and a "work based on the Program"
++means either the Program or any derivative work under copyright law:
++that is to say, a work containing the Program or a portion of it,
++either verbatim or with modifications and/or translated into another
++language.  (Hereinafter, translation is included without limitation in
++the term "modification".)  Each licensee is addressed as "you".
++
++Activities other than copying, distribution and modification are not
++covered by this License; they are outside its scope.  The act of
++running the Program is not restricted, and the output from the Program
++is covered only if its contents constitute a work based on the
++Program (independent of having been made by running the Program).
++Whether that is true depends on what the Program does.
++
++  1. You may copy and distribute verbatim copies of the Program's
++source code as you receive it, in any medium, provided that you
++conspicuously and appropriately publish on each copy an appropriate
++copyright notice and disclaimer of warranty; keep intact all the
++notices that refer to this License and to the absence of any warranty;
++and give any other recipients of the Program a copy of this License
++along with the Program.
++
++You may charge a fee for the physical act of transferring a copy, and
++you may at your option offer warranty protection in exchange for a fee.
++
++  2. You may modify your copy or copies of the Program or any portion
++of it, thus forming a work based on the Program, and copy and
++distribute such modifications or work under the terms of Section 1
++above, provided that you also meet all of these conditions:
++
++    a) You must cause the modified files to carry prominent notices
++    stating that you changed the files and the date of any change.
++
++    b) You must cause any work that you distribute or publish, that in
++    whole or in part contains or is derived from the Program or any
++    part thereof, to be licensed as a whole at no charge to all third
++    parties under the terms of this License.
++
++    c) If the modified program normally reads commands interactively
++    when run, you must cause it, when started running for such
++    interactive use in the most ordinary way, to print or display an
++    announcement including an appropriate copyright notice and a
++    notice that there is no warranty (or else, saying that you provide
++    a warranty) and that users may redistribute the program under
++    these conditions, and telling the user how to view a copy of this
++    License.  (Exception: if the Program itself is interactive but
++    does not normally print such an announcement, your work based on
++    the Program is not required to print an announcement.)
++
++
++These requirements apply to the modified work as a whole.  If
++identifiable sections of that work are not derived from the Program,
++and can be reasonably considered independent and separate works in
++themselves, then this License, and its terms, do not apply to those
++sections when you distribute them as separate works.  But when you
++distribute the same sections as part of a whole which is a work based
++on the Program, the distribution of the whole must be on the terms of
++this License, whose permissions for other licensees extend to the
++entire whole, and thus to each and every part regardless of who wrote it.
++
++Thus, it is not the intent of this section to claim rights or contest
++your rights to work written entirely by you; rather, the intent is to
++exercise the right to control the distribution of derivative or
++collective works based on the Program.
++
++In addition, mere aggregation of another work not based on the Program
++with the Program (or with a work based on the Program) on a volume of
++a storage or distribution medium does not bring the other work under
++the scope of this License.
++
++  3. You may copy and distribute the Program (or a work based on it,
++under Section 2) in object code or executable form under the terms of
++Sections 1 and 2 above provided that you also do one of the following:
++
++    a) Accompany it with the complete corresponding machine-readable
++    source code, which must be distributed under the terms of Sections
++    1 and 2 above on a medium customarily used for software interchange; or,
++
++    b) Accompany it with a written offer, valid for at least three
++    years, to give any third party, for a charge no more than your
++    cost of physically performing source distribution, a complete
++    machine-readable copy of the corresponding source code, to be
++    distributed under the terms of Sections 1 and 2 above on a medium
++    customarily used for software interchange; or,
++
++    c) Accompany it with the information you received as to the offer
++    to distribute corresponding source code.  (This alternative is
++    allowed only for noncommercial distribution and only if you
++    received the program in object code or executable form with such
++    an offer, in accord with Subsection b above.)
++
++The source code for a work means the preferred form of the work for
++making modifications to it.  For an executable work, complete source
++code means all the source code for all modules it contains, plus any
++associated interface definition files, plus the scripts used to
++control compilation and installation of the executable.  However, as a
++special exception, the source code distributed need not include
++anything that is normally distributed (in either source or binary
++form) with the major components (compiler, kernel, and so on) of the
++operating system on which the executable runs, unless that component
++itself accompanies the executable.
++
++If distribution of executable or object code is made by offering
++access to copy from a designated place, then offering equivalent
++access to copy the source code from the same place counts as
++distribution of the source code, even though third parties are not
++compelled to copy the source along with the object code.
++
++
++  4. You may not copy, modify, sublicense, or distribute the Program
++except as expressly provided under this License.  Any attempt
++otherwise to copy, modify, sublicense or distribute the Program is
++void, and will automatically terminate your rights under this License.
++However, parties who have received copies, or rights, from you under
++this License will not have their licenses terminated so long as such
++parties remain in full compliance.
++
++  5. You are not required to accept this License, since you have not
++signed it.  However, nothing else grants you permission to modify or
++distribute the Program or its derivative works.  These actions are
++prohibited by law if you do not accept this License.  Therefore, by
++modifying or distributing the Program (or any work based on the
++Program), you indicate your acceptance of this License to do so, and
++all its terms and conditions for copying, distributing or modifying
++the Program or works based on it.
++
++  6. Each time you redistribute the Program (or any work based on the
++Program), the recipient automatically receives a license from the
++original licensor to copy, distribute or modify the Program subject to
++these terms and conditions.  You may not impose any further
++restrictions on the recipients' exercise of the rights granted herein.
++You are not responsible for enforcing compliance by third parties to
++this License.
++
++  7. If, as a consequence of a court judgment or allegation of patent
++infringement or for any other reason (not limited to patent issues),
++conditions are imposed on you (whether by court order, agreement or
++otherwise) that contradict the conditions of this License, they do not
++excuse you from the conditions of this License.  If you cannot
++distribute so as to satisfy simultaneously your obligations under this
++License and any other pertinent obligations, then as a consequence you
++may not distribute the Program at all.  For example, if a patent
++license would not permit royalty-free redistribution of the Program by
++all those who receive copies directly or indirectly through you, then
++the only way you could satisfy both it and this License would be to
++refrain entirely from distribution of the Program.
++
++If any portion of this section is held invalid or unenforceable under
++any particular circumstance, the balance of the section is intended to
++apply and the section as a whole is intended to apply in other
++circumstances.
++
++It is not the purpose of this section to induce you to infringe any
++patents or other property right claims or to contest validity of any
++such claims; this section has the sole purpose of protecting the
++integrity of the free software distribution system, which is
++implemented by public license practices.  Many people have made
++generous contributions to the wide range of software distributed
++through that system in reliance on consistent application of that
++system; it is up to the author/donor to decide if he or she is willing
++to distribute software through any other system and a licensee cannot
++impose that choice.
++
++This section is intended to make thoroughly clear what is believed to
++be a consequence of the rest of this License.
++
++
++  8. If the distribution and/or use of the Program is restricted in
++certain countries either by patents or by copyrighted interfaces, the
++original copyright holder who places the Program under this License
++may add an explicit geographical distribution limitation excluding
++those countries, so that distribution is permitted only in or among
++countries not thus excluded.  In such case, this License incorporates
++the limitation as if written in the body of this License.
++
++  9. The Free Software Foundation may publish revised and/or new versions
++of the General Public License from time to time.  Such new versions will
++be similar in spirit to the present version, but may differ in detail to
++address new problems or concerns.
++
++Each version is given a distinguishing version number.  If the Program
++specifies a version number of this License which applies to it and "any
++later version", you have the option of following the terms and conditions
++either of that version or of any later version published by the Free
++Software Foundation.  If the Program does not specify a version number of
++this License, you may choose any version ever published by the Free Software
++Foundation.
++
++  10. If you wish to incorporate parts of the Program into other free
++programs whose distribution conditions are different, write to the author
++to ask for permission.  For software which is copyrighted by the Free
++Software Foundation, write to the Free Software Foundation; we sometimes
++make exceptions for this.  Our decision will be guided by the two goals
++of preserving the free status of all derivatives of our free software and
++of promoting the sharing and reuse of software generally.
++
++			    NO WARRANTY
++
++  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
++FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
++OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
++PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
++OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
++TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
++PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
++REPAIR OR CORRECTION.
++
++  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
++WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
++REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
++INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
++OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
++TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
++YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
++PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
++POSSIBILITY OF SUCH DAMAGES.
++
++		     END OF TERMS AND CONDITIONS
+--- /dev/null
++++ b/firmware/lantiq/README
+@@ -0,0 +1,45 @@
++#
++# This program is free software; you can redistribute it and/or
++# modify it under the terms of the GNU General Public License as
++# published by the Free Software Foundation; either version 2 of
++# the License, or (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software
++# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
++# MA 02111-1307 USA
++#
++# (C) Copyright 2007 - 2012
++# Lantiq Deutschland GmbH
++#
++# (C) Copyright 2012
++# Daniel Schwierzeck <daniel.schwierzeck at googlemail.com>
++#
++
++#
++# How to use
++#
++Configure kernel with:
++CONFIG_FW_LOADER=y
++CONFIG_EXTRA_FIRMWARE_DIR="FIRMWARE_DIR"
++CONFIG_EXTRA_FIRMWARE="FIRMWARE_FILES"
++
++where FIRMWARE_DIR should point to this git tree and FIRMWARE_FILES is a list
++of space-separated files from the list below.
++
++#
++# Firmware files
++#
++
++# GPHY core on Lantiq XWAY VR9 v1.1
++lantiq/vr9_phy11g_a1x.bin
++lantiq/vr9_phy22f_a1x.bin
++
++# GPHY core on Lantiq XWAY VR9 v1.2
++lantiq/vr9_phy11g_a2x.bin
++lantiq/vr9_phy22f_a2x.bin
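
As a concrete example of the configuration described in the README above (the
values are illustrative; the directory and file selection depend on where the
firmware tree is checked out and on the GPHY revision in use):

	CONFIG_FW_LOADER=y
	CONFIG_EXTRA_FIRMWARE_DIR="firmware"
	CONFIG_EXTRA_FIRMWARE="lantiq/vr9_phy11g_a2x.bin lantiq/vr9_phy22f_a2x.bin"
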
diff --git a/target/linux/lantiq/patches-4.4/0025-NET-MIPS-lantiq-adds-xrx200-net.patch b/target/linux/lantiq/patches-4.4/0025-NET-MIPS-lantiq-adds-xrx200-net.patch
new file mode 100644
index 0000000..e944980
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0025-NET-MIPS-lantiq-adds-xrx200-net.patch
@@ -0,0 +1,3340 @@
+From fb0c9601f4414c39ff68e26b88681bef0bb04954 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Mon, 22 Oct 2012 12:22:23 +0200
+Subject: [PATCH 25/36] NET: MIPS: lantiq: adds xrx200-net
+
+---
+ drivers/net/ethernet/Kconfig            |    8 +-
+ drivers/net/ethernet/Makefile           |    1 +
+ drivers/net/ethernet/lantiq_pce.h       |  163 +++
+ drivers/net/ethernet/lantiq_xrx200.c    | 1798 +++++++++++++++++++++++++++++++
+ drivers/net/ethernet/lantiq_xrx200_sw.h | 1328 +++++++++++++++++++++++
+ 5 files changed, 3297 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/ethernet/lantiq_pce.h
+ create mode 100644 drivers/net/ethernet/lantiq_xrx200.c
+ create mode 100644 drivers/net/ethernet/lantiq_xrx200_sw.h
+
+--- a/drivers/net/ethernet/Kconfig
++++ b/drivers/net/ethernet/Kconfig
+@@ -103,7 +103,13 @@ config LANTIQ_ETOP
+ 	tristate "Lantiq SoC ETOP driver"
+ 	depends on SOC_TYPE_XWAY
+ 	---help---
+-	  Support for the MII0 inside the Lantiq SoC
++	  Support for the MII0 inside the Lantiq ADSL SoC
++
++config LANTIQ_XRX200
++	tristate "Lantiq SoC XRX200 driver"
++	depends on SOC_TYPE_XWAY
++	---help---
++	  Support for the MII0 inside the Lantiq VDSL SoC
+ 
+ source "drivers/net/ethernet/marvell/Kconfig"
+ source "drivers/net/ethernet/mellanox/Kconfig"
+--- a/drivers/net/ethernet/Makefile
++++ b/drivers/net/ethernet/Makefile
+@@ -45,6 +45,7 @@ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscal
+ obj-$(CONFIG_JME) += jme.o
+ obj-$(CONFIG_KORINA) += korina.o
+ obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
++obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o
+ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
+ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
+ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
+--- /dev/null
++++ b/drivers/net/ethernet/lantiq_pce.h
+@@ -0,0 +1,163 @@
++/*
++ *   This program is free software; you can redistribute it and/or modify it
++ *   under the terms of the GNU General Public License version 2 as published
++ *   by the Free Software Foundation.
++ *
++ *   This program is distributed in the hope that it will be useful,
++ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *   GNU General Public License for more details.
++ *
++ *   You should have received a copy of the GNU General Public License
++ *   along with this program; if not, write to the Free Software
++ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ *   Copyright (C) 2010 Lantiq Deutschland GmbH
++ *   Copyright (C) 2012 John Crispin <blogic at openwrt.org>
++ *
++ *   PCE microcode extracted from UGW5.2 switch api
++ */
++
++/* Switch API Micro Code V0.3 */
++enum {
++	OUT_MAC0 = 0,
++	OUT_MAC1,
++	OUT_MAC2,
++	OUT_MAC3,
++	OUT_MAC4,
++	OUT_MAC5,
++	OUT_ETHTYP,
++	OUT_VTAG0,
++	OUT_VTAG1,
++	OUT_ITAG0,
++	OUT_ITAG1,	/*10 */
++	OUT_ITAG2,
++	OUT_ITAG3,
++	OUT_IP0,
++	OUT_IP1,
++	OUT_IP2,
++	OUT_IP3,
++	OUT_SIP0,
++	OUT_SIP1,
++	OUT_SIP2,
++	OUT_SIP3,	/*20*/
++	OUT_SIP4,
++	OUT_SIP5,
++	OUT_SIP6,
++	OUT_SIP7,
++	OUT_DIP0,
++	OUT_DIP1,
++	OUT_DIP2,
++	OUT_DIP3,
++	OUT_DIP4,
++	OUT_DIP5,	/*30*/
++	OUT_DIP6,
++	OUT_DIP7,
++	OUT_SESID,
++	OUT_PROT,
++	OUT_APP0,
++	OUT_APP1,
++	OUT_IGMP0,
++	OUT_IGMP1,
++	OUT_IPOFF,	/*39*/
++	OUT_NONE =  63
++};
++
++/* parser's microcode length type */
++#define INSTR		0
++#define IPV6		1
++#define LENACCU		2
++
++/* parser's microcode flag type */
++enum {
++	FLAG_ITAG =  0,
++	FLAG_VLAN,
++	FLAG_SNAP,
++	FLAG_PPPOE,
++	FLAG_IPV6,
++	FLAG_IPV6FL,
++	FLAG_IPV4,
++	FLAG_IGMP,
++	FLAG_TU,
++	FLAG_HOP,
++	FLAG_NN1,	/*10 */
++	FLAG_NN2,
++	FLAG_END,
++	FLAG_NO,	/*13*/
++};
++
++/* Micro code version V2_11 (extension for parsing IPv6 in PPPoE) */
++#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \
++	{ {val, msk, (ns<<10 | out<<4 | len>>1), (len&1)<<15 | type<<13 | flags<<9 | ipv4_len<<8 }}
++struct pce_microcode {
++	unsigned short val[4];
++/*	unsigned short val_2;
++	unsigned short val_1;
++	unsigned short val_0;*/
++} pce_microcode[] = {
++	/*      value    mask    ns  fields      L  type     flags       ipv4_len */
++	MC_ENTRY(0x88c3, 0xFFFF,  1, OUT_ITAG0,  4, INSTR,   FLAG_ITAG,  0),
++	MC_ENTRY(0x8100, 0xFFFF,  2, OUT_VTAG0,  2, INSTR,   FLAG_VLAN,  0),
++	MC_ENTRY(0x88A8, 0xFFFF,  1, OUT_VTAG0,  2, INSTR,   FLAG_VLAN,  0),
++	MC_ENTRY(0x8100, 0xFFFF,  1, OUT_VTAG0,  2, INSTR,   FLAG_VLAN,  0),
++	MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x86DD, 0xFFFF, 22, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 38, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0600, 0x0600, 38, OUT_ETHTYP, 1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE,   1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE,   1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0300, 0xFF00, 39, OUT_NONE,   0, INSTR,   FLAG_SNAP,  0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_DIP7,   3, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7,   3, INSTR,   FLAG_PPPOE, 0),
++	MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE,   1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE,   1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0,    4, INSTR,   FLAG_IPV4,  1),
++	MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0,    3, INSTR,   FLAG_IPV6,  0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3,    2, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0,   4, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 38, OUT_NONE,   0, LENACCU, FLAG_NO,    0),
++	MC_ENTRY(0x1100, 0xFF00, 37, OUT_PROT,   1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0600, 0xFF00, 37, OUT_PROT,   1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3,   17, INSTR,   FLAG_HOP,   0),
++	MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3,   17, INSTR,   FLAG_NN1,   0),
++	MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3,   17, INSTR,   FLAG_NN2,   0),
++	MC_ENTRY(0x0000, 0x0000, 37, OUT_PROT,   1, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE,   0, IPV6,    FLAG_HOP,   0),
++	MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE,   0, IPV6,    FLAG_NN1,   0),
++	MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE,   0, IPV6,    FLAG_NN2,   0),
++	MC_ENTRY(0x0000, 0x0000, 38, OUT_PROT,   1, IPV6,    FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 38, OUT_SIP0,  16, INSTR,   FLAG_NO,    0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_APP0,   4, INSTR,   FLAG_IGMP,  0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++	MC_ENTRY(0x0000, 0x0000, 39, OUT_NONE,   0, INSTR,   FLAG_END,   0),
++};
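
For illustration only (not part of the patch): the MC_ENTRY() macro above packs
each microcode row into four 16-bit words. A small helper that undoes that
packing makes the layout explicit; the struct and function names below are made
up for this sketch.

	struct mc_fields {
		unsigned short val, msk;
		unsigned char ns, out, len, type, flags, ipv4_len;
	};

	/* mirrors the packing: val, msk, (ns<<10 | out<<4 | len>>1),
	 * (len&1)<<15 | type<<13 | flags<<9 | ipv4_len<<8 */
	static void mc_entry_unpack(const struct pce_microcode *mc,
				    struct mc_fields *f)
	{
		f->val      = mc->val[0];
		f->msk      = mc->val[1];
		f->ns       = (mc->val[2] >> 10) & 0x3f;
		f->out      = (mc->val[2] >> 4) & 0x3f;
		f->len      = ((mc->val[2] & 0xf) << 1) | ((mc->val[3] >> 15) & 0x1);
		f->type     = (mc->val[3] >> 13) & 0x3;
		f->flags    = (mc->val[3] >> 9) & 0xf;
		f->ipv4_len = (mc->val[3] >> 8) & 0x1;
	}
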
+--- /dev/null
++++ b/drivers/net/ethernet/lantiq_xrx200.c
+@@ -0,0 +1,1797 @@
++/*
++ *   This program is free software; you can redistribute it and/or modify it
++ *   under the terms of the GNU General Public License version 2 as published
++ *   by the Free Software Foundation.
++ *
++ *   This program is distributed in the hope that it will be useful,
++ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *   GNU General Public License for more details.
++ *
++ *   You should have received a copy of the GNU General Public License
++ *   along with this program; if not, write to the Free Software
++ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ *   Copyright (C) 2010 Lantiq Deutschland
++ *   Copyright (C) 2012 John Crispin <blogic at openwrt.org>
++ */
++
++#include <linux/switch.h>
++#include <linux/etherdevice.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/clk.h>
++#include <asm/delay.h>
++
++#include <linux/of_net.h>
++#include <linux/of_mdio.h>
++#include <linux/of_gpio.h>
++
++#include <xway_dma.h>
++#include <lantiq_soc.h>
++
++#include "lantiq_pce.h"
++#include "lantiq_xrx200_sw.h"
++
++#define SW_POLLING
++#define SW_ROUTING
++/* #define SW_PORTMAP */
++
++#ifdef SW_ROUTING
++  #ifdef SW_PORTMAP
++#define XRX200_MAX_DEV		2
++  #else
++#define XRX200_MAX_DEV		2
++  #endif
++#else
++#define XRX200_MAX_DEV		1
++#endif
++
++#define XRX200_MAX_VLAN		64
++#define XRX200_PCE_ACTVLAN_IDX	0x01
++#define XRX200_PCE_VLANMAP_IDX	0x02
++
++#define XRX200_MAX_PORT		7
++#define XRX200_MAX_DMA		8
++
++#define XRX200_HEADROOM		4
++
++#define XRX200_TX_TIMEOUT	(10 * HZ)
++
++/* port type */
++#define XRX200_PORT_TYPE_PHY	1
++#define XRX200_PORT_TYPE_MAC	2
++
++/* DMA */
++#define XRX200_DMA_DATA_LEN	0x600
++#define XRX200_DMA_IRQ		INT_NUM_IM2_IRL0
++#define XRX200_DMA_RX		0
++#define XRX200_DMA_TX		1
++#define XRX200_DMA_IS_TX(x)	(x%2)
++#define XRX200_DMA_IS_RX(x)	(!XRX200_DMA_IS_TX(x))
++
++/* fetch / store dma */
++#define FDMA_PCTRL0		0x2A00
++#define FDMA_PCTRLx(x)		(FDMA_PCTRL0 + (x * 0x18))
++#define SDMA_PCTRL0		0x2F00
++#define SDMA_PCTRLx(x)		(SDMA_PCTRL0 + (x * 0x18))
++
++/* buffer management */
++#define BM_PCFG0		0x200
++#define BM_PCFGx(x)		(BM_PCFG0 + (x * 8))
++
++/* MDIO */
++#define MDIO_GLOB		0x0000
++#define MDIO_CTRL		0x0020
++#define MDIO_READ		0x0024
++#define MDIO_WRITE		0x0028
++#define MDIO_PHY0		0x0054
++#define MDIO_PHY(x)		(0x0054 - (x * sizeof(unsigned)))
++#define MDIO_CLK_CFG0		0x002C
++#define MDIO_CLK_CFG1		0x0030
++
++#define MDIO_GLOB_ENABLE	0x8000
++#define MDIO_BUSY		BIT(12)
++#define MDIO_RD			BIT(11)
++#define MDIO_WR			BIT(10)
++#define MDIO_MASK		0x1f
++#define MDIO_ADDRSHIFT		5
++#define MDIO1_25MHZ		9
++
++#define MDIO_PHY_LINK_DOWN	0x4000
++#define MDIO_PHY_LINK_UP	0x2000
++
++#define MDIO_PHY_SPEED_M10	0x0000
++#define MDIO_PHY_SPEED_M100	0x0800
++#define MDIO_PHY_SPEED_G1	0x1000
++
++#define MDIO_PHY_FDUP_EN	0x0200
++#define MDIO_PHY_FDUP_DIS	0x0600
++
++#define MDIO_PHY_LINK_MASK	0x6000
++#define MDIO_PHY_SPEED_MASK	0x1800
++#define MDIO_PHY_FDUP_MASK	0x0600
++#define MDIO_PHY_ADDR_MASK	0x001f
++#define MDIO_UPDATE_MASK	(MDIO_PHY_ADDR_MASK | MDIO_PHY_LINK_MASK | \
++					MDIO_PHY_SPEED_MASK | MDIO_PHY_FDUP_MASK)
++
++/* MII */
++#define MII_CFG(p)		(p * 8)
++
++#define MII_CFG_EN		BIT(14)
++
++#define MII_CFG_MODE_MIIP	0x0
++#define MII_CFG_MODE_MIIM	0x1
++#define MII_CFG_MODE_RMIIP	0x2
++#define MII_CFG_MODE_RMIIM	0x3
++#define MII_CFG_MODE_RGMII	0x4
++#define MII_CFG_MODE_MASK	0xf
++
++#define MII_CFG_RATE_M2P5	0x00
++#define MII_CFG_RATE_M25	0x10
++#define MII_CFG_RATE_M125	0x20
++#define MII_CFG_RATE_M50	0x30
++#define MII_CFG_RATE_AUTO	0x40
++#define MII_CFG_RATE_MASK	0x70
++
++/* cpu port mac */
++#define PMAC_HD_CTL		0x0000
++#define PMAC_RX_IPG		0x0024
++#define PMAC_EWAN		0x002c
++
++#define PMAC_IPG_MASK		0xf
++#define PMAC_HD_CTL_AS		0x0008
++#define PMAC_HD_CTL_AC		0x0004
++#define PMAC_HD_CTL_RC		0x0010
++#define PMAC_HD_CTL_RXSH	0x0040
++#define PMAC_HD_CTL_AST		0x0080
++#define PMAC_HD_CTL_RST		0x0100
++
++/* PCE */
++#define PCE_TBL_KEY(x)		(0x1100 + ((7 - x) * 4))
++#define PCE_TBL_MASK		0x1120
++#define PCE_TBL_VAL(x)		(0x1124 + ((4 - x) * 4))
++#define PCE_TBL_ADDR		0x1138
++#define PCE_TBL_CTRL		0x113c
++#define PCE_PMAP1		0x114c
++#define PCE_PMAP2		0x1150
++#define PCE_PMAP3		0x1154
++#define PCE_GCTRL_REG(x)	(0x1158 + (x * 4))
++#define PCE_PCTRL_REG(p, x)	(0x1200 + (((p * 0xa) + x) * 4))
++
++#define PCE_TBL_BUSY		BIT(15)
++#define PCE_TBL_CFG_ADDR_MASK	0x1f
++#define PCE_TBL_CFG_ADWR	0x20
++#define PCE_TBL_CFG_ADWR_MASK	0x60
++#define PCE_INGRESS		BIT(11)
++
++/* MAC */
++#define MAC_FLEN_REG		(0x2314)
++#define MAC_CTRL_REG(p, x)	(0x240c + (((p * 0xc) + x) * 4))
++
++/* buffer management */
++#define BM_PCFG(p)		(0x200 + (p * 8))
++
++/* special tag in TX path header */
++#define SPID_SHIFT		24
++#define DPID_SHIFT		16
++#define DPID_ENABLE		1
++#define SPID_CPU_PORT		2
++#define PORT_MAP_SEL		BIT(15)
++#define PORT_MAP_EN		BIT(14)
++#define PORT_MAP_SHIFT		1
++#define PORT_MAP_MASK		0x3f
++
++#define SPPID_MASK		0x7
++#define SPPID_SHIFT		4
++
++/* MII regs not yet in linux */
++#define MDIO_DEVAD_NONE		(-1)
++#define ADVERTIZE_MPD		(1 << 10)
++
++struct xrx200_port {
++	u8 num;
++	u8 phy_addr;
++	u16 flags;
++	phy_interface_t phy_if;
++
++	int link;
++	int gpio;
++	enum of_gpio_flags gpio_flags;
++
++	struct phy_device *phydev;
++	struct device_node *phy_node;
++};
++
++struct xrx200_chan {
++	int idx;
++	int refcount;
++	int tx_free;
++
++	struct net_device dummy_dev;
++	struct net_device *devs[XRX200_MAX_DEV];
++
++	struct tasklet_struct tasklet;
++	struct napi_struct napi;
++	struct ltq_dma_channel dma;
++	struct sk_buff *skb[LTQ_DESC_NUM];
++};
++
++struct xrx200_hw {
++	struct clk *clk;
++	struct mii_bus *mii_bus;
++
++	struct xrx200_chan chan[XRX200_MAX_DMA];
++
++	struct net_device *devs[XRX200_MAX_DEV];
++	int num_devs;
++
++	int port_map[XRX200_MAX_PORT];
++	unsigned short wan_map;
++
++	spinlock_t lock;
++
++	struct switch_dev swdev;
++};
++
++struct xrx200_priv {
++	struct net_device_stats stats;
++	int id;
++
++	struct xrx200_port port[XRX200_MAX_PORT];
++	int num_port;
++	bool wan;
++	bool sw;
++	unsigned short port_map;
++	unsigned char mac[6];
++
++	struct xrx200_hw *hw;
++};
++
++static __iomem void *xrx200_switch_membase;
++static __iomem void *xrx200_mii_membase;
++static __iomem void *xrx200_mdio_membase;
++static __iomem void *xrx200_pmac_membase;
++
++#define ltq_switch_r32(x)	ltq_r32(xrx200_switch_membase + (x))
++#define ltq_switch_w32(x, y)	ltq_w32(x, xrx200_switch_membase + (y))
++#define ltq_switch_w32_mask(x, y, z) \
++			ltq_w32_mask(x, y, xrx200_switch_membase + (z))
++
++#define ltq_mdio_r32(x)		ltq_r32(xrx200_mdio_membase + (x))
++#define ltq_mdio_w32(x, y)	ltq_w32(x, xrx200_mdio_membase + (y))
++#define ltq_mdio_w32_mask(x, y, z) \
++			ltq_w32_mask(x, y, xrx200_mdio_membase + (z))
++
++#define ltq_mii_r32(x)		ltq_r32(xrx200_mii_membase + (x))
++#define ltq_mii_w32(x, y)	ltq_w32(x, xrx200_mii_membase + (y))
++#define ltq_mii_w32_mask(x, y, z) \
++			ltq_w32_mask(x, y, xrx200_mii_membase + (z))
++
++#define ltq_pmac_r32(x)		ltq_r32(xrx200_pmac_membase + (x))
++#define ltq_pmac_w32(x, y)	ltq_w32(x, xrx200_pmac_membase + (y))
++#define ltq_pmac_w32_mask(x, y, z) \
++			ltq_w32_mask(x, y, xrx200_pmac_membase + (z))
++
++#define XRX200_GLOBAL_REGATTR(reg) \
++	.id = reg, \
++	.type = SWITCH_TYPE_INT, \
++	.set = xrx200_set_global_attr, \
++	.get = xrx200_get_global_attr
++
++#define XRX200_PORT_REGATTR(reg) \
++	.id = reg, \
++	.type = SWITCH_TYPE_INT, \
++	.set = xrx200_set_port_attr, \
++	.get = xrx200_get_port_attr
++
++static int xrx200sw_read_x(int reg, int x)
++{
++	int value, mask, addr;
++
++	addr = xrx200sw_reg[reg].offset + (xrx200sw_reg[reg].mult * x);
++	value = ltq_switch_r32(addr);
++	mask = (1 << xrx200sw_reg[reg].size) - 1;
++	value = (value >> xrx200sw_reg[reg].shift);
++
++	return (value & mask);
++}
++
++static int xrx200sw_read(int reg)
++{
++	return xrx200sw_read_x(reg, 0);
++}
++
++static void xrx200sw_write_x(int value, int reg, int x)
++{
++	int mask, addr;
++
++	addr = xrx200sw_reg[reg].offset + (xrx200sw_reg[reg].mult * x);
++	mask = (1 << xrx200sw_reg[reg].size) - 1;
++	mask = (mask << xrx200sw_reg[reg].shift);
++	value = (value << xrx200sw_reg[reg].shift) & mask;
++
++	ltq_switch_w32_mask(mask, value, addr);
++}
++
++static void xrx200sw_write(int value, int reg)
++{
++	xrx200sw_write_x(value, reg, 0);
++}
++
++struct xrx200_pce_table_entry {
++	int index;	// PCE_TBL_ADDR.ADDR = pData->table_index
++	int table; 	// PCE_TBL_CTRL.ADDR = pData->table
++	unsigned short key[8];
++	unsigned short val[5];
++	unsigned short mask;
++	unsigned short type;
++	unsigned short valid;
++	unsigned short gmap;
++};
++
++static int xrx200_pce_table_entry_read(struct xrx200_pce_table_entry *tbl)
++{
++	// wait until hardware is ready
++	while (xrx200sw_read(XRX200_PCE_TBL_CTRL_BAS)) {};
++
++	// prepare the table access:
++	// PCE_TBL_ADDR.ADDR = pData->table_index
++	xrx200sw_write(tbl->index, XRX200_PCE_TBL_ADDR_ADDR);
++	// PCE_TBL_CTRL.ADDR = pData->table
++	xrx200sw_write(tbl->table, XRX200_PCE_TBL_CTRL_ADDR);
++
++	//(address-based read)
++	xrx200sw_write(0, XRX200_PCE_TBL_CTRL_OPMOD); // OPMOD_ADRD
++
++	xrx200sw_write(1, XRX200_PCE_TBL_CTRL_BAS); // start access
++
++	// wait until hardware is ready
++	while (xrx200sw_read(XRX200_PCE_TBL_CTRL_BAS)) {};
++
++	// read the keys
++	tbl->key[7] = xrx200sw_read(XRX200_PCE_TBL_KEY_7);
++	tbl->key[6] = xrx200sw_read(XRX200_PCE_TBL_KEY_6);
++	tbl->key[5] = xrx200sw_read(XRX200_PCE_TBL_KEY_5);
++	tbl->key[4] = xrx200sw_read(XRX200_PCE_TBL_KEY_4);
++	tbl->key[3] = xrx200sw_read(XRX200_PCE_TBL_KEY_3);
++	tbl->key[2] = xrx200sw_read(XRX200_PCE_TBL_KEY_2);
++	tbl->key[1] = xrx200sw_read(XRX200_PCE_TBL_KEY_1);
++	tbl->key[0] = xrx200sw_read(XRX200_PCE_TBL_KEY_0);
++
++	// read the values
++	tbl->val[4] = xrx200sw_read(XRX200_PCE_TBL_VAL_4);
++	tbl->val[3] = xrx200sw_read(XRX200_PCE_TBL_VAL_3);
++	tbl->val[2] = xrx200sw_read(XRX200_PCE_TBL_VAL_2);
++	tbl->val[1] = xrx200sw_read(XRX200_PCE_TBL_VAL_1);
++	tbl->val[0] = xrx200sw_read(XRX200_PCE_TBL_VAL_0);
++
++	// read the mask
++	tbl->mask = xrx200sw_read(XRX200_PCE_TBL_MASK_0);
++	// read the type
++	tbl->type = xrx200sw_read(XRX200_PCE_TBL_CTRL_TYPE);
++	// read the valid flag
++	tbl->valid = xrx200sw_read(XRX200_PCE_TBL_CTRL_VLD);
++	// read the group map
++	tbl->gmap = xrx200sw_read(XRX200_PCE_TBL_CTRL_GMAP);
++
++	return 0;
++}
++
++static int xrx200_pce_table_entry_write(struct xrx200_pce_table_entry *tbl)
++{
++	// wait until hardware is ready
++	while (xrx200sw_read(XRX200_PCE_TBL_CTRL_BAS)) {};
++
++	// prepare the table access:
++	// PCE_TBL_ADDR.ADDR = pData->table_index
++	xrx200sw_write(tbl->index, XRX200_PCE_TBL_ADDR_ADDR);
++	// PCE_TBL_CTRL.ADDR = pData->table
++	xrx200sw_write(tbl->table, XRX200_PCE_TBL_CTRL_ADDR);
++
++	//(address-based write)
++	xrx200sw_write(1, XRX200_PCE_TBL_CTRL_OPMOD); // OPMOD_ADWR
++
++	// write the keys
++	xrx200sw_write(tbl->key[7], XRX200_PCE_TBL_KEY_7);
++	xrx200sw_write(tbl->key[6], XRX200_PCE_TBL_KEY_6);
++	xrx200sw_write(tbl->key[5], XRX200_PCE_TBL_KEY_5);
++	xrx200sw_write(tbl->key[4], XRX200_PCE_TBL_KEY_4);
++	xrx200sw_write(tbl->key[3], XRX200_PCE_TBL_KEY_3);
++	xrx200sw_write(tbl->key[2], XRX200_PCE_TBL_KEY_2);
++	xrx200sw_write(tbl->key[1], XRX200_PCE_TBL_KEY_1);
++	xrx200sw_write(tbl->key[0], XRX200_PCE_TBL_KEY_0);
++
++	// write the values
++	xrx200sw_write(tbl->val[4], XRX200_PCE_TBL_VAL_4);
++	xrx200sw_write(tbl->val[3], XRX200_PCE_TBL_VAL_3);
++	xrx200sw_write(tbl->val[2], XRX200_PCE_TBL_VAL_2);
++	xrx200sw_write(tbl->val[1], XRX200_PCE_TBL_VAL_1);
++	xrx200sw_write(tbl->val[0], XRX200_PCE_TBL_VAL_0);
++
++	// write the mask
++	xrx200sw_write(tbl->mask, XRX200_PCE_TBL_MASK_0);
++	// write the type
++	xrx200sw_write(tbl->type, XRX200_PCE_TBL_CTRL_TYPE);
++	// write the valid flag
++	xrx200sw_write(tbl->valid, XRX200_PCE_TBL_CTRL_VLD);
++	// write the group map
++	xrx200sw_write(tbl->gmap, XRX200_PCE_TBL_CTRL_GMAP);
++
++	xrx200sw_write(1, XRX200_PCE_TBL_CTRL_BAS); // start access
++
++	// wait until hardware is ready
++	while (xrx200sw_read(XRX200_PCE_TBL_CTRL_BAS)) {};
++
++	return 0;
++}
++
++static void xrx200sw_fixup_pvids(void)
++{
++	int index, p, portmap, untagged;
++	struct xrx200_pce_table_entry tem;
++	struct xrx200_pce_table_entry tev;
++
++	portmap = 0;
++	for (p = 0; p < XRX200_MAX_PORT; p++)
++		portmap |= BIT(p);
++
++	tem.table = XRX200_PCE_VLANMAP_IDX;
++	tev.table = XRX200_PCE_ACTVLAN_IDX;
++
++	for (index = XRX200_MAX_VLAN; index-- > 0;)
++	{
++		tev.index = index;
++		xrx200_pce_table_entry_read(&tev);
++
++		if (tev.valid == 0)
++			continue;
++
++		tem.index = index;
++		xrx200_pce_table_entry_read(&tem);
++
++		if (tem.val[0] == 0)
++			continue;
++
++		untagged = portmap & (tem.val[1] ^ tem.val[2]);
++
++		for (p = 0; p < XRX200_MAX_PORT; p++)
++			if (untagged & BIT(p))
++			{
++				portmap &= ~BIT(p);
++				xrx200sw_write_x(index, XRX200_PCE_DEFPVID_PVID, p);
++			}
++
++		for (p = 0; p < XRX200_MAX_PORT; p++)
++			if (portmap & BIT(p))
++				xrx200sw_write_x(index, XRX200_PCE_DEFPVID_PVID, p);
++	}
++}
++
++// swconfig interface
++static void xrx200_hw_init(struct xrx200_hw *hw);
++
++// global
++static int xrx200sw_reset_switch(struct switch_dev *dev)
++{
++	struct xrx200_hw *hw = container_of(dev, struct xrx200_hw, swdev);
++
++	xrx200_hw_init(hw);
++
++	return 0;
++}
++
++static int xrx200_set_vlan_mode_enable(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
++{
++	int p;
++
++	if ((attr->max > 0) && (val->value.i > attr->max))
++		return -EINVAL;
++
++	for (p = 0; p < XRX200_MAX_PORT; p++) {
++		xrx200sw_write_x(val->value.i, XRX200_PCE_VCTRL_VEMR, p);
++		xrx200sw_write_x(val->value.i, XRX200_PCE_VCTRL_VIMR, p);
++	}
++
++	xrx200sw_write(val->value.i, XRX200_PCE_GCTRL_0_VLAN);
++	return 0;
++}
++
++static int xrx200_get_vlan_mode_enable(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
++{
++	val->value.i = xrx200sw_read(attr->id);
++	return 0;
++}
++
++static int xrx200_set_global_attr(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
++{
++	if ((attr->max > 0) && (val->value.i > attr->max))
++		return -EINVAL;
++
++	xrx200sw_write(val->value.i, attr->id);
++	return 0;
++}
++
++static int xrx200_get_global_attr(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
++{
++	val->value.i = xrx200sw_read(attr->id);
++	return 0;
++}
++
++// vlan
++static int xrx200sw_set_vlan_vid(struct switch_dev *dev, const struct switch_attr *attr,
++				 struct switch_val *val)
++{
++	int i;
++	struct xrx200_pce_table_entry tev;
++	struct xrx200_pce_table_entry tem;
++
++	tev.table = XRX200_PCE_ACTVLAN_IDX;
++
++	for (i = 0; i < XRX200_MAX_VLAN; i++)
++	{
++		tev.index = i;
++		xrx200_pce_table_entry_read(&tev);
++		if (tev.key[0] == val->value.i && i != val->port_vlan)
++			return -EINVAL;
++	}
++
++	tev.index = val->port_vlan;
++	xrx200_pce_table_entry_read(&tev);
++	tev.key[0] = val->value.i;
++	tev.valid = val->value.i > 0;
++	xrx200_pce_table_entry_write(&tev);
++
++	tem.table = XRX200_PCE_VLANMAP_IDX;
++	tem.index = val->port_vlan;
++	xrx200_pce_table_entry_read(&tem);
++	tem.val[0] = val->value.i;
++	xrx200_pce_table_entry_write(&tem);
++
++	xrx200sw_fixup_pvids();
++	return 0;
++}
++
++static int xrx200sw_get_vlan_vid(struct switch_dev *dev, const struct switch_attr *attr,
++				 struct switch_val *val)
++{
++	struct xrx200_pce_table_entry te;
++
++	te.table = XRX200_PCE_ACTVLAN_IDX;
++	te.index = val->port_vlan;
++	xrx200_pce_table_entry_read(&te);
++	val->value.i = te.key[0];
++
++	return 0;
++}
++
++static int xrx200sw_set_vlan_ports(struct switch_dev *dev, struct switch_val *val)
++{
++	int i, portmap, tagmap, untagged;
++	struct xrx200_pce_table_entry tem;
++
++	portmap = 0;
++	tagmap = 0;
++	for (i = 0; i < val->len; i++)
++	{
++		struct switch_port *p = &val->value.ports[i];
++
++		portmap |= (1 << p->id);
++		if (p->flags & (1 << SWITCH_PORT_FLAG_TAGGED))
++			tagmap |= (1 << p->id);
++	}
++
++	tem.table = XRX200_PCE_VLANMAP_IDX;
++
++	untagged = portmap ^ tagmap;
++	for (i = 0; i < XRX200_MAX_VLAN; i++)
++	{
++		tem.index = i;
++		xrx200_pce_table_entry_read(&tem);
++
++		if (tem.val[0] == 0)
++			continue;
++
++		if ((untagged & (tem.val[1] ^ tem.val[2])) && (val->port_vlan != i))
++			return -EINVAL;
++	}
++
++	tem.index = val->port_vlan;
++	xrx200_pce_table_entry_read(&tem);
++
++	// auto-enable this vlan if not enabled already
++	if (tem.val[0] == 0)
++	{
++		struct switch_val v;
++		v.port_vlan = val->port_vlan;
++		v.value.i = val->port_vlan;
++		if(xrx200sw_set_vlan_vid(dev, NULL, &v))
++			return -EINVAL;
++
++		//read updated tem
++		tem.index = val->port_vlan;
++		xrx200_pce_table_entry_read(&tem);
++	}
++
++	tem.val[1] = portmap;
++	tem.val[2] = tagmap;
++	xrx200_pce_table_entry_write(&tem);
++
++	xrx200sw_fixup_pvids();
++
++	return 0;
++}
++
++static int xrx200sw_get_vlan_ports(struct switch_dev *dev, struct switch_val *val)
++{
++	int i;
++	unsigned short ports, tags;
++	struct xrx200_pce_table_entry tem;
++
++	tem.table = XRX200_PCE_VLANMAP_IDX;
++	tem.index = val->port_vlan;
++	xrx200_pce_table_entry_read(&tem);
++
++	ports = tem.val[1];
++	tags = tem.val[2];
++
++	for (i = 0; i < XRX200_MAX_PORT; i++) {
++		struct switch_port *p;
++
++		if (!(ports & (1 << i)))
++			continue;
++
++		p = &val->value.ports[val->len++];
++		p->id = i;
++		if (tags & (1 << i))
++			p->flags = (1 << SWITCH_PORT_FLAG_TAGGED);
++		else
++			p->flags = 0;
++	}
++
++	return 0;
++}
++
++static int xrx200sw_set_vlan_enable(struct switch_dev *dev, const struct switch_attr *attr,
++				 struct switch_val *val)
++{
++	struct xrx200_pce_table_entry tev;
++
++	tev.table = XRX200_PCE_ACTVLAN_IDX;
++	tev.index = val->port_vlan;
++	xrx200_pce_table_entry_read(&tev);
++
++	if (tev.key[0] == 0)
++		return -EINVAL;
++
++	tev.valid = val->value.i;
++	xrx200_pce_table_entry_write(&tev);
++
++	xrx200sw_fixup_pvids();
++	return 0;
++}
++
++static int xrx200sw_get_vlan_enable(struct switch_dev *dev, const struct switch_attr *attr,
++				 struct switch_val *val)
++{
++	struct xrx200_pce_table_entry tev;
++
++	tev.table = XRX200_PCE_ACTVLAN_IDX;
++	tev.index = val->port_vlan;
++	xrx200_pce_table_entry_read(&tev);
++	val->value.i = tev.valid;
++
++	return 0;
++}
++
++// port
++static int xrx200sw_get_port_pvid(struct switch_dev *dev, int port, int *val)
++{
++	struct xrx200_pce_table_entry tev;
++
++	if (port >= XRX200_MAX_PORT)
++		return -EINVAL;
++
++	tev.table = XRX200_PCE_ACTVLAN_IDX;
++	tev.index = xrx200sw_read_x(XRX200_PCE_DEFPVID_PVID, port);
++	xrx200_pce_table_entry_read(&tev);
++
++	*val = tev.key[0];
++	return 0;
++}
++
++static int xrx200sw_get_port_link(struct switch_dev *dev,
++				  int port,
++				  struct switch_port_link *link)
++{
++	if (port >= XRX200_MAX_PORT)
++		return -EINVAL;
++
++	link->link = xrx200sw_read_x(XRX200_MAC_PSTAT_LSTAT, port);
++	if (!link->link)
++		return 0;
++
++	link->duplex = xrx200sw_read_x(XRX200_MAC_PSTAT_FDUP, port);
++
++	link->rx_flow = !!(xrx200sw_read_x(XRX200_MAC_CTRL_0_FCON, port) & 0x0010);
++	link->tx_flow = !!(xrx200sw_read_x(XRX200_MAC_CTRL_0_FCON, port) & 0x0020);
++	link->aneg = !(xrx200sw_read_x(XRX200_MAC_CTRL_0_FCON, port));
++
++	link->speed = SWITCH_PORT_SPEED_10;
++	if (xrx200sw_read_x(XRX200_MAC_PSTAT_MBIT, port))
++		link->speed = SWITCH_PORT_SPEED_100;
++	if (xrx200sw_read_x(XRX200_MAC_PSTAT_GBIT, port))
++		link->speed = SWITCH_PORT_SPEED_1000;
++
++	return 0;
++}
++
++static int xrx200_set_port_attr(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
++{
++	if (val->port_vlan >= XRX200_MAX_PORT)
++		return -EINVAL;
++
++	if ((attr->max > 0) && (val->value.i > attr->max))
++		return -EINVAL;
++
++	xrx200sw_write_x(val->value.i, attr->id, val->port_vlan);
++	return 0;
++}
++
++static int xrx200_get_port_attr(struct switch_dev *dev, const struct switch_attr *attr, struct switch_val *val)
++{
++	if (val->port_vlan >= XRX200_MAX_PORT)
++		return -EINVAL;
++
++	val->value.i = xrx200sw_read_x(attr->id, val->port_vlan);
++	return 0;
++}
++
++// attributes
++static struct switch_attr xrx200sw_globals[] = {
++	{
++		.type = SWITCH_TYPE_INT,
++		.set = xrx200_set_vlan_mode_enable,
++		.get = xrx200_get_vlan_mode_enable,
++		.name = "enable_vlan",
++		.description = "Enable VLAN mode",
++		.max = 1},
++};
++
++static struct switch_attr xrx200sw_port[] = {
++	{
++	XRX200_PORT_REGATTR(XRX200_PCE_VCTRL_UVR),
++	.name = "uvr",
++	.description = "Unknown VLAN Rule",
++	.max = 1,
++	},
++	{
++	XRX200_PORT_REGATTR(XRX200_PCE_VCTRL_VSR),
++	.name = "vsr",
++	.description = "VLAN Security Rule",
++	.max = 1,
++	},
++	{
++	XRX200_PORT_REGATTR(XRX200_PCE_VCTRL_VINR),
++	.name = "vinr",
++	.description = "VLAN Ingress Tag Rule",
++	.max = 2,
++	},
++	{
++	XRX200_PORT_REGATTR(XRX200_PCE_PCTRL_0_TVM),
++	.name = "tvm",
++	.description = "Transparent VLAN Mode",
++	.max = 1,
++	},
++};
++
++static struct switch_attr xrx200sw_vlan[] = {
++	{
++		.type = SWITCH_TYPE_INT,
++		.name = "vid",
++		.description = "VLAN ID (0-4094)",
++		.set = xrx200sw_set_vlan_vid,
++		.get = xrx200sw_get_vlan_vid,
++		.max = 4094,
++	},
++	{
++		.type = SWITCH_TYPE_INT,
++		.name = "enable",
++		.description = "Enable VLAN",
++		.set = xrx200sw_set_vlan_enable,
++		.get = xrx200sw_get_vlan_enable,
++		.max = 1,
++	},
++};
++
++static const struct switch_dev_ops xrx200sw_ops = {
++	.attr_global = {
++		.attr = xrx200sw_globals,
++		.n_attr = ARRAY_SIZE(xrx200sw_globals),
++	},
++	.attr_port = {
++		.attr = xrx200sw_port,
++		.n_attr = ARRAY_SIZE(xrx200sw_port),
++	},
++	.attr_vlan = {
++		.attr = xrx200sw_vlan,
++		.n_attr = ARRAY_SIZE(xrx200sw_vlan),
++	},
++	.get_vlan_ports = xrx200sw_get_vlan_ports,
++	.set_vlan_ports = xrx200sw_set_vlan_ports,
++	.get_port_pvid = xrx200sw_get_port_pvid,
++	.reset_switch = xrx200sw_reset_switch,
++	.get_port_link = xrx200sw_get_port_link,
++//	.get_port_stats = xrx200sw_get_port_stats, //TODO
++};
++
++static int xrx200sw_init(struct xrx200_hw *hw)
++{
++	int netdev_num;
++
++	for (netdev_num = 0; netdev_num < hw->num_devs; netdev_num++)
++	{
++		struct switch_dev *swdev;
++		struct net_device *dev = hw->devs[netdev_num];
++		struct xrx200_priv *priv = netdev_priv(dev);
++		if (!priv->sw)
++			continue;
++
++		swdev = &hw->swdev;
++
++		swdev->name = "Lantiq XRX200 Switch";
++		swdev->vlans = XRX200_MAX_VLAN;
++		swdev->ports = XRX200_MAX_PORT;
++		swdev->cpu_port = 6;
++		swdev->ops = &xrx200sw_ops;
++
++		register_switch(swdev, dev);
++		return 0; // enough switches
++	}
++	return 0;
++}
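++
++/* Example user-space usage of the swconfig attributes defined above
++ * (illustrative only; the device name and port/VLAN numbers depend on the
++ * board):
++ *
++ *   swconfig dev switch0 set enable_vlan 1
++ *   swconfig dev switch0 vlan 1 set vid 1
++ *   swconfig dev switch0 vlan 1 set ports "0 1 2 6t"
++ *   swconfig dev switch0 port 2 set tvm 1
++ */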
++
++static int xrx200_open(struct net_device *dev)
++{
++	struct xrx200_priv *priv = netdev_priv(dev);
++	int i;
++
++	for (i = 0; i < XRX200_MAX_DMA; i++) {
++		if (!priv->hw->chan[i].dma.irq)
++			continue;
++		spin_lock_bh(&priv->hw->lock);
++		if (!priv->hw->chan[i].refcount) {
++			if (XRX200_DMA_IS_RX(i))
++				napi_enable(&priv->hw->chan[i].napi);
++			ltq_dma_open(&priv->hw->chan[i].dma);
++		}
++		priv->hw->chan[i].refcount++;
++		spin_unlock_bh(&priv->hw->lock);
++	}
++	for (i = 0; i < priv->num_port; i++)
++		if (priv->port[i].phydev)
++			phy_start(priv->port[i].phydev);
++	netif_start_queue(dev);
++
++	return 0;
++}
++
++static int xrx200_close(struct net_device *dev)
++{
++	struct xrx200_priv *priv = netdev_priv(dev);
++	int i;
++
++	netif_stop_queue(dev);
++
++	for (i = 0; i < priv->num_port; i++)
++		if (priv->port[i].phydev)
++			phy_stop(priv->port[i].phydev);
++
++	for (i = 0; i < XRX200_MAX_DMA; i++) {
++		if (!priv->hw->chan[i].dma.irq)
++			continue;
++		spin_lock_bh(&priv->hw->lock);
++		priv->hw->chan[i].refcount--;
++		if (!priv->hw->chan[i].refcount) {
++			if (XRX200_DMA_IS_RX(i))
++				napi_disable(&priv->hw->chan[i].napi);
++				ltq_dma_close(&priv->hw->chan[i].dma);
++		}
++		spin_unlock_bh(&priv->hw->lock);
++	}
++
++	return 0;
++}
++
++static int xrx200_alloc_skb(struct xrx200_chan *ch)
++{
++#define DMA_PAD	(NET_IP_ALIGN + NET_SKB_PAD)
++	ch->skb[ch->dma.desc] = dev_alloc_skb(XRX200_DMA_DATA_LEN + DMA_PAD);
++	if (!ch->skb[ch->dma.desc])
++		goto skip;
++
++	skb_reserve(ch->skb[ch->dma.desc], NET_SKB_PAD);
++	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
++		ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
++			DMA_FROM_DEVICE);
++	ch->dma.desc_base[ch->dma.desc].addr =
++		CPHYSADDR(ch->skb[ch->dma.desc]->data);
++	skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
++
++skip:
++	ch->dma.desc_base[ch->dma.desc].ctl =
++		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
++		XRX200_DMA_DATA_LEN;
++
++	return 0;
++}
++
++static void xrx200_hw_receive(struct xrx200_chan *ch, int id)
++{
++	struct net_device *dev = ch->devs[id];
++	struct xrx200_priv *priv = netdev_priv(dev);
++	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
++	struct sk_buff *skb = ch->skb[ch->dma.desc];
++	int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
++	int ret;
++
++	ret = xrx200_alloc_skb(ch);
++
++	ch->dma.desc++;
++	ch->dma.desc %= LTQ_DESC_NUM;
++
++	if (ret) {
++		netdev_err(dev,
++			"failed to allocate new rx buffer\n");
++		return;
++	}
++
++	skb_put(skb, len);
++#ifdef SW_ROUTING
++	skb_pull(skb, 8);
++#endif
++	skb->dev = dev;
++	skb->protocol = eth_type_trans(skb, dev);
++	netif_receive_skb(skb);
++	priv->stats.rx_packets++;
++	priv->stats.rx_bytes+=len;
++}
++
++static int xrx200_poll_rx(struct napi_struct *napi, int budget)
++{
++	struct xrx200_chan *ch = container_of(napi,
++				struct xrx200_chan, napi);
++	struct xrx200_priv *priv = netdev_priv(ch->devs[0]);
++	int rx = 0;
++	int complete = 0;
++
++	while ((rx < budget) && !complete) {
++		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
++		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
++#ifdef SW_ROUTING
++			struct sk_buff *skb = ch->skb[ch->dma.desc];
++			u32 *special_tag = (u32*)skb->data;
++			int port = (special_tag[1] >> SPPID_SHIFT) & SPPID_MASK;
++			xrx200_hw_receive(ch, priv->hw->port_map[port]);
++#else
++			xrx200_hw_receive(ch, 0);
++#endif
++			rx++;
++		} else {
++			complete = 1;
++		}
++	}
++
++	if (complete || !rx) {
++		napi_complete(&ch->napi);
++		ltq_dma_enable_irq(&ch->dma);
++	}
++
++	return rx;
++}
++
++static void xrx200_tx_housekeeping(unsigned long ptr)
++{
++	struct xrx200_hw *hw = (struct xrx200_hw *) ptr;
++	struct xrx200_chan *ch = &hw->chan[XRX200_DMA_TX];
++	int pkts = 0;
++	int i;
++
++	spin_lock_bh(&hw->lock);
++	ltq_dma_ack_irq(&ch->dma);
++	while ((ch->dma.desc_base[ch->tx_free].ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
++		struct sk_buff *skb = ch->skb[ch->tx_free];
++
++		pkts++;
++		ch->skb[ch->tx_free] = NULL;
++		dev_kfree_skb(skb);
++		memset(&ch->dma.desc_base[ch->tx_free], 0,
++			sizeof(struct ltq_dma_desc));
++		ch->tx_free++;
++		ch->tx_free %= LTQ_DESC_NUM;
++	}
++	ltq_dma_enable_irq(&ch->dma);
++	spin_unlock_bh(&hw->lock);
++
++	if (!pkts)
++		return;
++
++	for (i = 0; i < XRX200_MAX_DEV && ch->devs[i]; i++)
++		netif_wake_queue(ch->devs[i]);
++}
++
++static struct net_device_stats *xrx200_get_stats (struct net_device *dev)
++{
++	struct xrx200_priv *priv = netdev_priv(dev);
++
++	return &priv->stats;
++}
++
++static void xrx200_tx_timeout(struct net_device *dev)
++{
++	struct xrx200_priv *priv = netdev_priv(dev);
++
++	printk(KERN_ERR "%s: transmit timed out, disable the dma channel irq\n", dev->name);
++
++	priv->stats.tx_errors++;
++	netif_wake_queue(dev);
++}
++
++static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	struct xrx200_priv *priv = netdev_priv(dev);
++	struct xrx200_chan *ch = &priv->hw->chan[XRX200_DMA_TX];
++	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
++	u32 byte_offset;
++	int len;
++#ifdef SW_ROUTING
++  #ifdef SW_PORTMAP
++	u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | PORT_MAP_SEL | PORT_MAP_EN | DPID_ENABLE;
++  #else
++	u32 special_tag = (SPID_CPU_PORT << SPID_SHIFT) | DPID_ENABLE;
++  #endif
++#endif
++
++	skb->dev = dev;
++	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
++
++	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
++		netdev_err(dev, "tx ring full\n");
++		netif_stop_queue(dev);
++		return NETDEV_TX_BUSY;
++	}
++#ifdef SW_ROUTING
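++	/* prepend the 4 byte CPU special tag so the switch knows the egress port(s) */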
++  #ifdef SW_PORTMAP
++	special_tag |= priv->port_map << PORT_MAP_SHIFT;
++  #else
++	if (priv->id)
++		special_tag |= (1 << DPID_SHIFT);
++  #endif
++	if (skb_headroom(skb) < 4) {
++		struct sk_buff *tmp = skb_realloc_headroom(skb, 4);
++
++		dev_kfree_skb_any(skb);
++		if (!tmp)
++			return NETDEV_TX_OK;
++		skb = tmp;
++	}
++	skb_push(skb, 4);
++	memcpy(skb->data, &special_tag, sizeof(u32));
++	len += 4;
++#endif
++
++	/* dma needs to start on a 16 byte aligned address */
++	byte_offset = CPHYSADDR(skb->data) % 16;
++	ch->skb[ch->dma.desc] = skb;
++
++	dev->trans_start = jiffies;
++
++	spin_lock_bh(&priv->hw->lock);
++	desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
++						DMA_TO_DEVICE)) - byte_offset;
++	wmb();
++	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
++		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
++	ch->dma.desc++;
++	ch->dma.desc %= LTQ_DESC_NUM;
++	if (ch->dma.desc == ch->tx_free)
++		netif_stop_queue(dev);
++
++	spin_unlock_bh(&priv->hw->lock);
++
++	priv->stats.tx_packets++;
++	priv->stats.tx_bytes += len;
++
++	return NETDEV_TX_OK;
++}
++
++static irqreturn_t xrx200_dma_irq(int irq, void *priv)
++{
++	struct xrx200_hw *hw = priv;
++	int chnr = irq - XRX200_DMA_IRQ;
++	struct xrx200_chan *ch = &hw->chan[chnr];
++
++	ltq_dma_disable_irq(&ch->dma);
++	ltq_dma_ack_irq(&ch->dma);
++
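++	/* odd channels are serviced by the housekeeping tasklet, even ones by NAPI */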
++	if (chnr % 2)
++		tasklet_schedule(&ch->tasklet);
++	else
++		napi_schedule(&ch->napi);
++
++	return IRQ_HANDLED;
++}
++
++static int xrx200_dma_init(struct xrx200_hw *hw)
++{
++	int i, err = 0;
++
++	ltq_dma_init_port(DMA_PORT_ETOP);
++
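++	/* walk the 8 DMA channels but only set up the RX and TX channels we use */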
++	for (i = 0; i < 8 && !err; i++) {
++		int irq = XRX200_DMA_IRQ + i;
++		struct xrx200_chan *ch = &hw->chan[i];
++
++		ch->idx = ch->dma.nr = i;
++
++		if (i == XRX200_DMA_TX) {
++			ltq_dma_alloc_tx(&ch->dma);
++			err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_tx", hw);
++		} else if (i == XRX200_DMA_RX) {
++			ltq_dma_alloc_rx(&ch->dma);
++			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
++					ch->dma.desc++)
++				if (xrx200_alloc_skb(ch))
++					err = -ENOMEM;
++			ch->dma.desc = 0;
++			if (!err)
++				err = request_irq(irq, xrx200_dma_irq, 0, "vrx200_rx", hw);
++		} else
++			continue;
++
++		if (!err)
++			ch->dma.irq = irq;
++	}
++
++	return err;
++}
++
++#ifdef SW_POLLING
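++/* mirror the PHY state (link, speed, duplex) into the MDIO_PHY register and
++ * select the matching MII_CFG clock rate
++ */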
++static void xrx200_gmac_update(struct xrx200_port *port)
++{
++	u16 phyaddr = port->phydev->addr & MDIO_PHY_ADDR_MASK;
++	u16 miimode = ltq_mii_r32(MII_CFG(port->num)) & MII_CFG_MODE_MASK;
++	u16 miirate = 0;
++
++	switch (port->phydev->speed) {
++	case SPEED_1000:
++		phyaddr |= MDIO_PHY_SPEED_G1;
++		miirate = MII_CFG_RATE_M125;
++		break;
++
++	case SPEED_100:
++		phyaddr |= MDIO_PHY_SPEED_M100;
++		switch (miimode) {
++		case MII_CFG_MODE_RMIIM:
++		case MII_CFG_MODE_RMIIP:
++			miirate = MII_CFG_RATE_M50;
++			break;
++		default:
++			miirate = MII_CFG_RATE_M25;
++			break;
++		}
++		break;
++
++	default:
++		phyaddr |= MDIO_PHY_SPEED_M10;
++		miirate = MII_CFG_RATE_M2P5;
++		break;
++	}
++
++	if (port->phydev->link)
++		phyaddr |= MDIO_PHY_LINK_UP;
++	else
++		phyaddr |= MDIO_PHY_LINK_DOWN;
++
++	if (port->phydev->duplex == DUPLEX_FULL)
++		phyaddr |= MDIO_PHY_FDUP_EN;
++	else
++		phyaddr |= MDIO_PHY_FDUP_DIS;
++
++	ltq_mdio_w32_mask(MDIO_UPDATE_MASK, phyaddr, MDIO_PHY(port->num));
++	ltq_mii_w32_mask(MII_CFG_RATE_MASK, miirate, MII_CFG(port->num));
++	udelay(1);
++}
++#else
++static void xrx200_gmac_update(struct xrx200_port *port)
++{
++
++}
++#endif
++
++static void xrx200_mdio_link(struct net_device *dev)
++{
++	struct xrx200_priv *priv = netdev_priv(dev);
++	int i;
++
++	for (i = 0; i < priv->num_port; i++) {
++		if (!priv->port[i].phydev)
++			continue;
++
++		if (priv->port[i].link != priv->port[i].phydev->link) {
++			xrx200_gmac_update(&priv->port[i]);
++			priv->port[i].link = priv->port[i].phydev->link;
++			netdev_info(dev, "port %d %s link\n",
++				priv->port[i].num,
++				priv->port[i].link ? "got" : "lost");
++		}
++	}
++}
++
++static inline int xrx200_mdio_poll(struct mii_bus *bus)
++{
++	unsigned cnt = 10000;
++
++	while (likely(cnt--)) {
++		unsigned ctrl = ltq_mdio_r32(MDIO_CTRL);
++		if ((ctrl & MDIO_BUSY) == 0)
++			return 0;
++	}
++
++	return 1;
++}
++
++static int xrx200_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val)
++{
++	if (xrx200_mdio_poll(bus))
++		return 1;
++
++	ltq_mdio_w32(val, MDIO_WRITE);
++	ltq_mdio_w32(MDIO_BUSY | MDIO_WR |
++		((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
++		(reg & MDIO_MASK),
++		MDIO_CTRL);
++
++	return 0;
++}
++
++static int xrx200_mdio_rd(struct mii_bus *bus, int addr, int reg)
++{
++	if (xrx200_mdio_poll(bus))
++		return -1;
++
++	ltq_mdio_w32(MDIO_BUSY | MDIO_RD |
++		((addr & MDIO_MASK) << MDIO_ADDRSHIFT) |
++		(reg & MDIO_MASK),
++		MDIO_CTRL);
++
++	if (xrx200_mdio_poll(bus))
++		return -1;
++
++	return ltq_mdio_r32(MDIO_READ);
++}
++
++static int xrx200_mdio_probe(struct net_device *dev, struct xrx200_port *port)
++{
++	struct xrx200_priv *priv = netdev_priv(dev);
++	struct phy_device *phydev = NULL;
++	unsigned val;
++
++	phydev = priv->hw->mii_bus->phy_map[port->phy_addr];
++
++	if (!phydev) {
++		netdev_err(dev, "no PHY found\n");
++		return -ENODEV;
++	}
++
++	phydev = phy_connect(dev, dev_name(&phydev->dev), &xrx200_mdio_link,
++				port->phy_if);
++
++	if (IS_ERR(phydev)) {
++		netdev_err(dev, "Could not attach to PHY\n");
++		return PTR_ERR(phydev);
++	}
++
++	phydev->supported &= (SUPPORTED_10baseT_Half
++			| SUPPORTED_10baseT_Full
++			| SUPPORTED_100baseT_Half
++			| SUPPORTED_100baseT_Full
++			| SUPPORTED_1000baseT_Half
++			| SUPPORTED_1000baseT_Full
++			| SUPPORTED_Autoneg
++			| SUPPORTED_MII
++			| SUPPORTED_TP);
++	phydev->advertising = phydev->supported;
++	port->phydev = phydev;
++
++	pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
++		dev->name, phydev->drv->name,
++		dev_name(&phydev->dev), phydev->irq);
++
++#ifdef SW_POLLING
++	phy_read_status(phydev);
++
++	val = xrx200_mdio_rd(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
++	val |= ADVERTIZE_MPD;
++	xrx200_mdio_wr(priv->hw->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
++	xrx200_mdio_wr(priv->hw->mii_bus, 0, 0, 0x1040);
++
++	phy_start_aneg(phydev);
++#endif
++	return 0;
++}
++
++static void xrx200_port_config(struct xrx200_priv *priv,
++		const struct xrx200_port *port)
++{
++	u16 miimode = 0;
++
++	switch (port->num) {
++	case 0: /* xMII0 */
++	case 1: /* xMII1 */
++		switch (port->phy_if) {
++		case PHY_INTERFACE_MODE_MII:
++			if (port->flags & XRX200_PORT_TYPE_PHY)
++				/* MII MAC mode, connected to external PHY */
++				miimode = MII_CFG_MODE_MIIM;
++			else
++				/* MII PHY mode, connected to external MAC */
++				miimode = MII_CFG_MODE_MIIP;
++			break;
++		case PHY_INTERFACE_MODE_RMII:
++			if (port->flags & XRX200_PORT_TYPE_PHY)
++				/* RMII MAC mode, connected to external PHY */
++				miimode = MII_CFG_MODE_RMIIM;
++			else
++				/* RMII PHY mode, connected to external MAC */
++				miimode = MII_CFG_MODE_RMIIP;
++			break;
++		case PHY_INTERFACE_MODE_RGMII:
++			/* RGMII MAC mode, connected to external PHY */
++			miimode = MII_CFG_MODE_RGMII;
++			break;
++		default:
++			break;
++		}
++		break;
++	case 2: /* internal GPHY0 */
++	case 3: /* internal GPHY0 */
++	case 4: /* internal GPHY1 */
++		switch (port->phy_if) {
++			case PHY_INTERFACE_MODE_MII:
++			case PHY_INTERFACE_MODE_GMII:
++				/* MII MAC mode, connected to internal GPHY */
++				miimode = MII_CFG_MODE_MIIM;
++				break;
++			default:
++				break;
++		}
++		break;
++	case 5: /* internal GPHY1 or xMII2 */
++		switch (port->phy_if) {
++		case PHY_INTERFACE_MODE_MII:
++			/* MII MAC mode, connected to internal GPHY */
++			miimode = MII_CFG_MODE_MIIM;
++			break;
++		case PHY_INTERFACE_MODE_RGMII:
++			/* RGMII MAC mode, connected to external PHY */
++			miimode = MII_CFG_MODE_RGMII;
++			break;
++		default:
++			break;
++		}
++		break;
++	default:
++		break;
++	}
++
++	ltq_mii_w32_mask(MII_CFG_MODE_MASK, miimode | MII_CFG_EN,
++		MII_CFG(port->num));
++}
++
++static int xrx200_init(struct net_device *dev)
++{
++	struct xrx200_priv *priv = netdev_priv(dev);
++	struct sockaddr mac;
++	int err, i;
++
++#ifndef SW_POLLING
++	unsigned int reg = 0;
++
++	/* enable auto polling */
++	for (i = 0; i < priv->num_port; i++)
++		reg |= BIT(priv->port[i].num);
++	ltq_mdio_w32(reg, MDIO_CLK_CFG0);
++	ltq_mdio_w32(MDIO1_25MHZ, MDIO_CLK_CFG1);
++#endif
++
++	/* setup each port */
++	for (i = 0; i < priv->num_port; i++)
++		xrx200_port_config(priv, &priv->port[i]);
++
++	memcpy(&mac.sa_data, priv->mac, ETH_ALEN);
++	if (!is_valid_ether_addr(mac.sa_data)) {
++		pr_warn("net-xrx200: invalid MAC, using random\n");
++		eth_random_addr(mac.sa_data);
++		dev->addr_assign_type |= NET_ADDR_RANDOM;
++	}
++
++	err = eth_mac_addr(dev, &mac);
++	if (err)
++		goto err_netdev;
++
++	for (i = 0; i < priv->num_port; i++)
++		if (xrx200_mdio_probe(dev, &priv->port[i]))
++			pr_warn("xrx200-mdio: probing phy of port %d failed\n",
++					 priv->port[i].num);
++
++	return 0;
++
++err_netdev:
++	unregister_netdev(dev);
++	free_netdev(dev);
++	return err;
++}
++
++static void xrx200_pci_microcode(void)
++{
++	int i;
++
++	ltq_switch_w32_mask(PCE_TBL_CFG_ADDR_MASK | PCE_TBL_CFG_ADWR_MASK,
++		PCE_TBL_CFG_ADWR, PCE_TBL_CTRL);
++	ltq_switch_w32(0, PCE_TBL_MASK);
++
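++	/* write each microcode entry; the value words go in reversed order */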
++	for (i = 0; i < ARRAY_SIZE(pce_microcode); i++) {
++		ltq_switch_w32(i, PCE_TBL_ADDR);
++		ltq_switch_w32(pce_microcode[i].val[3], PCE_TBL_VAL(0));
++		ltq_switch_w32(pce_microcode[i].val[2], PCE_TBL_VAL(1));
++		ltq_switch_w32(pce_microcode[i].val[1], PCE_TBL_VAL(2));
++		ltq_switch_w32(pce_microcode[i].val[0], PCE_TBL_VAL(3));
++
++		/* start the table access and wait for completion */
++		ltq_switch_w32_mask(0, PCE_TBL_BUSY, PCE_TBL_CTRL);
++		while (ltq_switch_r32(PCE_TBL_CTRL) & PCE_TBL_BUSY)
++			;
++	}
++
++	/* tell the switch that the microcode is loaded */
++	ltq_switch_w32_mask(0, BIT(3), PCE_GCTRL_REG(0));
++}
++
++static void xrx200_hw_init(struct xrx200_hw *hw)
++{
++	int i;
++
++	/* enable clock gate */
++	clk_enable(hw->clk);
++
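++	/* pulse the reset register (offset 0) and give the switch time to settle */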
++	ltq_switch_w32(1, 0);
++	mdelay(100);
++	ltq_switch_w32(0, 0);
++	/*
++	 * TODO: we should really disable all phys/miis here and explicitly
++	 * enable them in the device specific init function
++	 */
++
++	/* disable port fetch/store dma */
++	for (i = 0; i < 7; i++) {
++		ltq_switch_w32(0, FDMA_PCTRLx(i));
++		ltq_switch_w32(0, SDMA_PCTRLx(i));
++	}
++
++	/* enable Switch */
++	ltq_mdio_w32_mask(0, MDIO_GLOB_ENABLE, MDIO_GLOB);
++
++	/* load the pce microcode */
++	xrx200_pci_microcode();
++
++	/* Default unknown Broadcast/Multicast/Unicast port maps */
++	ltq_switch_w32(0x7f, PCE_PMAP1);
++	ltq_switch_w32(0x7f, PCE_PMAP2);
++	ltq_switch_w32(0x7f, PCE_PMAP3);
++
++	/* RMON Counter Enable for all physical ports */
++	for (i = 0; i < 7; i++)
++		ltq_switch_w32(0x1, BM_PCFG(i));
++
++	/* disable auto polling */
++	ltq_mdio_w32(0x0, MDIO_CLK_CFG0);
++
++	/* enable port statistic counters */
++	for (i = 0; i < 7; i++)
++		ltq_switch_w32(0x1, BM_PCFGx(i));
++
++	/* set IPG to 12 */
++	ltq_pmac_w32_mask(PMAC_IPG_MASK, 0xb, PMAC_RX_IPG);
++
++#ifdef SW_ROUTING
++	/* enable status header, enable CRC */
++	ltq_pmac_w32_mask(0,
++		PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
++		PMAC_HD_CTL);
++#else
++	/* disable status header, enable CRC */
++	ltq_pmac_w32_mask(PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | PMAC_HD_CTL_AS,
++		PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
++		PMAC_HD_CTL);
++#endif
++
++	/* enable port fetch/store dma & VLAN Modification */
++	for (i = 0; i < 7; i++) {
++		ltq_switch_w32_mask(0, 0x19, FDMA_PCTRLx(i));
++		ltq_switch_w32_mask(0, 0x01, SDMA_PCTRLx(i));
++		ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(i, 0));
++	}
++
++	/* enable special tag insertion on cpu port */
++	ltq_switch_w32_mask(0, 0x02, FDMA_PCTRLx(6));
++	ltq_switch_w32_mask(0, PCE_INGRESS, PCE_PCTRL_REG(6, 0));
++	ltq_switch_w32_mask(0, BIT(3), MAC_CTRL_REG(6, 2));
++	ltq_switch_w32(1518 + 8 + 4 * 2, MAC_FLEN_REG);
++}
++
++static void xrx200_hw_cleanup(struct xrx200_hw *hw)
++{
++	int i;
++
++	/* disable the switch */
++	ltq_mdio_w32_mask(MDIO_GLOB_ENABLE, 0, MDIO_GLOB);
++
++	/* free the channels and IRQs */
++	for (i = 0; i < 2; i++) {
++		ltq_dma_free(&hw->chan[i].dma);
++		if (hw->chan[i].dma.irq)
++			free_irq(hw->chan[i].dma.irq, hw);
++	}
++
++	/* free the allocated RX ring */
++	for (i = 0; i < LTQ_DESC_NUM; i++)
++		dev_kfree_skb_any(hw->chan[XRX200_DMA_RX].skb[i]);
++
++	/* clear the mdio bus */
++	mdiobus_unregister(hw->mii_bus);
++	mdiobus_free(hw->mii_bus);
++
++	/* release the clock */
++	clk_disable(hw->clk);
++	clk_put(hw->clk);
++}
++
++static int xrx200_of_mdio(struct xrx200_hw *hw, struct device_node *np)
++{
++	hw->mii_bus = mdiobus_alloc();
++	if (!hw->mii_bus)
++		return -ENOMEM;
++
++	hw->mii_bus->read = xrx200_mdio_rd;
++	hw->mii_bus->write = xrx200_mdio_wr;
++	hw->mii_bus->name = "lantiq,xrx200-mdio";
++	snprintf(hw->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
++
++	if (of_mdiobus_register(hw->mii_bus, np)) {
++		mdiobus_free(hw->mii_bus);
++		return -ENXIO;
++	}
++
++	return 0;
++}
++
++static void xrx200_of_port(struct xrx200_priv *priv, struct device_node *port)
++{
++	const __be32 *addr, *id = of_get_property(port, "reg", NULL);
++	struct xrx200_port *p = &priv->port[priv->num_port];
++
++	if (!id)
++		return;
++
++	memset(p, 0, sizeof(struct xrx200_port));
++	p->phy_node = of_parse_phandle(port, "phy-handle", 0);
++	addr = of_get_property(p->phy_node, "reg", NULL);
++	if (!addr)
++		return;
++
++	p->num = *id;
++	p->phy_addr = *addr;
++	p->phy_if = of_get_phy_mode(port);
++	if (p->phy_addr > 0x10)
++		p->flags = XRX200_PORT_TYPE_MAC;
++	else
++		p->flags = XRX200_PORT_TYPE_PHY;
++	priv->num_port++;
++
++	p->gpio = of_get_gpio_flags(port, 0, &p->gpio_flags);
++	if (gpio_is_valid(p->gpio))
++		if (!gpio_request(p->gpio, "phy-reset")) {
++			gpio_direction_output(p->gpio,
++				(p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (1) : (0));
++			udelay(100);
++			gpio_set_value(p->gpio, (p->gpio_flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
++		}
++	/* is this port a wan port ? */
++	if (priv->wan)
++		priv->hw->wan_map |= BIT(p->num);
++
++	priv->port_map |= BIT(p->num);
++
++	/* store the port id in the hw struct so we can map ports -> devices */
++	priv->hw->port_map[p->num] = priv->hw->num_devs;
++}
++
++static const struct net_device_ops xrx200_netdev_ops = {
++	.ndo_init		= xrx200_init,
++	.ndo_open		= xrx200_open,
++	.ndo_stop		= xrx200_close,
++	.ndo_start_xmit		= xrx200_start_xmit,
++	.ndo_set_mac_address	= eth_mac_addr,
++	.ndo_validate_addr	= eth_validate_addr,
++	.ndo_change_mtu		= eth_change_mtu,
++	.ndo_get_stats		= xrx200_get_stats,
++	.ndo_tx_timeout		= xrx200_tx_timeout,
++};
++
++static void xrx200_of_iface(struct xrx200_hw *hw, struct device_node *iface)
++{
++	struct xrx200_priv *priv;
++	struct device_node *port;
++	const __be32 *wan;
++
++	/* alloc the network device */
++	hw->devs[hw->num_devs] = alloc_etherdev(sizeof(struct xrx200_priv));
++	if (!hw->devs[hw->num_devs])
++		return;
++
++	/* setup the network device */
++	strcpy(hw->devs[hw->num_devs]->name, "eth%d");
++	hw->devs[hw->num_devs]->netdev_ops = &xrx200_netdev_ops;
++	hw->devs[hw->num_devs]->watchdog_timeo = XRX200_TX_TIMEOUT;
++	hw->devs[hw->num_devs]->needed_headroom = XRX200_HEADROOM;
++
++	/* setup our private data */
++	priv = netdev_priv(hw->devs[hw->num_devs]);
++	priv->hw = hw;
++	of_get_mac_address_mtd(iface, priv->mac);
++	priv->id = hw->num_devs;
++
++	/* is this the wan interface ? */
++	wan = of_get_property(iface, "lantiq,wan", NULL);
++	if (wan && (*wan == 1))
++		priv->wan = 1;
++
++	/* should the switch be enabled on this interface ? */
++	if (of_find_property(iface, "lantiq,switch", NULL))
++		priv->sw = 1;
++
++	/* load the ports that are part of the interface */
++	for_each_child_of_node(iface, port)
++		if (of_device_is_compatible(port, "lantiq,xrx200-pdi-port"))
++			xrx200_of_port(priv, port);
++
++	/* register the actual device */
++	if (!register_netdev(hw->devs[hw->num_devs]))
++		hw->num_devs++;
++}
++
++static struct xrx200_hw xrx200_hw;
++
++static int xrx200_probe(struct platform_device *pdev)
++{
++	struct resource *res[4];
++	struct device_node *mdio_np, *iface_np;
++	int i;
++
++	/* load the memory ranges */
++	for (i = 0; i < 4; i++) {
++		res[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
++		if (!res[i]) {
++			dev_err(&pdev->dev, "failed to get resources\n");
++			return -ENOENT;
++		}
++	}
++	xrx200_switch_membase = devm_ioremap_resource(&pdev->dev, res[0]);
++	xrx200_mdio_membase = devm_ioremap_resource(&pdev->dev, res[1]);
++	xrx200_mii_membase = devm_ioremap_resource(&pdev->dev, res[2]);
++	xrx200_pmac_membase = devm_ioremap_resource(&pdev->dev, res[3]);
++	if (IS_ERR(xrx200_switch_membase) || IS_ERR(xrx200_mdio_membase) ||
++			IS_ERR(xrx200_mii_membase) || IS_ERR(xrx200_pmac_membase)) {
++		dev_err(&pdev->dev, "failed to request and remap io ranges\n");
++		return -ENOMEM;
++	}
++
++	/* get the clock */
++	xrx200_hw.clk = clk_get(&pdev->dev, NULL);
++	if (IS_ERR(xrx200_hw.clk)) {
++		dev_err(&pdev->dev, "failed to get clock\n");
++		return PTR_ERR(xrx200_hw.clk);
++	}
++
++	/* bring up the dma engine and IP core */
++	spin_lock_init(&xrx200_hw.lock);
++	xrx200_dma_init(&xrx200_hw);
++	xrx200_hw_init(&xrx200_hw);
++	tasklet_init(&xrx200_hw.chan[XRX200_DMA_TX].tasklet,
++			xrx200_tx_housekeeping, (unsigned long) &xrx200_hw);
++
++	/* bring up the mdio bus */
++	mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL,
++				"lantiq,xrx200-mdio");
++	if (mdio_np)
++		if (xrx200_of_mdio(&xrx200_hw, mdio_np))
++			dev_err(&pdev->dev, "mdio probe failed\n");
++
++	/* load the interfaces */
++	for_each_child_of_node(pdev->dev.of_node, iface_np)
++		if (of_device_is_compatible(iface_np, "lantiq,xrx200-pdi")) {
++			if (xrx200_hw.num_devs < XRX200_MAX_DEV)
++				xrx200_of_iface(&xrx200_hw, iface_np);
++			else
++				dev_err(&pdev->dev,
++					"only %d interfaces allowed\n",
++					XRX200_MAX_DEV);
++		}
++
++	if (!xrx200_hw.num_devs) {
++		xrx200_hw_cleanup(&xrx200_hw);
++		dev_err(&pdev->dev, "failed to load interfaces\n");
++		return -ENOENT;
++	}
++
++	xrx200sw_init(&xrx200_hw);
++
++	/* set wan port mask */
++	ltq_pmac_w32(xrx200_hw.wan_map, PMAC_EWAN);
++
++	for (i = 0; i < xrx200_hw.num_devs; i++) {
++		xrx200_hw.chan[XRX200_DMA_RX].devs[i] = xrx200_hw.devs[i];
++		xrx200_hw.chan[XRX200_DMA_TX].devs[i] = xrx200_hw.devs[i];
++	}
++
++	/* setup NAPI */
++	init_dummy_netdev(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev);
++	netif_napi_add(&xrx200_hw.chan[XRX200_DMA_RX].dummy_dev,
++			&xrx200_hw.chan[XRX200_DMA_RX].napi, xrx200_poll_rx, 32);
++
++	platform_set_drvdata(pdev, &xrx200_hw);
++
++	return 0;
++}
++
++static int xrx200_remove(struct platform_device *pdev)
++{
++	struct xrx200_hw *hw = platform_get_drvdata(pdev);
++	int i;
++
++	if (!hw)
++		return 0;
++
++	/* free stack related instances */
++	netif_napi_del(&hw->chan[XRX200_DMA_RX].napi);
++
++	/* remove the actual devices */
++	for (i = 0; i < hw->num_devs; i++) {
++		netif_stop_queue(hw->devs[i]);
++		unregister_netdev(hw->devs[i]);
++		free_netdev(hw->devs[i]);
++	}
++
++	/* shut down hardware */
++	xrx200_hw_cleanup(hw);
++
++	return 0;
++}
++
++static const struct of_device_id xrx200_match[] = {
++	{ .compatible = "lantiq,xrx200-net" },
++	{},
++};
++MODULE_DEVICE_TABLE(of, xrx200_match);
++
++static struct platform_driver xrx200_driver = {
++	.probe = xrx200_probe,
++	.remove = xrx200_remove,
++	.driver = {
++		.name = "lantiq,xrx200-net",
++		.of_match_table = xrx200_match,
++		.owner = THIS_MODULE,
++	},
++};
++
++module_platform_driver(xrx200_driver);
++
++MODULE_AUTHOR("John Crispin <blogic at openwrt.org>");
++MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/net/ethernet/lantiq_xrx200_sw.h
+@@ -0,0 +1,1328 @@
++/*
++ *   This program is free software; you can redistribute it and/or modify it
++ *   under the terms of the GNU General Public License version 2 as published
++ *   by the Free Software Foundation.
++ *
++ *   This program is distributed in the hope that it will be useful,
++ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
++ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ *   GNU General Public License for more details.
++ *
++ *   You should have received a copy of the GNU General Public License
++ *   along with this program; if not, write to the Free Software
++ *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
++ *
++ *   Copyright (C) 2010 Lantiq Deutschland GmbH
++ *   Copyright (C) 2013 Antonios Vamporakis <vamporakis at yahoo.com>
++ *
++ *   VR9 switch registers extracted from 310TUJ0 switch api
++ *   WARNING mult values of 0x00 may not be correct
++ *
++ */
++
++enum {
++//	XRX200_ETHSW_SWRES,            /* Ethernet Switch ResetControl Register */
++//	XRX200_ETHSW_SWRES_R1,         /* Hardware Reset */
++//	XRX200_ETHSW_SWRES_R0,         /* Register Configuration */
++//	XRX200_ETHSW_CLK_MAC_GAT,      /* Ethernet Switch Clock ControlRegister  */
++//	XRX200_ETHSW_CLK_EXP_SLEEP,    /* Exponent to put system into sleep */
++//	XRX200_ETHSW_CLK_EXP_WAKE,     /* Exponent to wake up system */
++//	XRX200_ETHSW_CLK_CLK2_EN,      /* CLK2 Input for MAC */
++//	XRX200_ETHSW_CLK_EXT_DIV_EN,   /* External Clock Divider Enable */
++//	XRX200_ETHSW_CLK_RAM_DBG_EN,   /* Clock Gating Enable */
++//	XRX200_ETHSW_CLK_REG_GAT_EN,   /* Clock Gating Enable */
++//	XRX200_ETHSW_CLK_GAT_EN,       /* Clock Gating Enable */
++//	XRX200_ETHSW_CLK_MAC_GAT_EN,   /* Clock Gating Enable */
++//	XRX200_ETHSW_DBG_STEP,         /* Ethernet Switch Debug ControlRegister */
++//	XRX200_ETHSW_DBG_CLK_SEL,      /* Trigger Enable */
++//	XRX200_ETHSW_DBG_MON_EN,       /* Monitoring Enable */
++//	XRX200_ETHSW_DBG_TRIG_EN,      /* Trigger Enable */
++//	XRX200_ETHSW_DBG_MODE,         /* Debug Mode */
++//	XRX200_ETHSW_DBG_STEP_TIME,    /* Clock Step Size */
++//	XRX200_ETHSW_SSB_MODE,         /* Ethernet Switch SharedSegment Buffer Mode Register */
++//	XRX200_ETHSW_SSB_MODE_ADDE,    /* Memory Address */
++//	XRX200_ETHSW_SSB_MODE_MODE,    /* Memory Access Mode */
++//	XRX200_ETHSW_SSB_ADDR,         /* Ethernet Switch SharedSegment Buffer Address Register */
++//	XRX200_ETHSW_SSB_ADDR_ADDE,    /* Memory Address */
++//	XRX200_ETHSW_SSB_DATA,         /* Ethernet Switch SharedSegment Buffer Data Register */
++//	XRX200_ETHSW_SSB_DATA_DATA,    /* Data Value */
++//	XRX200_ETHSW_CAP_0,            /* Ethernet Switch CapabilityRegister 0 */
++//	XRX200_ETHSW_CAP_0_SPEED,      /* Clock frequency */
++//	XRX200_ETHSW_CAP_1,            /* Ethernet Switch CapabilityRegister 1 */
++//	XRX200_ETHSW_CAP_1_GMAC,       /* MAC operation mode */
++//	XRX200_ETHSW_CAP_1_QUEUE,      /* Number of queues */
++//	XRX200_ETHSW_CAP_1_VPORTS,     /* Number of virtual ports */
++//	XRX200_ETHSW_CAP_1_PPORTS,     /* Number of physical ports */
++//	XRX200_ETHSW_CAP_2,            /* Ethernet Switch CapabilityRegister 2 */
++//	XRX200_ETHSW_CAP_2_PACKETS,    /* Number of packets */
++//	XRX200_ETHSW_CAP_3,            /* Ethernet Switch CapabilityRegister 3 */
++//	XRX200_ETHSW_CAP_3_METERS,     /* Number of traffic meters */
++//	XRX200_ETHSW_CAP_3_SHAPERS,    /* Number of traffic shapers */
++//	XRX200_ETHSW_CAP_4,            /* Ethernet Switch CapabilityRegister 4 */
++//	XRX200_ETHSW_CAP_4_PPPOE,      /* PPPoE table size */
++//	XRX200_ETHSW_CAP_4_VLAN,       /* Active VLAN table size */
++//	XRX200_ETHSW_CAP_5,            /* Ethernet Switch CapabilityRegister 5 */
++//	XRX200_ETHSW_CAP_5_IPPLEN,     /* IP packet length table size */
++//	XRX200_ETHSW_CAP_5_PROT,       /* Protocol table size */
++//	XRX200_ETHSW_CAP_6,            /* Ethernet Switch CapabilityRegister 6 */
++//	XRX200_ETHSW_CAP_6_MACDASA,    /* MAC DA/SA table size */
++//	XRX200_ETHSW_CAP_6_APPL,       /* Application table size */
++//	XRX200_ETHSW_CAP_7,            /* Ethernet Switch CapabilityRegister 7 */
++//	XRX200_ETHSW_CAP_7_IPDASAM,    /* IP DA/SA MSB table size */
++//	XRX200_ETHSW_CAP_7_IPDASAL,    /* IP DA/SA LSB table size */
++//	XRX200_ETHSW_CAP_8,            /* Ethernet Switch CapabilityRegister 8 */
++//	XRX200_ETHSW_CAP_8_MCAST,      /* Multicast table size */
++//	XRX200_ETHSW_CAP_9,            /* Ethernet Switch CapabilityRegister 9 */
++//	XRX200_ETHSW_CAP_9_FLAGG,      /* Flow Aggregation table size */
++//	XRX200_ETHSW_CAP_10,           /* Ethernet Switch CapabilityRegister 10 */
++//	XRX200_ETHSW_CAP_10_MACBT,     /* MAC bridging table size */
++//	XRX200_ETHSW_CAP_11,           /* Ethernet Switch CapabilityRegister 11 */
++//	XRX200_ETHSW_CAP_11_BSIZEL,    /* Packet buffer size (lower part, in byte) */
++//	XRX200_ETHSW_CAP_12,           /* Ethernet Switch CapabilityRegister 12 */
++//	XRX200_ETHSW_CAP_12_BSIZEH,    /* Packet buffer size (higher part, in byte) */
++//	XRX200_ETHSW_VERSION_REV,      /* Ethernet Switch VersionRegister */
++//	XRX200_ETHSW_VERSION_MOD_ID,   /* Module Identification */
++//	XRX200_ETHSW_VERSION_REV_ID,   /* Hardware Revision Identification */
++//	XRX200_ETHSW_IER,              /* Interrupt Enable Register */
++//	XRX200_ETHSW_IER_FDMAIE,       /* Fetch DMA Interrupt Enable */
++//	XRX200_ETHSW_IER_SDMAIE,       /* Store DMA Interrupt Enable */
++//	XRX200_ETHSW_IER_MACIE,        /* Ethernet MAC Interrupt Enable */
++//	XRX200_ETHSW_IER_PCEIE,        /* Parser and Classification Engine Interrupt Enable */
++//	XRX200_ETHSW_IER_BMIE,         /* Buffer Manager Interrupt Enable */
++//	XRX200_ETHSW_ISR,              /* Interrupt Status Register */
++//	XRX200_ETHSW_ISR_FDMAINT,      /* Fetch DMA Interrupt */
++//	XRX200_ETHSW_ISR_SDMAINT,      /* Store DMA Interrupt */
++//	XRX200_ETHSW_ISR_MACINT,       /* Ethernet MAC Interrupt */
++//	XRX200_ETHSW_ISR_PCEINT,       /* Parser and Classification Engine Interrupt */
++//	XRX200_ETHSW_ISR_BMINT,        /* Buffer Manager Interrupt */
++//	XRX200_ETHSW_SPARE_0,          /* Ethernet Switch SpareCells 0 */
++//	XRX200_ETHSW_SPARE_0_SPARE,    /* SPARE0  */
++//	XRX200_ETHSW_SPARE_1,          /* Ethernet Switch SpareCells 1 */
++//	XRX200_ETHSW_SPARE_1_SPARE,    /* SPARE1  */
++//	XRX200_ETHSW_SPARE_2,          /* Ethernet Switch SpareCells 2 */
++//	XRX200_ETHSW_SPARE_2_SPARE,    /* SPARE2  */
++//	XRX200_ETHSW_SPARE_3,          /* Ethernet Switch SpareCells 3 */
++//	XRX200_ETHSW_SPARE_3_SPARE,    /* SPARE3  */
++//	XRX200_ETHSW_SPARE_4,          /* Ethernet Switch SpareCells 4 */
++//	XRX200_ETHSW_SPARE_4_SPARE,    /* SPARE4  */
++//	XRX200_ETHSW_SPARE_5,          /* Ethernet Switch SpareCells 5 */
++//	XRX200_ETHSW_SPARE_5_SPARE,    /* SPARE5  */
++//	XRX200_ETHSW_SPARE_6,          /* Ethernet Switch SpareCells 6 */
++//	XRX200_ETHSW_SPARE_6_SPARE,    /* SPARE6  */
++//	XRX200_ETHSW_SPARE_7,          /* Ethernet Switch SpareCells 7 */
++//	XRX200_ETHSW_SPARE_7_SPARE,    /* SPARE7  */
++//	XRX200_ETHSW_SPARE_8,          /* Ethernet Switch SpareCells 8 */
++//	XRX200_ETHSW_SPARE_8_SPARE,    /* SPARE8  */
++//	XRX200_ETHSW_SPARE_9,          /* Ethernet Switch SpareCells 9 */
++//	XRX200_ETHSW_SPARE_9_SPARE,    /* SPARE9  */
++//	XRX200_ETHSW_SPARE_10,         /* Ethernet Switch SpareCells 10 */
++//	XRX200_ETHSW_SPARE_10_SPARE,   /* SPARE10  */
++//	XRX200_ETHSW_SPARE_11,         /* Ethernet Switch SpareCells 11 */
++//	XRX200_ETHSW_SPARE_11_SPARE,   /* SPARE11  */
++//	XRX200_ETHSW_SPARE_12,         /* Ethernet Switch SpareCells 12 */
++//	XRX200_ETHSW_SPARE_12_SPARE,   /* SPARE12  */
++//	XRX200_ETHSW_SPARE_13,         /* Ethernet Switch SpareCells 13 */
++//	XRX200_ETHSW_SPARE_13_SPARE,   /* SPARE13  */
++//	XRX200_ETHSW_SPARE_14,         /* Ethernet Switch SpareCells 14 */
++//	XRX200_ETHSW_SPARE_14_SPARE,   /* SPARE14  */
++//	XRX200_ETHSW_SPARE_15,         /* Ethernet Switch SpareCells 15 */
++//	XRX200_ETHSW_SPARE_15_SPARE,   /* SPARE15  */
++//	XRX200_BM_RAM_VAL_3,           /* RAM Value Register 3 */
++//	XRX200_BM_RAM_VAL_3_VAL3,      /* Data value [15:0] */
++//	XRX200_BM_RAM_VAL_2,           /* RAM Value Register 2 */
++//	XRX200_BM_RAM_VAL_2_VAL2,      /* Data value [15:0] */
++//	XRX200_BM_RAM_VAL_1,           /* RAM Value Register 1 */
++//	XRX200_BM_RAM_VAL_1_VAL1,      /* Data value [15:0] */
++//	XRX200_BM_RAM_VAL_0,           /* RAM Value Register 0 */
++//	XRX200_BM_RAM_VAL_0_VAL0,      /* Data value [15:0] */
++//	XRX200_BM_RAM_ADDR,            /* RAM Address Register */
++//	XRX200_BM_RAM_ADDR_ADDR,       /* RAM Address */
++//	XRX200_BM_RAM_CTRL,            /* RAM Access Control Register */
++//	XRX200_BM_RAM_CTRL_BAS,        /* Access Busy/Access Start */
++//	XRX200_BM_RAM_CTRL_OPMOD,      /* Lookup Table Access Operation Mode */
++//	XRX200_BM_RAM_CTRL_ADDR,       /* Address for RAM selection */
++//	XRX200_BM_FSQM_GCTRL,          /* Free Segment Queue ManagerGlobal Control Register */
++//	XRX200_BM_FSQM_GCTRL_SEGNUM,   /* Maximum Segment Number */
++//	XRX200_BM_CONS_SEG,            /* Number of Consumed SegmentsRegister */
++//	XRX200_BM_CONS_SEG_FSEG,       /* Number of Consumed Segments */
++//	XRX200_BM_CONS_PKT,            /* Number of Consumed PacketPointers Register */
++//	XRX200_BM_CONS_PKT_FQP,        /* Number of Consumed Packet Pointers */
++//	XRX200_BM_GCTRL_F,             /* Buffer Manager Global ControlRegister 0 */
++//	XRX200_BM_GCTRL_BM_STA,        /* Buffer Manager Initialization Status Bit */
++//	XRX200_BM_GCTRL_SAT,           /* RMON Counter Update Mode */
++//	XRX200_BM_GCTRL_FR_RBC,        /* Freeze RMON RX Bad Byte 64 Bit Counter */
++//	XRX200_BM_GCTRL_FR_RGC,        /* Freeze RMON RX Good Byte 64 Bit Counter */
++//	XRX200_BM_GCTRL_FR_TGC,        /* Freeze RMON TX Good Byte 64 Bit Counter */
++//	XRX200_BM_GCTRL_I_FIN,         /* RAM initialization finished */
++//	XRX200_BM_GCTRL_CX_INI,        /* PQM Context RAM initialization */
++//	XRX200_BM_GCTRL_FP_INI,        /* FPQM RAM initialization */
++//	XRX200_BM_GCTRL_FS_INI,        /* FSQM RAM initialization */
++//	XRX200_BM_GCTRL_R_SRES,        /* Software Reset for RMON */
++//	XRX200_BM_GCTRL_S_SRES,        /* Software Reset for Scheduler */
++//	XRX200_BM_GCTRL_A_SRES,        /* Software Reset for AVG */
++//	XRX200_BM_GCTRL_P_SRES,        /* Software Reset for PQM */
++//	XRX200_BM_GCTRL_F_SRES,        /* Software Reset for FSQM */
++//	XRX200_BM_QUEUE_GCTRL,         /* Queue Manager GlobalControl Register 0 */
++//	XRX200_BM_QUEUE_GCTRL_GL_MOD,  /* WRED Mode Signal */
++//	XRX200_BM_QUEUE_GCTRL_AQUI,    /* Average Queue Update Interval */
++//	XRX200_BM_QUEUE_GCTRL_AQWF,    /* Average Queue Weight Factor */
++//	XRX200_BM_QUEUE_GCTRL_QAVGEN,  /* Queue Average Calculation Enable */
++//	XRX200_BM_QUEUE_GCTRL_DPROB,   /* Drop Probability Profile */
++//	XRX200_BM_WRED_RTH_0,          /* WRED Red Threshold Register0 */
++//	XRX200_BM_WRED_RTH_0_MINTH,    /* Minimum Threshold */
++//	XRX200_BM_WRED_RTH_1,          /* WRED Red Threshold Register1 */
++//	XRX200_BM_WRED_RTH_1_MAXTH,    /* Maximum Threshold */
++//	XRX200_BM_WRED_YTH_0,          /* WRED Yellow ThresholdRegister 0 */
++//	XRX200_BM_WRED_YTH_0_MINTH,    /* Minimum Threshold */
++//	XRX200_BM_WRED_YTH_1,          /* WRED Yellow ThresholdRegister 1 */
++//	XRX200_BM_WRED_YTH_1_MAXTH,    /* Maximum Threshold */
++//	XRX200_BM_WRED_GTH_0,          /* WRED Green ThresholdRegister 0 */
++//	XRX200_BM_WRED_GTH_0_MINTH,    /* Minimum Threshold */
++//	XRX200_BM_WRED_GTH_1,          /* WRED Green ThresholdRegister 1 */
++//	XRX200_BM_WRED_GTH_1_MAXTH,    /* Maximum Threshold */
++//	XRX200_BM_DROP_GTH_0_THR,      /* Drop Threshold ConfigurationRegister 0 */
++//	XRX200_BM_DROP_GTH_0_THR_FQ,   /* Threshold for frames marked red */
++//	XRX200_BM_DROP_GTH_1_THY,      /* Drop Threshold ConfigurationRegister 1 */
++//	XRX200_BM_DROP_GTH_1_THY_FQ,   /* Threshold for frames marked yellow */
++//	XRX200_BM_DROP_GTH_2_THG,      /* Drop Threshold ConfigurationRegister 2 */
++//	XRX200_BM_DROP_GTH_2_THG_FQ,   /* Threshold for frames marked green */
++//	XRX200_BM_IER,                 /* Buffer Manager Global InterruptEnable Register */
++//	XRX200_BM_IER_CNT4,            /* Counter Group 4 (RMON-CLASSIFICATION) Interrupt Enable */
++//	XRX200_BM_IER_CNT3,            /* Counter Group 3 (RMON-PQM) Interrupt Enable */
++//	XRX200_BM_IER_CNT2,            /* Counter Group 2 (RMON-SCHEDULER) Interrupt Enable */
++//	XRX200_BM_IER_CNT1,            /* Counter Group 1 (RMON-QFETCH) Interrupt Enable */
++//	XRX200_BM_IER_CNT0,            /* Counter Group 0 (RMON-QSTOR) Interrupt Enable */
++//	XRX200_BM_IER_DEQ,             /* PQM dequeue Interrupt Enable */
++//	XRX200_BM_IER_ENQ,             /* PQM Enqueue Interrupt Enable */
++//	XRX200_BM_IER_FSQM,            /* Buffer Empty Interrupt Enable */
++//	XRX200_BM_ISR,                 /* Buffer Manager Global InterruptStatus Register */
++//	XRX200_BM_ISR_CNT4,            /* Counter Group 4 Interrupt */
++//	XRX200_BM_ISR_CNT3,            /* Counter Group 3 Interrupt */
++//	XRX200_BM_ISR_CNT2,            /* Counter Group 2 Interrupt */
++//	XRX200_BM_ISR_CNT1,            /* Counter Group 1 Interrupt */
++//	XRX200_BM_ISR_CNT0,            /* Counter Group 0 Interrupt */
++//	XRX200_BM_ISR_DEQ,             /* PQM dequeue Interrupt Enable */
++//	XRX200_BM_ISR_ENQ,             /* PQM Enqueue Interrupt */
++//	XRX200_BM_ISR_FSQM,            /* Buffer Empty Interrupt */
++//	XRX200_BM_CISEL,               /* Buffer Manager RMON CounterInterrupt Select Register */
++//	XRX200_BM_CISEL_PORT,          /* Port Number */
++//	XRX200_BM_DEBUG_CTRL_DBG,      /* Debug Control Register */
++//	XRX200_BM_DEBUG_CTRL_DBG_SEL,  /* Select Signal for Debug Multiplexer */
++//	XRX200_BM_DEBUG_VAL_DBG,       /* Debug Value Register */
++//	XRX200_BM_DEBUG_VAL_DBG_DAT,   /* Debug Data Value */
++//	XRX200_BM_PCFG,                /* Buffer Manager PortConfiguration Register */
++//	XRX200_BM_PCFG_CNTEN,          /* RMON Counter Enable */
++//	XRX200_BM_RMON_CTRL_RAM1,      /* Buffer ManagerRMON Control Register */
++//	XRX200_BM_RMON_CTRL_RAM2_RES,  /* Software Reset for RMON RAM2 */
++//	XRX200_BM_RMON_CTRL_RAM1_RES,  /* Software Reset for RMON RAM1 */
++//	XRX200_PQM_DP,                 /* Packet Queue ManagerDrop Probability Register */
++//	XRX200_PQM_DP_DPROB,           /* Drop Probability Profile */
++//	XRX200_PQM_RS,                 /* Packet Queue ManagerRate Shaper Assignment Register */
++//	XRX200_PQM_RS_EN2,             /* Rate Shaper 2 Enable */
++//	XRX200_PQM_RS_RS2,             /* Rate Shaper 2 */
++//	XRX200_PQM_RS_EN1,             /* Rate Shaper 1 Enable */
++//	XRX200_PQM_RS_RS1,             /* Rate Shaper 1 */
++//	XRX200_RS_CTRL,                /* Rate Shaper ControlRegister */
++//	XRX200_RS_CTRL_RSEN,           /* Rate Shaper Enable */
++//	XRX200_RS_CBS,                 /* Rate Shaper CommittedBurst Size Register */
++//	XRX200_RS_CBS_CBS,             /* Committed Burst Size */
++//	XRX200_RS_IBS,                 /* Rate Shaper InstantaneousBurst Size Register */
++//	XRX200_RS_IBS_IBS,             /* Instantaneous Burst Size */
++//	XRX200_RS_CIR_EXP,             /* Rate Shaper RateExponent Register */
++//	XRX200_RS_CIR_EXP_EXP,         /* Exponent */
++//	XRX200_RS_CIR_MANT,            /* Rate Shaper RateMantissa Register */
++//	XRX200_RS_CIR_MANT_MANT,       /* Mantissa */
++	XRX200_PCE_TBL_KEY_7,          /* Table Key Data 7 */
++//	XRX200_PCE_TBL_KEY_7_KEY7,     /* Key Value[15:0] */
++	XRX200_PCE_TBL_KEY_6,          /* Table Key Data 6 */
++//	XRX200_PCE_TBL_KEY_6_KEY6,     /* Key Value[15:0] */
++	XRX200_PCE_TBL_KEY_5,          /* Table Key Data 5 */
++//	XRX200_PCE_TBL_KEY_5_KEY5,     /* Key Value[15:0] */
++	XRX200_PCE_TBL_KEY_4,          /* Table Key Data 4 */
++//	XRX200_PCE_TBL_KEY_4_KEY4,     /* Key Value[15:0] */
++	XRX200_PCE_TBL_KEY_3,          /* Table Key Data 3 */
++//	XRX200_PCE_TBL_KEY_3_KEY3,     /* Key Value[15:0] */
++	XRX200_PCE_TBL_KEY_2,          /* Table Key Data 2 */
++//	XRX200_PCE_TBL_KEY_2_KEY2,     /* Key Value[15:0] */
++	XRX200_PCE_TBL_KEY_1,          /* Table Key Data 1 */
++//	XRX200_PCE_TBL_KEY_1_KEY1,     /* Key Value[31:16] */
++	XRX200_PCE_TBL_KEY_0,          /* Table Key Data 0 */
++//	XRX200_PCE_TBL_KEY_0_KEY0,     /* Key Value[15:0] */
++	XRX200_PCE_TBL_MASK_0,         /* Table Mask Write Register0 */
++//	XRX200_PCE_TBL_MASK_0_MASK0,   /* Mask Pattern [15:0] */
++	XRX200_PCE_TBL_VAL_4,          /* Table Value Register4 */
++//	XRX200_PCE_TBL_VAL_4_VAL4,     /* Data value [15:0] */
++	XRX200_PCE_TBL_VAL_3,          /* Table Value Register3 */
++//	XRX200_PCE_TBL_VAL_3_VAL3,     /* Data value [15:0] */
++	XRX200_PCE_TBL_VAL_2,          /* Table Value Register2 */
++//	XRX200_PCE_TBL_VAL_2_VAL2,     /* Data value [15:0] */
++	XRX200_PCE_TBL_VAL_1,          /* Table Value Register1 */
++//	XRX200_PCE_TBL_VAL_1_VAL1,     /* Data value [15:0] */
++	XRX200_PCE_TBL_VAL_0,          /* Table Value Register0 */
++//	XRX200_PCE_TBL_VAL_0_VAL0,     /* Data value [15:0] */
++//	XRX200_PCE_TBL_ADDR,           /* Table Entry AddressRegister */
++	XRX200_PCE_TBL_ADDR_ADDR,      /* Table Address */
++//	XRX200_PCE_TBL_CTRL,           /* Table Access ControlRegister */
++	XRX200_PCE_TBL_CTRL_BAS,       /* Access Busy/Access Start */
++	XRX200_PCE_TBL_CTRL_TYPE,      /* Lookup Entry Type */
++	XRX200_PCE_TBL_CTRL_VLD,       /* Lookup Entry Valid */
++	XRX200_PCE_TBL_CTRL_GMAP,      /* Group Map */
++	XRX200_PCE_TBL_CTRL_OPMOD,     /* Lookup Table Access Operation Mode */
++	XRX200_PCE_TBL_CTRL_ADDR,      /* Lookup Table Address */
++//	XRX200_PCE_TBL_STAT,           /* Table General StatusRegister */
++//	XRX200_PCE_TBL_STAT_TBUSY,     /* Table Access Busy */
++//	XRX200_PCE_TBL_STAT_TEMPT,     /* Table Empty */
++//	XRX200_PCE_TBL_STAT_TFUL,      /* Table Full */
++//	XRX200_PCE_AGE_0,              /* Aging Counter ConfigurationRegister 0 */
++//	XRX200_PCE_AGE_0_EXP,          /* Aging Counter Exponent Value  */
++//	XRX200_PCE_AGE_1,              /* Aging Counter ConfigurationRegister 1 */
++//	XRX200_PCE_AGE_1_MANT,         /* Aging Counter Mantissa Value  */
++//	XRX200_PCE_PMAP_1,             /* Port Map Register 1 */
++//	XRX200_PCE_PMAP_1_MPMAP,       /* Monitoring Port Map */
++//	XRX200_PCE_PMAP_2,             /* Port Map Register 2 */
++//	XRX200_PCE_PMAP_2_DMCPMAP,     /* Default Multicast Port Map */
++//	XRX200_PCE_PMAP_3,             /* Port Map Register 3 */
++//	XRX200_PCE_PMAP_3_UUCMAP,      /* Default Unknown Unicast Port Map */
++//	XRX200_PCE_GCTRL_0,            /* PCE Global Control Register0 */
++//	XRX200_PCE_GCTRL_0_IGMP,       /* IGMP Mode Selection */
++	XRX200_PCE_GCTRL_0_VLAN,       /* VLAN-aware Switching */
++//	XRX200_PCE_GCTRL_0_NOPM,       /* No Port Map Forwarding */
++//	XRX200_PCE_GCTRL_0_SCONUC,     /* Unknown Unicast Storm Control */
++//	XRX200_PCE_GCTRL_0_SCONMC,     /* Multicast Storm Control */
++//	XRX200_PCE_GCTRL_0_SCONBC,     /* Broadcast Storm Control */
++//	XRX200_PCE_GCTRL_0_SCONMOD,    /* Storm Control Mode */
++//	XRX200_PCE_GCTRL_0_SCONMET,    /* Storm Control Metering Instance */
++//	XRX200_PCE_GCTRL_0_MC_VALID,   /* Access Request */
++//	XRX200_PCE_GCTRL_0_PLCKMOD,    /* Port Lock Mode */
++//	XRX200_PCE_GCTRL_0_PLIMMOD,    /* MAC Address Learning Limitation Mode */
++//	XRX200_PCE_GCTRL_0_MTFL,       /* MAC Table Flushing */
++//	XRX200_PCE_GCTRL_1,            /* PCE Global Control Register1 */
++//	XRX200_PCE_GCTRL_1_PCE_DIS,    /* PCE Disable after currently processed packet */
++//	XRX200_PCE_GCTRL_1_LRNMOD,     /* MAC Address Learning Mode */
++//	XRX200_PCE_TCM_GLOB_CTRL,      /* Three-color MarkerGlobal Control Register */
++//	XRX200_PCE_TCM_GLOB_CTRL_DPRED, /* Re-marking Drop Precedence Red Encoding */
++//	XRX200_PCE_TCM_GLOB_CTRL_DPYEL, /* Re-marking Drop Precedence Yellow Encoding */
++//	XRX200_PCE_TCM_GLOB_CTRL_DPGRN, /* Re-marking Drop Precedence Green Encoding */
++//	XRX200_PCE_IGMP_CTRL,          /* IGMP Control Register */
++//	XRX200_PCE_IGMP_CTRL_FAGEEN,   /* Force Aging of Table Entries Enable */
++//	XRX200_PCE_IGMP_CTRL_FLEAVE,   /* Fast Leave Enable */
++//	XRX200_PCE_IGMP_CTRL_DMRTEN,   /* Default Maximum Response Time Enable */
++//	XRX200_PCE_IGMP_CTRL_JASUP,    /* Join Aggregation Suppression Enable */
++//	XRX200_PCE_IGMP_CTRL_REPSUP,   /* Report Suppression Enable */
++//	XRX200_PCE_IGMP_CTRL_SRPEN,    /* Snooping of Router Port Enable */
++//	XRX200_PCE_IGMP_CTRL_ROB,      /* Robustness Variable */
++//	XRX200_PCE_IGMP_CTRL_DMRT,     /* IGMP Default Maximum Response Time */
++//	XRX200_PCE_IGMP_DRPM,          /* IGMP Default RouterPort Map Register */
++//	XRX200_PCE_IGMP_DRPM_DRPM,     /* IGMP Default Router Port Map */
++//	XRX200_PCE_IGMP_AGE_0,         /* IGMP Aging Register0 */
++//	XRX200_PCE_IGMP_AGE_0_MANT,    /* IGMP Group Aging Time Mantissa */
++//	XRX200_PCE_IGMP_AGE_0_EXP,     /* IGMP Group Aging Time Exponent */
++//	XRX200_PCE_IGMP_AGE_1,         /* IGMP Aging Register1 */
++//	XRX200_PCE_IGMP_AGE_1_MANT,    /* IGMP Router Port Aging Time Mantissa */
++//	XRX200_PCE_IGMP_STAT,          /* IGMP Status Register */
++//	XRX200_PCE_IGMP_STAT_IGPM,     /* IGMP Port Map */
++//	XRX200_WOL_GLB_CTRL,           /* Wake-on-LAN ControlRegister */
++//	XRX200_WOL_GLB_CTRL_PASSEN,    /* WoL Password Enable */
++//	XRX200_WOL_DA_0,               /* Wake-on-LAN DestinationAddress Register 0 */
++//	XRX200_WOL_DA_0_DA0,           /* WoL Destination Address [15:0] */
++//	XRX200_WOL_DA_1,               /* Wake-on-LAN DestinationAddress Register 1 */
++//	XRX200_WOL_DA_1_DA1,           /* WoL Destination Address [31:16] */
++//	XRX200_WOL_DA_2,               /* Wake-on-LAN DestinationAddress Register 2 */
++//	XRX200_WOL_DA_2_DA2,           /* WoL Destination Address [47:32] */
++//	XRX200_WOL_PW_0,               /* Wake-on-LAN Password Register0 */
++//	XRX200_WOL_PW_0_PW0,           /* WoL Password [15:0] */
++//	XRX200_WOL_PW_1,               /* Wake-on-LAN Password Register1 */
++//	XRX200_WOL_PW_1_PW1,           /* WoL Password [31:16] */
++//	XRX200_WOL_PW_2,               /* Wake-on-LAN Password Register2 */
++//	XRX200_WOL_PW_2_PW2,           /* WoL Password [47:32] */
++//	XRX200_PCE_IER_0_PINT,         /* Parser and ClassificationEngine Global Interrupt Enable Register 0 */
++//	XRX200_PCE_IER_0_PINT_15,      /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_14,      /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_13,      /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_12,      /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_11,      /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_10,      /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_9,       /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_8,       /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_7,       /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_6,       /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_5,       /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_4,       /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_3,       /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_2,       /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_1,       /* Port Interrupt Enable */
++//	XRX200_PCE_IER_0_PINT_0,       /* Port Interrupt Enable */
++//	XRX200_PCE_IER_1,              /* Parser and ClassificationEngine Global Interrupt Enable Register 1 */
++//	XRX200_PCE_IER_1_FLOWINT,      /* Traffic Flow Table Interrupt Rule matched Interrupt Enable */
++//	XRX200_PCE_IER_1_CPH2,         /* Classification Phase 2 Ready Interrupt Enable */
++//	XRX200_PCE_IER_1_CPH1,         /* Classification Phase 1 Ready Interrupt Enable */
++//	XRX200_PCE_IER_1_CPH0,         /* Classification Phase 0 Ready Interrupt Enable */
++//	XRX200_PCE_IER_1_PRDY,         /* Parser Ready Interrupt Enable */
++//	XRX200_PCE_IER_1_IGTF,         /* IGMP Table Full Interrupt Enable */
++//	XRX200_PCE_IER_1_MTF,          /* MAC Table Full Interrupt Enable */
++//	XRX200_PCE_ISR_0_PINT,         /* Parser and ClassificationEngine Global Interrupt Status Register 0 */
++//	XRX200_PCE_ISR_0_PINT_15,      /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_14,      /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_13,      /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_12,      /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_11,      /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_10,      /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_9,       /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_8,       /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_7,       /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_6,       /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_5,       /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_4,       /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_3,       /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_2,       /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_1,       /* Port Interrupt */
++//	XRX200_PCE_ISR_0_PINT_0,       /* Port Interrupt */
++//	XRX200_PCE_ISR_1,              /* Parser and ClassificationEngine Global Interrupt Status Register 1 */
++//	XRX200_PCE_ISR_1_FLOWINT,      /* Traffic Flow Table Interrupt Rule matched */
++//	XRX200_PCE_ISR_1_CPH2,         /* Classification Phase 2 Ready Interrupt */
++//	XRX200_PCE_ISR_1_CPH1,         /* Classification Phase 1 Ready Interrupt */
++//	XRX200_PCE_ISR_1_CPH0,         /* Classification Phase 0 Ready Interrupt */
++//	XRX200_PCE_ISR_1_PRDY,         /* Parser Ready Interrupt */
++//	XRX200_PCE_ISR_1_IGTF,         /* IGMP Table Full Interrupt */
++//	XRX200_PCE_ISR_1_MTF,          /* MAC Table Full Interrupt */
++//	XRX200_PARSER_STAT_FIFO,       /* Parser Status Register */
++//	XRX200_PARSER_STAT_FSM_DAT_CNT, /* Parser FSM Data Counter */
++//	XRX200_PARSER_STAT_FSM_STATE,  /* Parser FSM State */
++//	XRX200_PARSER_STAT_PKT_ERR,    /* Packet error detected */
++//	XRX200_PARSER_STAT_FSM_FIN,    /* Parser FSM finished */
++//	XRX200_PARSER_STAT_FSM_START,  /* Parser FSM start */
++//	XRX200_PARSER_STAT_FIFO_RDY,   /* Parser FIFO ready for read. */
++//	XRX200_PARSER_STAT_FIFO_FULL,  /* Parser */
++//	XRX200_PCE_PCTRL_0,            /* PCE Port ControlRegister 0 */
++//	XRX200_PCE_PCTRL_0_MCST,       /* Multicast Forwarding Mode Selection */
++//	XRX200_PCE_PCTRL_0_EGSTEN,     /* Table-based Egress Special Tag Enable */
++//	XRX200_PCE_PCTRL_0_IGSTEN,     /* Ingress Special Tag Enable */
++//	XRX200_PCE_PCTRL_0_PCPEN,      /* PCP Remarking Mode */
++//	XRX200_PCE_PCTRL_0_CLPEN,      /* Class Remarking Mode */
++//	XRX200_PCE_PCTRL_0_DPEN,       /* Drop Precedence Remarking Mode */
++//	XRX200_PCE_PCTRL_0_CMOD,       /* Three-color Marker Color Mode */
++//	XRX200_PCE_PCTRL_0_VREP,       /* VLAN Replacement Mode */
++	XRX200_PCE_PCTRL_0_TVM,        /* Transparent VLAN Mode */
++//	XRX200_PCE_PCTRL_0_PLOCK,      /* Port Locking Enable */
++//	XRX200_PCE_PCTRL_0_AGEDIS,     /* Aging Disable */
++//	XRX200_PCE_PCTRL_0_PSTATE,     /* Port State */
++//	XRX200_PCE_PCTRL_1,            /* PCE Port ControlRegister 1 */
++//	XRX200_PCE_PCTRL_1_LRNLIM,     /* MAC Address Learning Limit */
++//	XRX200_PCE_PCTRL_2,            /* PCE Port ControlRegister 2 */
++//	XRX200_PCE_PCTRL_2_DSCPMOD,    /* DSCP Mode Selection */
++//	XRX200_PCE_PCTRL_2_DSCP,       /* Enable DSCP to select the Class of Service */
++//	XRX200_PCE_PCTRL_2_PCP,        /* Enable VLAN PCP to select the Class of Service */
++//	XRX200_PCE_PCTRL_2_PCLASS,     /* Port-based Traffic Class */
++//	XRX200_PCE_PCTRL_3_VIO,        /* PCE Port ControlRegister 3 */
++//	XRX200_PCE_PCTRL_3_EDIR,       /* Egress Redirection Mode */
++//	XRX200_PCE_PCTRL_3_RXDMIR,     /* Receive Mirroring Enable for dropped frames */
++//	XRX200_PCE_PCTRL_3_RXVMIR,     /* Receive Mirroring Enable for valid frames */
++//	XRX200_PCE_PCTRL_3_TXMIR,      /* Transmit Mirroring Enable */
++//	XRX200_PCE_PCTRL_3_VIO_7,      /* Violation Type 7 Mirroring Enable */
++//	XRX200_PCE_PCTRL_3_VIO_6,      /* Violation Type 6 Mirroring Enable */
++//	XRX200_PCE_PCTRL_3_VIO_5,      /* Violation Type 5 Mirroring Enable */
++//	XRX200_PCE_PCTRL_3_VIO_4,      /* Violation Type 4 Mirroring Enable */
++//	XRX200_PCE_PCTRL_3_VIO_3,      /* Violation Type 3 Mirroring Enable */
++//	XRX200_PCE_PCTRL_3_VIO_2,      /* Violation Type 2 Mirroring Enable */
++//	XRX200_PCE_PCTRL_3_VIO_1,      /* Violation Type 1 Mirroring Enable */
++//	XRX200_PCE_PCTRL_3_VIO_0,      /* Violation Type 0 Mirroring Enable */
++//	XRX200_WOL_CTRL,               /* Wake-on-LAN ControlRegister */
++//	XRX200_WOL_CTRL_PORT,          /* WoL Enable */
++//	XRX200_PCE_VCTRL,              /* PCE VLAN ControlRegister */
++	XRX200_PCE_VCTRL_VSR,          /* VLAN Security Rule */
++	XRX200_PCE_VCTRL_VEMR,         /* VLAN Egress Member Violation Rule */
++	XRX200_PCE_VCTRL_VIMR,         /* VLAN Ingress Member Violation Rule */
++	XRX200_PCE_VCTRL_VINR,         /* VLAN Ingress Tag Rule */
++	XRX200_PCE_VCTRL_UVR,          /* Unknown VLAN Rule */
++//	XRX200_PCE_DEFPVID,            /* PCE Default PortVID Register */
++	XRX200_PCE_DEFPVID_PVID,       /* Default Port VID Index */
++//	XRX200_PCE_PSTAT,              /* PCE Port StatusRegister */
++//	XRX200_PCE_PSTAT_LRNCNT,       /* Learning Count */
++//	XRX200_PCE_PIER,               /* Parser and ClassificationEngine Port Interrupt Enable Register */
++//	XRX200_PCE_PIER_CLDRP,         /* Classification Drop Interrupt Enable */
++//	XRX200_PCE_PIER_PTDRP,         /* Port Drop Interrupt Enable */
++//	XRX200_PCE_PIER_VLAN,          /* VLAN Violation Interrupt Enable */
++//	XRX200_PCE_PIER_WOL,           /* Wake-on-LAN Interrupt Enable */
++//	XRX200_PCE_PIER_LOCK,          /* Port Limit Alert Interrupt Enable */
++//	XRX200_PCE_PIER_LIM,           /* Port Lock Alert Interrupt Enable */
++//	XRX200_PCE_PISR,               /* Parser and ClassificationEngine Port Interrupt Status Register */
++//	XRX200_PCE_PISR_CLDRP,         /* Classification Drop Interrupt */
++//	XRX200_PCE_PISR_PTDRP,         /* Port Drop Interrupt */
++//	XRX200_PCE_PISR_VLAN,          /* VLAN Violation Interrupt */
++//	XRX200_PCE_PISR_WOL,           /* Wake-on-LAN Interrupt */
++//	XRX200_PCE_PISR_LOCK,          /* Port Lock Alert Interrupt */
++//	XRX200_PCE_PISR_LIMIT,         /* Port Limitation Alert Interrupt */
++//	XRX200_PCE_TCM_CTRL,           /* Three-colorMarker Control Register */
++//	XRX200_PCE_TCM_CTRL_TCMEN,     /* Three-color Marker metering instance enable */
++//	XRX200_PCE_TCM_STAT,           /* Three-colorMarker Status Register */
++//	XRX200_PCE_TCM_STAT_AL1,       /* Three-color Marker Alert 1 Status */
++//	XRX200_PCE_TCM_STAT_AL0,       /* Three-color Marker Alert 0 Status */
++//	XRX200_PCE_TCM_CBS,            /* Three-color MarkerCommitted Burst Size Register */
++//	XRX200_PCE_TCM_CBS_CBS,        /* Committed Burst Size */
++//	XRX200_PCE_TCM_EBS,            /* Three-color MarkerExcess Burst Size Register */
++//	XRX200_PCE_TCM_EBS_EBS,        /* Excess Burst Size */
++//	XRX200_PCE_TCM_IBS,            /* Three-color MarkerInstantaneous Burst Size Register */
++//	XRX200_PCE_TCM_IBS_IBS,        /* Instantaneous Burst Size */
++//	XRX200_PCE_TCM_CIR_MANT,       /* Three-colorMarker Constant Information Rate Mantissa Register */
++//	XRX200_PCE_TCM_CIR_MANT_MANT,  /* Rate Counter Mantissa */
++//	XRX200_PCE_TCM_CIR_EXP,        /* Three-colorMarker Constant Information Rate Exponent Register */
++//	XRX200_PCE_TCM_CIR_EXP_EXP,    /* Rate Counter Exponent */
++//	XRX200_MAC_TEST,               /* MAC Test Register */
++//	XRX200_MAC_TEST_JTP,           /* Jitter Test Pattern */
++//	XRX200_MAC_PFAD_CFG,           /* MAC Pause FrameSource Address Configuration Register */
++//	XRX200_MAC_PFAD_CFG_SAMOD,     /* Source Address Mode */
++//	XRX200_MAC_PFSA_0,             /* Pause Frame SourceAddress Part 0  */
++//	XRX200_MAC_PFSA_0_PFAD,        /* Pause Frame Source Address Part 0 */
++//	XRX200_MAC_PFSA_1,             /* Pause Frame SourceAddress Part 1  */
++//	XRX200_MAC_PFSA_1_PFAD,        /* Pause Frame Source Address Part 1 */
++//	XRX200_MAC_PFSA_2,             /* Pause Frame SourceAddress Part 2  */
++//	XRX200_MAC_PFSA_2_PFAD,        /* Pause Frame Source Address Part 2 */
++//	XRX200_MAC_FLEN,               /* MAC Frame Length Register */
++//	XRX200_MAC_FLEN_LEN,           /* Maximum Frame Length */
++//	XRX200_MAC_VLAN_ETYPE_0,       /* MAC VLAN EthertypeRegister 0 */
++//	XRX200_MAC_VLAN_ETYPE_0_OUTER, /* Ethertype */
++//	XRX200_MAC_VLAN_ETYPE_1,       /* MAC VLAN EthertypeRegister 1 */
++//	XRX200_MAC_VLAN_ETYPE_1_INNER, /* Ethertype */
++//	XRX200_MAC_IER,                /* MAC Interrupt EnableRegister */
++//	XRX200_MAC_IER_MACIEN,         /* MAC Interrupt Enable */
++//	XRX200_MAC_ISR,                /* MAC Interrupt StatusRegister */
++//	XRX200_MAC_ISR_MACINT,         /* MAC Interrupt */
++//	XRX200_MAC_PSTAT,              /* MAC Port Status Register */
++//	XRX200_MAC_PSTAT_PACT,         /* PHY Active Status */
++	XRX200_MAC_PSTAT_GBIT,         /* Gigabit Speed Status */
++	XRX200_MAC_PSTAT_MBIT,         /* Megabit Speed Status */
++	XRX200_MAC_PSTAT_FDUP,         /* Full Duplex Status */
++//	XRX200_MAC_PSTAT_RXPAU,        /* Receive Pause Status */
++//	XRX200_MAC_PSTAT_TXPAU,        /* Transmit Pause Status */
++//	XRX200_MAC_PSTAT_RXPAUEN,      /* Receive Pause Enable Status */
++//	XRX200_MAC_PSTAT_TXPAUEN,      /* Transmit Pause Enable Status */
++	XRX200_MAC_PSTAT_LSTAT,        /* Link Status */
++//	XRX200_MAC_PSTAT_CRS,          /* Carrier Sense Status */
++//	XRX200_MAC_PSTAT_TXLPI,        /* Transmit Low-power Idle Status */
++//	XRX200_MAC_PSTAT_RXLPI,        /* Receive Low-power Idle Status */
++//	XRX200_MAC_PISR,               /* MAC Interrupt Status Register */
++//	XRX200_MAC_PISR_PACT,          /* PHY Active Status */
++//	XRX200_MAC_PISR_SPEED,         /* Megabit Speed Status */
++//	XRX200_MAC_PISR_FDUP,          /* Full Duplex Status */
++//	XRX200_MAC_PISR_RXPAUEN,       /* Receive Pause Enable Status */
++//	XRX200_MAC_PISR_TXPAUEN,       /* Transmit Pause Enable Status */
++//	XRX200_MAC_PISR_LPIOFF,        /* Receive Low-power Idle Mode is left */
++//	XRX200_MAC_PISR_LPION,         /* Receive Low-power Idle Mode is entered */
++//	XRX200_MAC_PISR_JAM,           /* Jam Status Detected */
++//	XRX200_MAC_PISR_TOOSHORT,      /* Too Short Frame Error Detected */
++//	XRX200_MAC_PISR_TOOLONG,       /* Too Long Frame Error Detected */
++//	XRX200_MAC_PISR_LENERR,        /* Length Mismatch Error Detected */
++//	XRX200_MAC_PISR_FCSERR,        /* Frame Checksum Error Detected */
++//	XRX200_MAC_PISR_TXPAUSE,       /* Pause Frame Transmitted */
++//	XRX200_MAC_PISR_RXPAUSE,       /* Pause Frame Received */
++//	XRX200_MAC_PIER,               /* MAC Interrupt Enable Register */
++//	XRX200_MAC_PIER_PACT,          /* PHY Active Status */
++//	XRX200_MAC_PIER_SPEED,         /* Megabit Speed Status */
++//	XRX200_MAC_PIER_FDUP,          /* Full Duplex Status */
++//	XRX200_MAC_PIER_RXPAUEN,       /* Receive Pause Enable Status */
++//	XRX200_MAC_PIER_TXPAUEN,       /* Transmit Pause Enable Status */
++//	XRX200_MAC_PIER_LPIOFF,        /* Low-power Idle Off Interrupt Mask */
++//	XRX200_MAC_PIER_LPION,         /* Low-power Idle On Interrupt Mask */
++//	XRX200_MAC_PIER_JAM,           /* Jam Status Interrupt Mask */
++//	XRX200_MAC_PIER_TOOSHORT,      /* Too Short Frame Error Interrupt Mask */
++//	XRX200_MAC_PIER_TOOLONG,       /* Too Long Frame Error Interrupt Mask */
++//	XRX200_MAC_PIER_LENERR,        /* Length Mismatch Error Interrupt Mask */
++//	XRX200_MAC_PIER_FCSERR,        /* Frame Checksum Error Interrupt Mask */
++//	XRX200_MAC_PIER_TXPAUSE,       /* Transmit Pause Frame Interrupt Mask */
++//	XRX200_MAC_PIER_RXPAUSE,       /* Receive Pause Frame Interrupt Mask */
++//	XRX200_MAC_CTRL_0,             /* MAC Control Register0 */
++//	XRX200_MAC_CTRL_0_LCOL,        /* Late Collision Control */
++//	XRX200_MAC_CTRL_0_BM,          /* Burst Mode Control */
++//	XRX200_MAC_CTRL_0_APADEN,      /* Automatic VLAN Padding Enable */
++//	XRX200_MAC_CTRL_0_VPAD2EN,     /* Stacked VLAN Padding Enable */
++//	XRX200_MAC_CTRL_0_VPADEN,      /* VLAN Padding Enable */
++//	XRX200_MAC_CTRL_0_PADEN,       /* Padding Enable */
++//	XRX200_MAC_CTRL_0_FCS,         /* Transmit FCS Control */
++	XRX200_MAC_CTRL_0_FCON,        /* Flow Control Mode */
++//	XRX200_MAC_CTRL_0_FDUP,        /* Full Duplex Control */
++//	XRX200_MAC_CTRL_0_GMII,        /* GMII/MII interface mode selection */
++//	XRX200_MAC_CTRL_1,             /* MAC Control Register1 */
++//	XRX200_MAC_CTRL_1_SHORTPRE,    /* Short Preamble Control */
++//	XRX200_MAC_CTRL_1_IPG,         /* Minimum Inter Packet Gap Size */
++//	XRX200_MAC_CTRL_2,             /* MAC Control Register2 */
++//	XRX200_MAC_CTRL_2_MLEN,        /* Maximum Untagged Frame Length */
++//	XRX200_MAC_CTRL_2_LCHKL,       /* Frame Length Check Long Enable */
++//	XRX200_MAC_CTRL_2_LCHKS,       /* Frame Length Check Short Enable */
++//	XRX200_MAC_CTRL_3,             /* MAC Control Register3 */
++//	XRX200_MAC_CTRL_3_RCNT,        /* Retry Count */
++//	XRX200_MAC_CTRL_4,             /* MAC Control Register4 */
++//	XRX200_MAC_CTRL_4_LPIEN,       /* LPI Mode Enable */
++//	XRX200_MAC_CTRL_4_WAIT,        /* LPI Wait Time */
++//	XRX200_MAC_CTRL_5_PJPS,        /* MAC Control Register5 */
++//	XRX200_MAC_CTRL_5_PJPS_NOBP,   /* Prolonged Jam pattern size during no-backpressure state */
++//	XRX200_MAC_CTRL_5_PJPS_BP,     /* Prolonged Jam pattern size during backpressure state */
++//	XRX200_MAC_CTRL_6_XBUF,        /* Transmit and ReceiveBuffer Control Register */
++//	XRX200_MAC_CTRL_6_RBUF_DLY_WP, /* Delay */
++//	XRX200_MAC_CTRL_6_RBUF_INIT,   /* Receive Buffer Initialization */
++//	XRX200_MAC_CTRL_6_RBUF_BYPASS, /* Bypass the Receive Buffer */
++//	XRX200_MAC_CTRL_6_XBUF_DLY_WP, /* Delay */
++//	XRX200_MAC_CTRL_6_XBUF_INIT,   /* Initialize the Transmit Buffer */
++//	XRX200_MAC_CTRL_6_XBUF_BYPASS, /* Bypass the Transmit Buffer */
++//	XRX200_MAC_BUFST_XBUF,         /* MAC Receive and TransmitBuffer Status Register */
++//	XRX200_MAC_BUFST_RBUF_UFL,     /* Receive Buffer Underflow Indicator */
++//	XRX200_MAC_BUFST_RBUF_OFL,     /* Receive Buffer Overflow Indicator */
++//	XRX200_MAC_BUFST_XBUF_UFL,     /* Transmit Buffer Underflow Indicator */
++//	XRX200_MAC_BUFST_XBUF_OFL,     /* Transmit Buffer Overflow Indicator */
++//	XRX200_MAC_TESTEN,             /* MAC Test Enable Register */
++//	XRX200_MAC_TESTEN_JTEN,        /* Jitter Test Enable */
++//	XRX200_MAC_TESTEN_TXER,        /* Transmit Error Insertion */
++//	XRX200_MAC_TESTEN_LOOP,        /* MAC Loopback Enable */
++//	XRX200_FDMA_CTRL,              /* Ethernet Switch FetchDMA Control Register */
++//	XRX200_FDMA_CTRL_LPI_THRESHOLD, /* Low Power Idle Threshold */
++//	XRX200_FDMA_CTRL_LPI_MODE,     /* Low Power Idle Mode */
++//	XRX200_FDMA_CTRL_EGSTAG,       /* Egress Special Tag Size */
++//	XRX200_FDMA_CTRL_IGSTAG,       /* Ingress Special Tag Size */
++//	XRX200_FDMA_CTRL_EXCOL,        /* Excessive Collision Handling */
++//	XRX200_FDMA_STETYPE,           /* Special Tag EthertypeControl Register */
++//	XRX200_FDMA_STETYPE_ETYPE,     /* Special Tag Ethertype */
++//	XRX200_FDMA_VTETYPE,           /* VLAN Tag EthertypeControl Register */
++//	XRX200_FDMA_VTETYPE_ETYPE,     /* VLAN Tag Ethertype */
++//	XRX200_FDMA_STAT_0,            /* FDMA Status Register0 */
++//	XRX200_FDMA_STAT_0_FSMS,       /* FSM states status */
++//	XRX200_FDMA_IER,               /* Fetch DMA Global InterruptEnable Register */
++//	XRX200_FDMA_IER_PCKD,          /* Packet Drop Interrupt Enable */
++//	XRX200_FDMA_IER_PCKR,          /* Packet Ready Interrupt Enable */
++//	XRX200_FDMA_IER_PCKT,          /* Packet Sent Interrupt Enable */
++//	XRX200_FDMA_ISR,               /* Fetch DMA Global InterruptStatus Register */
++//	XRX200_FDMA_ISR_PCKTD,         /* Packet Drop */
++//	XRX200_FDMA_ISR_PCKR,          /* Packet is Ready for Transmission */
++//	XRX200_FDMA_ISR_PCKT,          /* Packet Sent Event */
++//	XRX200_FDMA_PCTRL,             /* Ethernet SwitchFetch DMA Port Control Register */
++//	XRX200_FDMA_PCTRL_VLANMOD,     /* VLAN Modification Enable */
++//	XRX200_FDMA_PCTRL_DSCPRM,      /* DSCP Re-marking Enable */
++//	XRX200_FDMA_PCTRL_STEN,        /* Special Tag Insertion Enable */
++//	XRX200_FDMA_PCTRL_EN,          /* FDMA Port Enable */
++//	XRX200_FDMA_PRIO,              /* Ethernet SwitchFetch DMA Port Priority Register */
++//	XRX200_FDMA_PRIO_PRIO,         /* FDMA PRIO */
++//	XRX200_FDMA_PSTAT0,            /* Ethernet SwitchFetch DMA Port Status Register 0 */
++//	XRX200_FDMA_PSTAT0_PKT_AVAIL,  /* Port Egress Packet Available */
++//	XRX200_FDMA_PSTAT0_POK,        /* Port Status OK */
++//	XRX200_FDMA_PSTAT0_PSEG,       /* Port Egress Segment Count */
++//	XRX200_FDMA_PSTAT1_HDR,        /* Ethernet SwitchFetch DMA Port Status Register 1 */
++//	XRX200_FDMA_PSTAT1_HDR_PTR,    /* Header Pointer */
++//	XRX200_FDMA_TSTAMP0,           /* Egress TimeStamp Register 0 */
++//	XRX200_FDMA_TSTAMP0_TSTL,      /* Time Stamp [15:0] */
++//	XRX200_FDMA_TSTAMP1,           /* Egress TimeStamp Register 1 */
++//	XRX200_FDMA_TSTAMP1_TSTH,      /* Time Stamp [31:16] */
++//	XRX200_SDMA_CTRL,              /* Ethernet Switch StoreDMA Control Register */
++//	XRX200_SDMA_CTRL_TSTEN,        /* Time Stamp Enable */
++//	XRX200_SDMA_FCTHR1,            /* SDMA Flow Control Threshold1 Register */
++//	XRX200_SDMA_FCTHR1_THR1,       /* Threshold 1 */
++//	XRX200_SDMA_FCTHR2,            /* SDMA Flow Control Threshold2 Register */
++//	XRX200_SDMA_FCTHR2_THR2,       /* Threshold 2 */
++//	XRX200_SDMA_FCTHR3,            /* SDMA Flow Control Threshold3 Register */
++//	XRX200_SDMA_FCTHR3_THR3,       /* Threshold 3 */
++//	XRX200_SDMA_FCTHR4,            /* SDMA Flow Control Threshold4 Register */
++//	XRX200_SDMA_FCTHR4_THR4,       /* Threshold 4 */
++//	XRX200_SDMA_FCTHR5,            /* SDMA Flow Control Threshold5 Register */
++//	XRX200_SDMA_FCTHR5_THR5,       /* Threshold 5 */
++//	XRX200_SDMA_FCTHR6,            /* SDMA Flow Control Threshold6 Register */
++//	XRX200_SDMA_FCTHR6_THR6,       /* Threshold 6 */
++//	XRX200_SDMA_FCTHR7,            /* SDMA Flow Control Threshold7 Register */
++//	XRX200_SDMA_FCTHR7_THR7,       /* Threshold 7 */
++//	XRX200_SDMA_STAT_0,            /* SDMA Status Register0 */
++//	XRX200_SDMA_STAT_0_BPS_FILL,   /* Back Pressure Status */
++//	XRX200_SDMA_STAT_0_BPS_PNT,    /* Back Pressure Status */
++//	XRX200_SDMA_STAT_0_DROP,       /* Back Pressure Status */
++//	XRX200_SDMA_STAT_1,            /* SDMA Status Register1 */
++//	XRX200_SDMA_STAT_1_FILL,       /* Buffer Filling Level */
++//	XRX200_SDMA_STAT_2,            /* SDMA Status Register2 */
++//	XRX200_SDMA_STAT_2_FSMS,       /* FSM states status */
++//	XRX200_SDMA_IER,               /* SDMA Interrupt Enable Register */
++//	XRX200_SDMA_IER_BPEX,          /* Buffer Pointers Exceeded */
++//	XRX200_SDMA_IER_BFULL,         /* Buffer Full */
++//	XRX200_SDMA_IER_FERR,          /* Frame Error */
++//	XRX200_SDMA_IER_FRX,           /* Frame Received Successfully */
++//	XRX200_SDMA_ISR,               /* SDMA Interrupt Status Register */
++//	XRX200_SDMA_ISR_BPEX,          /* Packet Descriptors Exceeded */
++//	XRX200_SDMA_ISR_BFULL,         /* Buffer Full */
++//	XRX200_SDMA_ISR_FERR,          /* Frame Error */
++//	XRX200_SDMA_ISR_FRX,           /* Frame Received Successfully */
++//	XRX200_SDMA_PCTRL,             /* Ethernet SwitchStore DMA Port Control Register */
++//	XRX200_SDMA_PCTRL_DTHR,        /* Drop Threshold Selection */
++//	XRX200_SDMA_PCTRL_PTHR,        /* Pause Threshold Selection */
++//	XRX200_SDMA_PCTRL_PHYEFWD,     /* Forward PHY Error Frames */
++//	XRX200_SDMA_PCTRL_ALGFWD,      /* Forward Alignment Error Frames */
++//	XRX200_SDMA_PCTRL_LENFWD,      /* Forward Length Errored Frames */
++//	XRX200_SDMA_PCTRL_OSFWD,       /* Forward Oversized Frames */
++//	XRX200_SDMA_PCTRL_USFWD,       /* Forward Undersized Frames */
++//	XRX200_SDMA_PCTRL_FCSIGN,      /* Ignore FCS Errors */
++//	XRX200_SDMA_PCTRL_FCSFWD,      /* Forward FCS Errored Frames */
++//	XRX200_SDMA_PCTRL_PAUFWD,      /* Pause Frame Forwarding */
++//	XRX200_SDMA_PCTRL_MFCEN,       /* Metering Flow Control Enable */
++//	XRX200_SDMA_PCTRL_FCEN,        /* Flow Control Enable */
++//	XRX200_SDMA_PCTRL_PEN,         /* Port Enable */
++//	XRX200_SDMA_PRIO,              /* Ethernet SwitchStore DMA Port Priority Register */
++//	XRX200_SDMA_PRIO_PRIO,         /* SDMA PRIO */
++//	XRX200_SDMA_PSTAT0_HDR,        /* Ethernet SwitchStore DMA Port Status Register 0 */
++//	XRX200_SDMA_PSTAT0_HDR_PTR,    /* Port Ingress Queue Header Pointer */
++//	XRX200_SDMA_PSTAT1,            /* Ethernet SwitchStore DMA Port Status Register 1 */
++//	XRX200_SDMA_PSTAT1_PPKT,       /* Port Ingress Packet Count */
++//	XRX200_SDMA_TSTAMP0,           /* Ingress TimeStamp Register 0 */
++//	XRX200_SDMA_TSTAMP0_TSTL,      /* Time Stamp [15:0] */
++//	XRX200_SDMA_TSTAMP1,           /* Ingress TimeStamp Register 1 */
++//	XRX200_SDMA_TSTAMP1_TSTH,      /* Time Stamp [31:16] */
++};
++
++
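++/*
++ * Each entry below appears to describe one switch register field as
++ * {offset, shift, size, mult}: 'offset' is the byte offset of the register
++ * in the switch register space, 'shift' and 'size' select the bit field
++ * within it, and 'mult' looks like a per-port stride (e.g. the MAC port
++ * status registers at 0x2400 repeat every 0x30 bytes, the PCE port control
++ * registers at 0x1200 every 0x28 bytes). A per-port field access would then
++ * presumably read the word at offset + port * mult and extract 'size' bits
++ * starting at bit 'shift'.
++ */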
++struct xrx200sw_reg {
++	int offset;
++	int shift;
++	int size;
++	int mult;
++} xrx200sw_reg[] = {
++//	offset	     shift    size	mult
++//	{0x0000,	 0,	16,	0x00}, /* XRX200_ETHSW_SWRES             Ethernet Switch ResetControl Register */
++//	{0x0000,	 1,	 1,	0x00}, /* XRX200_ETHSW_SWRES_R1          Hardware Reset */
++//	{0x0000,	 0,	 1,	0x00}, /* XRX200_ETHSW_SWRES_R0          Register Configuration */
++//	{0x0004,	 0,	16,	0x00}, /* XRX200_ETHSW_CLK_MAC_GAT       Ethernet Switch Clock ControlRegister  */
++//	{0x0004,	12,	 4,	0x00}, /* XRX200_ETHSW_CLK_EXP_SLEEP     Exponent to put system into sleep */
++//	{0x0004,	 8,	 4,	0x00}, /* XRX200_ETHSW_CLK_EXP_WAKE      Exponent to wake up system */
++//	{0x0004,	 7,	 1,	0x00}, /* XRX200_ETHSW_CLK_CLK2_EN       CLK2 Input for MAC */
++//	{0x0004,	 6,	 1,	0x00}, /* XRX200_ETHSW_CLK_EXT_DIV_EN    External Clock Divider Enable */
++//	{0x0004,	 5,	 1,	0x00}, /* XRX200_ETHSW_CLK_RAM_DBG_EN    Clock Gating Enable */
++//	{0x0004,	 4,	 1,	0x00}, /* XRX200_ETHSW_CLK_REG_GAT_EN    Clock Gating Enable */
++//	{0x0004,	 3,	 1,	0x00}, /* XRX200_ETHSW_CLK_GAT_EN        Clock Gating Enable */
++//	{0x0004,	 2,	 1,	0x00}, /* XRX200_ETHSW_CLK_MAC_GAT_EN    Clock Gating Enable */
++//	{0x0008,	 0,	16,	0x00}, /* XRX200_ETHSW_DBG_STEP          Ethernet Switch Debug ControlRegister */
++//	{0x0008,	12,	 4,	0x00}, /* XRX200_ETHSW_DBG_CLK_SEL       Trigger Enable */
++//	{0x0008,	11,	 1,	0x00}, /* XRX200_ETHSW_DBG_MON_EN        Monitoring Enable */
++//	{0x0008,	 9,	 2,	0x00}, /* XRX200_ETHSW_DBG_TRIG_EN       Trigger Enable */
++//	{0x0008,	 8,	 1,	0x00}, /* XRX200_ETHSW_DBG_MODE          Debug Mode */
++//	{0x0008,	 0,	 8,	0x00}, /* XRX200_ETHSW_DBG_STEP_TIME     Clock Step Size */
++//	{0x000C,	 0,	16,	0x00}, /* XRX200_ETHSW_SSB_MODE          Ethernet Switch SharedSegment Buffer Mode Register */
++//	{0x000C,	 2,	 4,	0x00}, /* XRX200_ETHSW_SSB_MODE_ADDE     Memory Address */
++//	{0x000C,	 0,	 2,	0x00}, /* XRX200_ETHSW_SSB_MODE_MODE     Memory Access Mode */
++//	{0x0010,	 0,	16,	0x00}, /* XRX200_ETHSW_SSB_ADDR          Ethernet Switch SharedSegment Buffer Address Register */
++//	{0x0010,	 0,	16,	0x00}, /* XRX200_ETHSW_SSB_ADDR_ADDE     Memory Address */
++//	{0x0014,	 0,	16,	0x00}, /* XRX200_ETHSW_SSB_DATA          Ethernet Switch SharedSegment Buffer Data Register */
++//	{0x0014,	 0,	16,	0x00}, /* XRX200_ETHSW_SSB_DATA_DATA     Data Value */
++//	{0x0018,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_0             Ethernet Switch CapabilityRegister 0 */
++//	{0x0018,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_0_SPEED       Clock frequency */
++//	{0x001C,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_1             Ethernet Switch CapabilityRegister 1 */
++//	{0x001C,	15,	 1,	0x00}, /* XRX200_ETHSW_CAP_1_GMAC        MAC operation mode */
++//	{0x001C,	 8,	 7,	0x00}, /* XRX200_ETHSW_CAP_1_QUEUE       Number of queues */
++//	{0x001C,	 4,	 4,	0x00}, /* XRX200_ETHSW_CAP_1_VPORTS      Number of virtual ports */
++//	{0x001C,	 0,	 4,	0x00}, /* XRX200_ETHSW_CAP_1_PPORTS      Number of physical ports */
++//	{0x0020,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_2             Ethernet Switch CapabilityRegister 2 */
++//	{0x0020,	 0,	11,	0x00}, /* XRX200_ETHSW_CAP_2_PACKETS     Number of packets */
++//	{0x0024,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_3             Ethernet Switch CapabilityRegister 3 */
++//	{0x0024,	 8,	 8,	0x00}, /* XRX200_ETHSW_CAP_3_METERS      Number of traffic meters */
++//	{0x0024,	 0,	 8,	0x00}, /* XRX200_ETHSW_CAP_3_SHAPERS     Number of traffic shapers */
++//	{0x0028,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_4             Ethernet Switch CapabilityRegister 4 */
++//	{0x0028,	 8,	 8,	0x00}, /* XRX200_ETHSW_CAP_4_PPPOE       PPPoE table size */
++//	{0x0028,	 0,	 8,	0x00}, /* XRX200_ETHSW_CAP_4_VLAN        Active VLAN table size */
++//	{0x002C,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_5             Ethernet Switch CapabilityRegister 5 */
++//	{0x002C,	 8,	 8,	0x00}, /* XRX200_ETHSW_CAP_5_IPPLEN      IP packet length table size */
++//	{0x002C,	 0,	 8,	0x00}, /* XRX200_ETHSW_CAP_5_PROT        Protocol table size */
++//	{0x0030,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_6             Ethernet Switch CapabilityRegister 6 */
++//	{0x0030,	 8,	 8,	0x00}, /* XRX200_ETHSW_CAP_6_MACDASA     MAC DA/SA table size */
++//	{0x0030,	 0,	 8,	0x00}, /* XRX200_ETHSW_CAP_6_APPL        Application table size */
++//	{0x0034,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_7             Ethernet Switch CapabilityRegister 7 */
++//	{0x0034,	 8,	 8,	0x00}, /* XRX200_ETHSW_CAP_7_IPDASAM     IP DA/SA MSB table size */
++//	{0x0034,	 0,	 8,	0x00}, /* XRX200_ETHSW_CAP_7_IPDASAL     IP DA/SA LSB table size */
++//	{0x0038,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_8             Ethernet Switch CapabilityRegister 8 */
++//	{0x0038,	 0,	 8,	0x00}, /* XRX200_ETHSW_CAP_8_MCAST       Multicast table size */
++//	{0x003C,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_9             Ethernet Switch CapabilityRegister 9 */
++//	{0x003C,	 0,	 8,	0x00}, /* XRX200_ETHSW_CAP_9_FLAGG       Flow Aggregation table size */
++//	{0x0040,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_10            Ethernet Switch CapabilityRegister 10 */
++//	{0x0040,	 0,	13,	0x00}, /* XRX200_ETHSW_CAP_10_MACBT      MAC bridging table size */
++//	{0x0044,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_11            Ethernet Switch CapabilityRegister 11 */
++//	{0x0044,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_11_BSIZEL     Packet buffer size (lower part, in byte) */
++//	{0x0048,	 0,	16,	0x00}, /* XRX200_ETHSW_CAP_12            Ethernet Switch CapabilityRegister 12 */
++//	{0x0048,	 0,	 3,	0x00}, /* XRX200_ETHSW_CAP_12_BSIZEH     Packet buffer size (higher part, in byte) */
++//	{0x004C,	 0,	16,	0x00}, /* XRX200_ETHSW_VERSION_REV       Ethernet Switch VersionRegister */
++//	{0x004C,	 8,	 8,	0x00}, /* XRX200_ETHSW_VERSION_MOD_ID    Module Identification */
++//	{0x004C,	 0,	 8,	0x00}, /* XRX200_ETHSW_VERSION_REV_ID    Hardware Revision Identification */
++//	{0x0050,	 0,	16,	0x00}, /* XRX200_ETHSW_IER               Interrupt Enable Register */
++//	{0x0050,	 4,	 1,	0x00}, /* XRX200_ETHSW_IER_FDMAIE        Fetch DMA Interrupt Enable */
++//	{0x0050,	 3,	 1,	0x00}, /* XRX200_ETHSW_IER_SDMAIE        Store DMA Interrupt Enable */
++//	{0x0050,	 2,	 1,	0x00}, /* XRX200_ETHSW_IER_MACIE         Ethernet MAC Interrupt Enable */
++//	{0x0050,	 1,	 1,	0x00}, /* XRX200_ETHSW_IER_PCEIE         Parser and Classification Engine Interrupt Enable */
++//	{0x0050,	 0,	 1,	0x00}, /* XRX200_ETHSW_IER_BMIE          Buffer Manager Interrupt Enable */
++//	{0x0054,	 0,	16,	0x00}, /* XRX200_ETHSW_ISR               Interrupt Status Register */
++//	{0x0054,	 4,	 1,	0x00}, /* XRX200_ETHSW_ISR_FDMAINT       Fetch DMA Interrupt */
++//	{0x0054,	 3,	 1,	0x00}, /* XRX200_ETHSW_ISR_SDMAINT       Store DMA Interrupt */
++//	{0x0054,	 2,	 1,	0x00}, /* XRX200_ETHSW_ISR_MACINT        Ethernet MAC Interrupt */
++//	{0x0054,	 1,	 1,	0x00}, /* XRX200_ETHSW_ISR_PCEINT        Parser and Classification Engine Interrupt */
++//	{0x0054,	 0,	 1,	0x00}, /* XRX200_ETHSW_ISR_BMINT         Buffer Manager Interrupt */
++//	{0x0058,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_0           Ethernet Switch SpareCells 0 */
++//	{0x0058,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_0_SPARE     SPARE0  */
++//	{0x005C,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_1           Ethernet Switch SpareCells 1 */
++//	{0x005C,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_1_SPARE     SPARE1  */
++//	{0x0060,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_2           Ethernet Switch SpareCells 2 */
++//	{0x0060,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_2_SPARE     SPARE2  */
++//	{0x0064,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_3           Ethernet Switch SpareCells 3 */
++//	{0x0064,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_3_SPARE     SPARE3  */
++//	{0x0068,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_4           Ethernet Switch SpareCells 4 */
++//	{0x0068,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_4_SPARE     SPARE4  */
++//	{0x006C,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_5           Ethernet Switch SpareCells 5 */
++//	{0x006C,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_5_SPARE     SPARE5  */
++//	{0x0070,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_6           Ethernet Switch SpareCells 6 */
++//	{0x0070,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_6_SPARE     SPARE6  */
++//	{0x0074,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_7           Ethernet Switch SpareCells 7 */
++//	{0x0074,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_7_SPARE     SPARE7  */
++//	{0x0078,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_8           Ethernet Switch SpareCells 8 */
++//	{0x0078,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_8_SPARE     SPARE8  */
++//	{0x007C,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_9           Ethernet Switch SpareCells 9 */
++//	{0x007C,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_9_SPARE     SPARE9  */
++//	{0x0080,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_10          Ethernet Switch SpareCells 10 */
++//	{0x0080,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_10_SPARE    SPARE10  */
++//	{0x0084,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_11          Ethernet Switch SpareCells 11 */
++//	{0x0084,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_11_SPARE    SPARE11  */
++//	{0x0088,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_12          Ethernet Switch SpareCells 12 */
++//	{0x0088,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_12_SPARE    SPARE12  */
++//	{0x008C,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_13          Ethernet Switch SpareCells 13 */
++//	{0x008C,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_13_SPARE    SPARE13  */
++//	{0x0090,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_14          Ethernet Switch SpareCells 14 */
++//	{0x0090,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_14_SPARE    SPARE14  */
++//	{0x0094,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_15          Ethernet Switch SpareCells 15 */
++//	{0x0094,	 0,	16,	0x00}, /* XRX200_ETHSW_SPARE_15_SPARE    SPARE15  */
++//	{0x0100,	 0,	16,	0x00}, /* XRX200_BM_RAM_VAL_3            RAM Value Register 3 */
++//	{0x0100,	 0,	16,	0x00}, /* XRX200_BM_RAM_VAL_3_VAL3       Data value [15:0] */
++//	{0x0104,	 0,	16,	0x00}, /* XRX200_BM_RAM_VAL_2            RAM Value Register 2 */
++//	{0x0104,	 0,	16,	0x00}, /* XRX200_BM_RAM_VAL_2_VAL2       Data value [15:0] */
++//	{0x0108,	 0,	16,	0x00}, /* XRX200_BM_RAM_VAL_1            RAM Value Register 1 */
++//	{0x0108,	 0,	16,	0x00}, /* XRX200_BM_RAM_VAL_1_VAL1       Data value [15:0] */
++//	{0x010C,	 0,	16,	0x00}, /* XRX200_BM_RAM_VAL_0            RAM Value Register 0 */
++//	{0x010C,	 0,	16,	0x00}, /* XRX200_BM_RAM_VAL_0_VAL0       Data value [15:0] */
++//	{0x0110,	 0,	16,	0x00}, /* XRX200_BM_RAM_ADDR             RAM Address Register */
++//	{0x0110,	 0,	11,	0x00}, /* XRX200_BM_RAM_ADDR_ADDR        RAM Address */
++//	{0x0114,	 0,	16,	0x00}, /* XRX200_BM_RAM_CTRL             RAM Access Control Register */
++//	{0x0114,	15,	 1,	0x00}, /* XRX200_BM_RAM_CTRL_BAS         Access Busy/Access Start */
++//	{0x0114,	 5,	 1,	0x00}, /* XRX200_BM_RAM_CTRL_OPMOD       Lookup Table Access Operation Mode */
++//	{0x0114,	 0,	 5,	0x00}, /* XRX200_BM_RAM_CTRL_ADDR        Address for RAM selection */
++//	{0x0118,	 0,	16,	0x00}, /* XRX200_BM_FSQM_GCTRL           Free Segment Queue ManagerGlobal Control Register */
++//	{0x0118,	 0,	10,	0x00}, /* XRX200_BM_FSQM_GCTRL_SEGNUM    Maximum Segment Number */
++//	{0x011C,	 0,	16,	0x00}, /* XRX200_BM_CONS_SEG             Number of Consumed SegmentsRegister */
++//	{0x011C,	 0,	10,	0x00}, /* XRX200_BM_CONS_SEG_FSEG        Number of Consumed Segments */
++//	{0x0120,	 0,	16,	0x00}, /* XRX200_BM_CONS_PKT             Number of Consumed PacketPointers Register */
++//	{0x0120,	 0,	11,	0x00}, /* XRX200_BM_CONS_PKT_FQP         Number of Consumed Packet Pointers */
++//	{0x0124,	 0,	16,	0x00}, /* XRX200_BM_GCTRL_F              Buffer Manager Global ControlRegister 0 */
++//	{0x0124,	13,	 1,	0x00}, /* XRX200_BM_GCTRL_BM_STA         Buffer Manager Initialization Status Bit */
++//	{0x0124,	12,	 1,	0x00}, /* XRX200_BM_GCTRL_SAT            RMON Counter Update Mode */
++//	{0x0124,	11,	 1,	0x00}, /* XRX200_BM_GCTRL_FR_RBC         Freeze RMON RX Bad Byte 64 Bit Counter */
++//	{0x0124,	10,	 1,	0x00}, /* XRX200_BM_GCTRL_FR_RGC         Freeze RMON RX Good Byte 64 Bit Counter */
++//	{0x0124,	 9,	 1,	0x00}, /* XRX200_BM_GCTRL_FR_TGC         Freeze RMON TX Good Byte 64 Bit Counter */
++//	{0x0124,	 8,	 1,	0x00}, /* XRX200_BM_GCTRL_I_FIN          RAM initialization finished */
++//	{0x0124,	 7,	 1,	0x00}, /* XRX200_BM_GCTRL_CX_INI         PQM Context RAM initialization */
++//	{0x0124,	 6,	 1,	0x00}, /* XRX200_BM_GCTRL_FP_INI         FPQM RAM initialization */
++//	{0x0124,	 5,	 1,	0x00}, /* XRX200_BM_GCTRL_FS_INI         FSQM RAM initialization */
++//	{0x0124,	 4,	 1,	0x00}, /* XRX200_BM_GCTRL_R_SRES         Software Reset for RMON */
++//	{0x0124,	 3,	 1,	0x00}, /* XRX200_BM_GCTRL_S_SRES         Software Reset for Scheduler */
++//	{0x0124,	 2,	 1,	0x00}, /* XRX200_BM_GCTRL_A_SRES         Software Reset for AVG */
++//	{0x0124,	 1,	 1,	0x00}, /* XRX200_BM_GCTRL_P_SRES         Software Reset for PQM */
++//	{0x0124,	 0,	 1,	0x00}, /* XRX200_BM_GCTRL_F_SRES         Software Reset for FSQM */
++//	{0x0128,	 0,	16,	0x00}, /* XRX200_BM_QUEUE_GCTRL          Queue Manager GlobalControl Register 0 */
++//	{0x0128,	10,	 1,	0x00}, /* XRX200_BM_QUEUE_GCTRL_GL_MOD   WRED Mode Signal */
++//	{0x0128,	 7,	 3,	0x00}, /* XRX200_BM_QUEUE_GCTRL_AQUI     Average Queue Update Interval */
++//	{0x0128,	 3,	 4,	0x00}, /* XRX200_BM_QUEUE_GCTRL_AQWF     Average Queue Weight Factor */
++//	{0x0128,	 2,	 1,	0x00}, /* XRX200_BM_QUEUE_GCTRL_QAVGEN   Queue Average Calculation Enable */
++//	{0x0128,	 0,	 2,	0x00}, /* XRX200_BM_QUEUE_GCTRL_DPROB    Drop Probability Profile */
++//	{0x012C,	 0,	16,	0x00}, /* XRX200_BM_WRED_RTH_0           WRED Red Threshold Register0 */
++//	{0x012C,	 0,	10,	0x00}, /* XRX200_BM_WRED_RTH_0_MINTH     Minimum Threshold */
++//	{0x0130,	 0,	16,	0x00}, /* XRX200_BM_WRED_RTH_1           WRED Red Threshold Register1 */
++//	{0x0130,	 0,	10,	0x00}, /* XRX200_BM_WRED_RTH_1_MAXTH     Maximum Threshold */
++//	{0x0134,	 0,	16,	0x00}, /* XRX200_BM_WRED_YTH_0           WRED Yellow ThresholdRegister 0 */
++//	{0x0134,	 0,	10,	0x00}, /* XRX200_BM_WRED_YTH_0_MINTH     Minimum Threshold */
++//	{0x0138,	 0,	16,	0x00}, /* XRX200_BM_WRED_YTH_1           WRED Yellow ThresholdRegister 1 */
++//	{0x0138,	 0,	10,	0x00}, /* XRX200_BM_WRED_YTH_1_MAXTH     Maximum Threshold */
++//	{0x013C,	 0,	16,	0x00}, /* XRX200_BM_WRED_GTH_0           WRED Green ThresholdRegister 0 */
++//	{0x013C,	 0,	10,	0x00}, /* XRX200_BM_WRED_GTH_0_MINTH     Minimum Threshold */
++//	{0x0140,	 0,	16,	0x00}, /* XRX200_BM_WRED_GTH_1           WRED Green ThresholdRegister 1 */
++//	{0x0140,	 0,	10,	0x00}, /* XRX200_BM_WRED_GTH_1_MAXTH     Maximum Threshold */
++//	{0x0144,	 0,	16,	0x00}, /* XRX200_BM_DROP_GTH_0_THR       Drop Threshold ConfigurationRegister 0 */
++//	{0x0144,	 0,	11,	0x00}, /* XRX200_BM_DROP_GTH_0_THR_FQ    Threshold for frames marked red */
++//	{0x0148,	 0,	16,	0x00}, /* XRX200_BM_DROP_GTH_1_THY       Drop Threshold ConfigurationRegister 1 */
++//	{0x0148,	 0,	11,	0x00}, /* XRX200_BM_DROP_GTH_1_THY_FQ    Threshold for frames marked yellow */
++//	{0x014C,	 0,	16,	0x00}, /* XRX200_BM_DROP_GTH_2_THG       Drop Threshold ConfigurationRegister 2 */
++//	{0x014C,	 0,	11,	0x00}, /* XRX200_BM_DROP_GTH_2_THG_FQ    Threshold for frames marked green */
++//	{0x0150,	 0,	16,	0x00}, /* XRX200_BM_IER                  Buffer Manager Global InterruptEnable Register */
++//	{0x0150,	 7,	 1,	0x00}, /* XRX200_BM_IER_CNT4             Counter Group 4 (RMON-CLASSIFICATION) Interrupt Enable */
++//	{0x0150,	 6,	 1,	0x00}, /* XRX200_BM_IER_CNT3             Counter Group 3 (RMON-PQM) Interrupt Enable */
++//	{0x0150,	 5,	 1,	0x00}, /* XRX200_BM_IER_CNT2             Counter Group 2 (RMON-SCHEDULER) Interrupt Enable */
++//	{0x0150,	 4,	 1,	0x00}, /* XRX200_BM_IER_CNT1             Counter Group 1 (RMON-QFETCH) Interrupt Enable */
++//	{0x0150,	 3,	 1,	0x00}, /* XRX200_BM_IER_CNT0             Counter Group 0 (RMON-QSTOR) Interrupt Enable */
++//	{0x0150,	 2,	 1,	0x00}, /* XRX200_BM_IER_DEQ              PQM dequeue Interrupt Enable */
++//	{0x0150,	 1,	 1,	0x00}, /* XRX200_BM_IER_ENQ              PQM Enqueue Interrupt Enable */
++//	{0x0150,	 0,	 1,	0x00}, /* XRX200_BM_IER_FSQM             Buffer Empty Interrupt Enable */
++//	{0x0154,	 0,	16,	0x00}, /* XRX200_BM_ISR                  Buffer Manager Global InterruptStatus Register */
++//	{0x0154,	 7,	 1,	0x00}, /* XRX200_BM_ISR_CNT4             Counter Group 4 Interrupt */
++//	{0x0154,	 6,	 1,	0x00}, /* XRX200_BM_ISR_CNT3             Counter Group 3 Interrupt */
++//	{0x0154,	 5,	 1,	0x00}, /* XRX200_BM_ISR_CNT2             Counter Group 2 Interrupt */
++//	{0x0154,	 4,	 1,	0x00}, /* XRX200_BM_ISR_CNT1             Counter Group 1 Interrupt */
++//	{0x0154,	 3,	 1,	0x00}, /* XRX200_BM_ISR_CNT0             Counter Group 0 Interrupt */
++//	{0x0154,	 2,	 1,	0x00}, /* XRX200_BM_ISR_DEQ              PQM dequeue Interrupt Enable */
++//	{0x0154,	 1,	 1,	0x00}, /* XRX200_BM_ISR_ENQ              PQM Enqueue Interrupt */
++//	{0x0154,	 0,	 1,	0x00}, /* XRX200_BM_ISR_FSQM             Buffer Empty Interrupt */
++//	{0x0158,	 0,	16,	0x00}, /* XRX200_BM_CISEL                Buffer Manager RMON CounterInterrupt Select Register */
++//	{0x0158,	 0,	 3,	0x00}, /* XRX200_BM_CISEL_PORT           Port Number */
++//	{0x015C,	 0,	16,	0x00}, /* XRX200_BM_DEBUG_CTRL_DBG       Debug Control Register */
++//	{0x015C,	 0,	 8,	0x00}, /* XRX200_BM_DEBUG_CTRL_DBG_SEL   Select Signal for Debug Multiplexer */
++//	{0x0160,	 0,	16,	0x00}, /* XRX200_BM_DEBUG_VAL_DBG        Debug Value Register */
++//	{0x0160,	 0,	16,	0x00}, /* XRX200_BM_DEBUG_VAL_DBG_DAT    Debug Data Value */
++//	{0x0200,	 0,	16,	0x08}, /* XRX200_BM_PCFG                 Buffer Manager PortConfiguration Register */
++//	{0x0200,	 0,	 1,	0x08}, /* XRX200_BM_PCFG_CNTEN           RMON Counter Enable */
++//	{0x0204,	 0,	16,	0x08}, /* XRX200_BM_RMON_CTRL_RAM1       Buffer ManagerRMON Control Register */
++//	{0x0204,	 1,	 1,	0x08}, /* XRX200_BM_RMON_CTRL_RAM2_RES   Software Reset for RMON RAM2 */
++//	{0x0204,	 0,	 1,	0x08}, /* XRX200_BM_RMON_CTRL_RAM1_RES   Software Reset for RMON RAM1 */
++//	{0x0400,	 0,	16,	0x08}, /* XRX200_PQM_DP                  Packet Queue ManagerDrop Probability Register */
++//	{0x0400,	 0,	 2,	0x08}, /* XRX200_PQM_DP_DPROB            Drop Probability Profile */
++//	{0x0404,	 0,	16,	0x08}, /* XRX200_PQM_RS                  Packet Queue ManagerRate Shaper Assignment Register */
++//	{0x0404,	15,	 1,	0x08}, /* XRX200_PQM_RS_EN2              Rate Shaper 2 Enable */
++//	{0x0404,	 8,	 6,	0x08}, /* XRX200_PQM_RS_RS2              Rate Shaper 2 */
++//	{0x0404,	 7,	 1,	0x08}, /* XRX200_PQM_RS_EN1              Rate Shaper 1 Enable */
++//	{0x0404,	 0,	 6,	0x08}, /* XRX200_PQM_RS_RS1              Rate Shaper 1 */
++//	{0x0500,	 0,	16,	0x14}, /* XRX200_RS_CTRL                 Rate Shaper ControlRegister */
++//	{0x0500,	 0,	 1,	0x14}, /* XRX200_RS_CTRL_RSEN            Rate Shaper Enable */
++//	{0x0504,	 0,	16,	0x14}, /* XRX200_RS_CBS                  Rate Shaper CommittedBurst Size Register */
++//	{0x0504,	 0,	10,	0x14}, /* XRX200_RS_CBS_CBS              Committed Burst Size */
++//	{0x0508,	 0,	16,	0x14}, /* XRX200_RS_IBS                  Rate Shaper InstantaneousBurst Size Register */
++//	{0x0508,	 0,	 2,	0x14}, /* XRX200_RS_IBS_IBS              Instantaneous Burst Size */
++//	{0x050C,	 0,	16,	0x14}, /* XRX200_RS_CIR_EXP              Rate Shaper RateExponent Register */
++//	{0x050C,	 0,	 4,	0x14}, /* XRX200_RS_CIR_EXP_EXP          Exponent */
++//	{0x0510,	 0,	16,	0x14}, /* XRX200_RS_CIR_MANT             Rate Shaper RateMantissa Register */
++//	{0x0510,	 0,	10,	0x14}, /* XRX200_RS_CIR_MANT_MANT        Mantissa */
++	{0x1100,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_7           Table Key Data 7 */
++//	{0x1100,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_7_KEY7      Key Value[15:0] */
++	{0x1104,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_6           Table Key Data 6 */
++//	{0x1104,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_6_KEY6      Key Value[15:0] */
++	{0x1108,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_5           Table Key Data 5 */
++//	{0x1108,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_5_KEY5      Key Value[15:0] */
++	{0x110C,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_4           Table Key Data 4 */
++//	{0x110C,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_4_KEY4      Key Value[15:0] */
++	{0x1110,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_3           Table Key Data 3 */
++//	{0x1110,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_3_KEY3      Key Value[15:0] */
++	{0x1114,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_2           Table Key Data 2 */
++//	{0x1114,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_2_KEY2      Key Value[15:0] */
++	{0x1118,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_1           Table Key Data 1 */
++//	{0x1118,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_1_KEY1      Key Value[31:16] */
++	{0x111C,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_0           Table Key Data 0 */
++//	{0x111C,	 0,	16,	0x00}, /* XRX200_PCE_TBL_KEY_0_KEY0      Key Value[15:0] */
++	{0x1120,	 0,	16,	0x00}, /* XRX200_PCE_TBL_MASK_0          Table Mask Write Register0 */
++//	{0x1120,	 0,	16,	0x00}, /* XRX200_PCE_TBL_MASK_0_MASK0    Mask Pattern [15:0] */
++	{0x1124,	 0,	16,	0x00}, /* XRX200_PCE_TBL_VAL_4           Table Value Register4 */
++//	{0x1124,	 0,	16,	0x00}, /* XRX200_PCE_TBL_VAL_4_VAL4      Data value [15:0] */
++	{0x1128,	 0,	16,	0x00}, /* XRX200_PCE_TBL_VAL_3           Table Value Register3 */
++//	{0x1128,	 0,	16,	0x00}, /* XRX200_PCE_TBL_VAL_3_VAL3      Data value [15:0] */
++	{0x112C,	 0,	16,	0x00}, /* XRX200_PCE_TBL_VAL_2           Table Value Register2 */
++//	{0x112C,	 0,	16,	0x00}, /* XRX200_PCE_TBL_VAL_2_VAL2      Data value [15:0] */
++	{0x1130,	 0,	16,	0x00}, /* XRX200_PCE_TBL_VAL_1           Table Value Register1 */
++//	{0x1130,	 0,	16,	0x00}, /* XRX200_PCE_TBL_VAL_1_VAL1      Data value [15:0] */
++	{0x1134,	 0,	16,	0x00}, /* XRX200_PCE_TBL_VAL_0           Table Value Register0 */
++//	{0x1134,	 0,	16,	0x00}, /* XRX200_PCE_TBL_VAL_0_VAL0      Data value [15:0] */
++//	{0x1138,	 0,	16,	0x00}, /* XRX200_PCE_TBL_ADDR            Table Entry AddressRegister */
++	{0x1138,	 0,	11,	0x00}, /* XRX200_PCE_TBL_ADDR_ADDR       Table Address */
++//	{0x113C,	 0,	16,	0x00}, /* XRX200_PCE_TBL_CTRL            Table Access ControlRegister */
++	{0x113C,	15,	 1,	0x00}, /* XRX200_PCE_TBL_CTRL_BAS        Access Busy/Access Start */
++	{0x113C,	13,	 1,	0x00}, /* XRX200_PCE_TBL_CTRL_TYPE       Lookup Entry Type */
++	{0x113C,	12,	 1,	0x00}, /* XRX200_PCE_TBL_CTRL_VLD        Lookup Entry Valid */
++	{0x113C,	 7,	 4,	0x00}, /* XRX200_PCE_TBL_CTRL_GMAP       Group Map */
++	{0x113C,	 5,	 2,	0x00}, /* XRX200_PCE_TBL_CTRL_OPMOD      Lookup Table Access Operation Mode */
++	{0x113C,	 0,	 5,	0x00}, /* XRX200_PCE_TBL_CTRL_ADDR       Lookup Table Address */
++//	{0x1140,	 0,	16,	0x00}, /* XRX200_PCE_TBL_STAT            Table General StatusRegister */
++//	{0x1140,	 2,	 1,	0x00}, /* XRX200_PCE_TBL_STAT_TBUSY      Table Access Busy */
++//	{0x1140,	 1,	 1,	0x00}, /* XRX200_PCE_TBL_STAT_TEMPT      Table Empty */
++//	{0x1140,	 0,	 1,	0x00}, /* XRX200_PCE_TBL_STAT_TFUL       Table Full */
++//	{0x1144,	 0,	16,	0x00}, /* XRX200_PCE_AGE_0               Aging Counter ConfigurationRegister 0 */
++//	{0x1144,	 0,	 4,	0x00}, /* XRX200_PCE_AGE_0_EXP           Aging Counter Exponent Value  */
++//	{0x1148,	 0,	16,	0x00}, /* XRX200_PCE_AGE_1               Aging Counter ConfigurationRegister 1 */
++//	{0x1148,	 0,	16,	0x00}, /* XRX200_PCE_AGE_1_MANT          Aging Counter Mantissa Value  */
++//	{0x114C,	 0,	16,	0x00}, /* XRX200_PCE_PMAP_1              Port Map Register 1 */
++//	{0x114C,	 0,	16,	0x00}, /* XRX200_PCE_PMAP_1_MPMAP        Monitoring Port Map */
++//	{0x1150,	 0,	16,	0x00}, /* XRX200_PCE_PMAP_2              Port Map Register 2 */
++//	{0x1150,	 0,	16,	0x00}, /* XRX200_PCE_PMAP_2_DMCPMAP      Default Multicast Port Map */
++//	{0x1154,	 0,	16,	0x00}, /* XRX200_PCE_PMAP_3              Port Map Register 3 */
++//	{0x1154,	 0,	16,	0x00}, /* XRX200_PCE_PMAP_3_UUCMAP       Default Unknown Unicast Port Map */
++//	{0x1158,	 0,	16,	0x00}, /* XRX200_PCE_GCTRL_0             PCE Global Control Register0 */
++//	{0x1158,	15,	 1,	0x00}, /* XRX200_PCE_GCTRL_0_IGMP        IGMP Mode Selection */
++	{0x1158,	14,	 1,	0x00}, /* XRX200_PCE_GCTRL_0_VLAN        VLAN-aware Switching */
++//	{0x1158,	13,	 1,	0x00}, /* XRX200_PCE_GCTRL_0_NOPM        No Port Map Forwarding */
++//	{0x1158,	12,	 1,	0x00}, /* XRX200_PCE_GCTRL_0_SCONUC      Unknown Unicast Storm Control */
++//	{0x1158,	11,	 1,	0x00}, /* XRX200_PCE_GCTRL_0_SCONMC      Multicast Storm Control */
++//	{0x1158,	10,	 1,	0x00}, /* XRX200_PCE_GCTRL_0_SCONBC      Broadcast Storm Control */
++//	{0x1158,	 8,	 2,	0x00}, /* XRX200_PCE_GCTRL_0_SCONMOD     Storm Control Mode */
++//	{0x1158,	 4,	 4,	0x00}, /* XRX200_PCE_GCTRL_0_SCONMET     Storm Control Metering Instance */
++//	{0x1158,	 3,	 1,	0x00}, /* XRX200_PCE_GCTRL_0_MC_VALID    Access Request */
++//	{0x1158,	 2,	 1,	0x00}, /* XRX200_PCE_GCTRL_0_PLCKMOD     Port Lock Mode */
++//	{0x1158,	 1,	 1,	0x00}, /* XRX200_PCE_GCTRL_0_PLIMMOD     MAC Address Learning Limitation Mode */
++//	{0x1158,	 0,	 1,	0x00}, /* XRX200_PCE_GCTRL_0_MTFL        MAC Table Flushing */
++//	{0x115C,	 0,	16,	0x00}, /* XRX200_PCE_GCTRL_1             PCE Global Control Register1 */
++//	{0x115C,	 1,	 1,	0x00}, /* XRX200_PCE_GCTRL_1_PCE_DIS     PCE Disable after currently processed packet */
++//	{0x115C,	 0,	 1,	0x00}, /* XRX200_PCE_GCTRL_1_LRNMOD      MAC Address Learning Mode */
++//	{0x1160,	 0,	16,	0x00}, /* XRX200_PCE_TCM_GLOB_CTRL       Three-color MarkerGlobal Control Register */
++//	{0x1160,	 6,	 3,	0x00}, /* XRX200_PCE_TCM_GLOB_CTRL_DPRED Re-marking Drop Precedence Red Encoding */
++//	{0x1160,	 3,	 3,	0x00}, /* XRX200_PCE_TCM_GLOB_CTRL_DPYEL Re-marking Drop Precedence Yellow Encoding */
++//	{0x1160,	 0,	 3,	0x00}, /* XRX200_PCE_TCM_GLOB_CTRL_DPGRN Re-marking Drop Precedence Green Encoding */
++//	{0x1164,	 0,	16,	0x00}, /* XRX200_PCE_IGMP_CTRL           IGMP Control Register */
++//	{0x1164,	15,	 1,	0x00}, /* XRX200_PCE_IGMP_CTRL_FAGEEN    Force Aging of Table Entries Enable */
++//	{0x1164,	14,	 1,	0x00}, /* XRX200_PCE_IGMP_CTRL_FLEAVE    Fast Leave Enable */
++//	{0x1164,	13,	 1,	0x00}, /* XRX200_PCE_IGMP_CTRL_DMRTEN    Default Maximum Response Time Enable */
++//	{0x1164,	12,	 1,	0x00}, /* XRX200_PCE_IGMP_CTRL_JASUP     Join Aggregation Suppression Enable */
++//	{0x1164,	11,	 1,	0x00}, /* XRX200_PCE_IGMP_CTRL_REPSUP    Report Suppression Enable */
++//	{0x1164,	10,	 1,	0x00}, /* XRX200_PCE_IGMP_CTRL_SRPEN     Snooping of Router Port Enable */
++//	{0x1164,	 8,	 2,	0x00}, /* XRX200_PCE_IGMP_CTRL_ROB       Robustness Variable */
++//	{0x1164,	 0,	 8,	0x00}, /* XRX200_PCE_IGMP_CTRL_DMRT      IGMP Default Maximum Response Time */
++//	{0x1168,	 0,	16,	0x00}, /* XRX200_PCE_IGMP_DRPM           IGMP Default RouterPort Map Register */
++//	{0x1168,	 0,	16,	0x00}, /* XRX200_PCE_IGMP_DRPM_DRPM      IGMP Default Router Port Map */
++//	{0x116C,	 0,	16,	0x00}, /* XRX200_PCE_IGMP_AGE_0          IGMP Aging Register0 */
++//	{0x116C,	 3,	 8,	0x00}, /* XRX200_PCE_IGMP_AGE_0_MANT     IGMP Group Aging Time Mantissa */
++//	{0x116C,	 0,	 3,	0x00}, /* XRX200_PCE_IGMP_AGE_0_EXP      IGMP Group Aging Time Exponent */
++//	{0x1170,	 0,	16,	0x00}, /* XRX200_PCE_IGMP_AGE_1          IGMP Aging Register1 */
++//	{0x1170,	 0,	12,	0x00}, /* XRX200_PCE_IGMP_AGE_1_MANT     IGMP Router Port Aging Time Mantissa */
++//	{0x1174,	 0,	16,	0x00}, /* XRX200_PCE_IGMP_STAT           IGMP Status Register */
++//	{0x1174,	 0,	16,	0x00}, /* XRX200_PCE_IGMP_STAT_IGPM      IGMP Port Map */
++//	{0x1178,	 0,	16,	0x00}, /* XRX200_WOL_GLB_CTRL            Wake-on-LAN ControlRegister */
++//	{0x1178,	 0,	 1,	0x00}, /* XRX200_WOL_GLB_CTRL_PASSEN     WoL Password Enable */
++//	{0x117C,	 0,	16,	0x00}, /* XRX200_WOL_DA_0                Wake-on-LAN DestinationAddress Register 0 */
++//	{0x117C,	 0,	16,	0x00}, /* XRX200_WOL_DA_0_DA0            WoL Destination Address [15:0] */
++//	{0x1180,	 0,	16,	0x00}, /* XRX200_WOL_DA_1                Wake-on-LAN DestinationAddress Register 1 */
++//	{0x1180,	 0,	16,	0x00}, /* XRX200_WOL_DA_1_DA1            WoL Destination Address [31:16] */
++//	{0x1184,	 0,	16,	0x00}, /* XRX200_WOL_DA_2                Wake-on-LAN DestinationAddress Register 2 */
++//	{0x1184,	 0,	16,	0x00}, /* XRX200_WOL_DA_2_DA2            WoL Destination Address [47:32] */
++//	{0x1188,	 0,	16,	0x00}, /* XRX200_WOL_PW_0                Wake-on-LAN Password Register0 */
++//	{0x1188,	 0,	16,	0x00}, /* XRX200_WOL_PW_0_PW0            WoL Password [15:0] */
++//	{0x118C,	 0,	16,	0x00}, /* XRX200_WOL_PW_1                Wake-on-LAN Password Register1 */
++//	{0x118C,	 0,	16,	0x00}, /* XRX200_WOL_PW_1_PW1            WoL Password [31:16] */
++//	{0x1190,	 0,	16,	0x00}, /* XRX200_WOL_PW_2                Wake-on-LAN Password Register2 */
++//	{0x1190,	 0,	16,	0x00}, /* XRX200_WOL_PW_2_PW2            WoL Password [47:32] */
++//	{0x1194,	 0,	16,	0x00}, /* XRX200_PCE_IER_0_PINT          Parser and ClassificationEngine Global Interrupt Enable Register 0 */
++//	{0x1194,	15,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_15       Port Interrupt Enable */
++//	{0x1194,	14,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_14       Port Interrupt Enable */
++//	{0x1194,	13,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_13       Port Interrupt Enable */
++//	{0x1194,	12,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_12       Port Interrupt Enable */
++//	{0x1194,	11,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_11       Port Interrupt Enable */
++//	{0x1194,	10,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_10       Port Interrupt Enable */
++//	{0x1194,	 9,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_9        Port Interrupt Enable */
++//	{0x1194,	 8,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_8        Port Interrupt Enable */
++//	{0x1194,	 7,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_7        Port Interrupt Enable */
++//	{0x1194,	 6,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_6        Port Interrupt Enable */
++//	{0x1194,	 5,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_5        Port Interrupt Enable */
++//	{0x1194,	 4,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_4        Port Interrupt Enable */
++//	{0x1194,	 3,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_3        Port Interrupt Enable */
++//	{0x1194,	 2,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_2        Port Interrupt Enable */
++//	{0x1194,	 1,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_1        Port Interrupt Enable */
++//	{0x1194,	 0,	 1,	0x00}, /* XRX200_PCE_IER_0_PINT_0        Port Interrupt Enable */
++//	{0x1198,	 0,	16,	0x00}, /* XRX200_PCE_IER_1               Parser and ClassificationEngine Global Interrupt Enable Register 1 */
++//	{0x1198,	 6,	 1,	0x00}, /* XRX200_PCE_IER_1_FLOWINT       Traffic Flow Table Interrupt Rule matched Interrupt Enable */
++//	{0x1198,	 5,	 1,	0x00}, /* XRX200_PCE_IER_1_CPH2          Classification Phase 2 Ready Interrupt Enable */
++//	{0x1198,	 4,	 1,	0x00}, /* XRX200_PCE_IER_1_CPH1          Classification Phase 1 Ready Interrupt Enable */
++//	{0x1198,	 3,	 1,	0x00}, /* XRX200_PCE_IER_1_CPH0          Classification Phase 0 Ready Interrupt Enable */
++//	{0x1198,	 2,	 1,	0x00}, /* XRX200_PCE_IER_1_PRDY          Parser Ready Interrupt Enable */
++//	{0x1198,	 1,	 1,	0x00}, /* XRX200_PCE_IER_1_IGTF          IGMP Table Full Interrupt Enable */
++//	{0x1198,	 0,	 1,	0x00}, /* XRX200_PCE_IER_1_MTF           MAC Table Full Interrupt Enable */
++//	{0x119C,	 0,	16,	0x00}, /* XRX200_PCE_ISR_0_PINT          Parser and ClassificationEngine Global Interrupt Status Register 0 */
++//	{0x119C,	15,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_15       Port Interrupt */
++//	{0x119C,	14,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_14       Port Interrupt */
++//	{0x119C,	13,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_13       Port Interrupt */
++//	{0x119C,	12,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_12       Port Interrupt */
++//	{0x119C,	11,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_11       Port Interrupt */
++//	{0x119C,	10,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_10       Port Interrupt */
++//	{0x119C,	 9,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_9        Port Interrupt */
++//	{0x119C,	 8,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_8        Port Interrupt */
++//	{0x119C,	 7,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_7        Port Interrupt */
++//	{0x119C,	 6,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_6        Port Interrupt */
++//	{0x119C,	 5,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_5        Port Interrupt */
++//	{0x119C,	 4,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_4        Port Interrupt */
++//	{0x119C,	 3,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_3        Port Interrupt */
++//	{0x119C,	 2,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_2        Port Interrupt */
++//	{0x119C,	 1,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_1        Port Interrupt */
++//	{0x119C,	 0,	 1,	0x00}, /* XRX200_PCE_ISR_0_PINT_0        Port Interrupt */
++//	{0x11A0,	 0,	16,	0x00}, /* XRX200_PCE_ISR_1               Parser and ClassificationEngine Global Interrupt Status Register 1 */
++//	{0x11A0,	 6,	 1,	0x00}, /* XRX200_PCE_ISR_1_FLOWINT       Traffic Flow Table Interrupt Rule matched */
++//	{0x11A0,	 5,	 1,	0x00}, /* XRX200_PCE_ISR_1_CPH2          Classification Phase 2 Ready Interrupt */
++//	{0x11A0,	 4,	 1,	0x00}, /* XRX200_PCE_ISR_1_CPH1          Classification Phase 1 Ready Interrupt */
++//	{0x11A0,	 3,	 1,	0x00}, /* XRX200_PCE_ISR_1_CPH0          Classification Phase 0 Ready Interrupt */
++//	{0x11A0,	 2,	 1,	0x00}, /* XRX200_PCE_ISR_1_PRDY          Parser Ready Interrupt */
++//	{0x11A0,	 1,	 1,	0x00}, /* XRX200_PCE_ISR_1_IGTF          IGMP Table Full Interrupt */
++//	{0x11A0,	 0,	 1,	0x00}, /* XRX200_PCE_ISR_1_MTF           MAC Table Full Interrupt */
++//	{0x11A4,	 0,	16,	0x00}, /* XRX200_PARSER_STAT_FIFO        Parser Status Register */
++//	{0x11A4,	 8,	 8,	0x00}, /* XRX200_PARSER_STAT_FSM_DAT_CNT Parser FSM Data Counter */
++//	{0x11A4,	 5,	 3,	0x00}, /* XRX200_PARSER_STAT_FSM_STATE   Parser FSM State */
++//	{0x11A4,	 4,	 1,	0x00}, /* XRX200_PARSER_STAT_PKT_ERR     Packet error detected */
++//	{0x11A4,	 3,	 1,	0x00}, /* XRX200_PARSER_STAT_FSM_FIN     Parser FSM finished */
++//	{0x11A4,	 2,	 1,	0x00}, /* XRX200_PARSER_STAT_FSM_START   Parser FSM start */
++//	{0x11A4,	 1,	 1,	0x00}, /* XRX200_PARSER_STAT_FIFO_RDY    Parser FIFO ready for read. */
++//	{0x11A4,	 0,	 1,	0x00}, /* XRX200_PARSER_STAT_FIFO_FULL   Parser */
++//	{0x1200,	 0,	16,	0x28}, /* XRX200_PCE_PCTRL_0             PCE Port ControlRegister 0 */
++//	{0x1200,	13,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_MCST        Multicast Forwarding Mode Selection */
++//	{0x1200,	12,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_EGSTEN      Table-based Egress Special Tag Enable */
++//	{0x1200,	11,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_IGSTEN      Ingress Special Tag Enable */
++//	{0x1200,	10,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_PCPEN       PCP Remarking Mode */
++//	{0x1200,	 9,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_CLPEN       Class Remarking Mode */
++//	{0x1200,	 8,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_DPEN        Drop Precedence Remarking Mode */
++//	{0x1200,	 7,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_CMOD        Three-color Marker Color Mode */
++//	{0x1200,	 6,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_VREP        VLAN Replacement Mode */
++	{0x1200,	 5,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_TVM         Transparent VLAN Mode */
++//	{0x1200,	 4,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_PLOCK       Port Locking Enable */
++//	{0x1200,	 3,	 1,	0x28}, /* XRX200_PCE_PCTRL_0_AGEDIS      Aging Disable */
++//	{0x1200,	 0,	 3,	0x28}, /* XRX200_PCE_PCTRL_0_PSTATE      Port State */
++//	{0x1204,	 0,	16,	0x28}, /* XRX200_PCE_PCTRL_1             PCE Port ControlRegister 1 */
++//	{0x1204,	 0,	 8,	0x28}, /* XRX200_PCE_PCTRL_1_LRNLIM      MAC Address Learning Limit */
++//	{0x1208,	 0,	16,	0x28}, /* XRX200_PCE_PCTRL_2             PCE Port ControlRegister 2 */
++//	{0x1208,	 7,	 1,	0x28}, /* XRX200_PCE_PCTRL_2_DSCPMOD     DSCP Mode Selection */
++//	{0x1208,	 5,	 2,	0x28}, /* XRX200_PCE_PCTRL_2_DSCP        Enable DSCP to select the Class of Service */
++//	{0x1208,	 4,	 1,	0x28}, /* XRX200_PCE_PCTRL_2_PCP         Enable VLAN PCP to select the Class of Service */
++//	{0x1208,	 0,	 4,	0x28}, /* XRX200_PCE_PCTRL_2_PCLASS      Port-based Traffic Class */
++//	{0x120C,	 0,	16,	0x28}, /* XRX200_PCE_PCTRL_3_VIO         PCE Port ControlRegister 3 */
++//	{0x120C,	11,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_EDIR        Egress Redirection Mode */
++//	{0x120C,	10,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_RXDMIR      Receive Mirroring Enable for dropped frames */
++//	{0x120C,	 9,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_RXVMIR      Receive Mirroring Enable for valid frames */
++//	{0x120C,	 8,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_TXMIR       Transmit Mirroring Enable */
++//	{0x120C,	 7,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_VIO_7       Violation Type 7 Mirroring Enable */
++//	{0x120C,	 6,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_VIO_6       Violation Type 6 Mirroring Enable */
++//	{0x120C,	 5,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_VIO_5       Violation Type 5 Mirroring Enable */
++//	{0x120C,	 4,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_VIO_4       Violation Type 4 Mirroring Enable */
++//	{0x120C,	 3,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_VIO_3       Violation Type 3 Mirroring Enable */
++//	{0x120C,	 2,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_VIO_2       Violation Type 2 Mirroring Enable */
++//	{0x120C,	 1,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_VIO_1       Violation Type 1 Mirroring Enable */
++//	{0x120C,	 0,	 1,	0x28}, /* XRX200_PCE_PCTRL_3_VIO_0       Violation Type 0 Mirroring Enable */
++//	{0x1210,	 0,	16,	0x28}, /* XRX200_WOL_CTRL                Wake-on-LAN ControlRegister */
++//	{0x1210,	 0,	 1,	0x28}, /* XRX200_WOL_CTRL_PORT           WoL Enable */
++//	{0x1214,	 0,	16,	0x28}, /* XRX200_PCE_VCTRL               PCE VLAN ControlRegister */
++	{0x1214,	 5,	 1,	0x28}, /* XRX200_PCE_VCTRL_VSR           VLAN Security Rule */
++	{0x1214,	 4,	 1,	0x28}, /* XRX200_PCE_VCTRL_VEMR          VLAN Egress Member Violation Rule */
++	{0x1214,	 3,	 1,	0x28}, /* XRX200_PCE_VCTRL_VIMR          VLAN Ingress Member Violation Rule */
++	{0x1214,	 1,	 2,	0x28}, /* XRX200_PCE_VCTRL_VINR          VLAN Ingress Tag Rule */
++	{0x1214,	 0,	 1,	0x28}, /* XRX200_PCE_VCTRL_UVR           Unknown VLAN Rule */
++//	{0x1218,	 0,	16,	0x28}, /* XRX200_PCE_DEFPVID             PCE Default PortVID Register */
++	{0x1218,	 0,	 6,	0x28}, /* XRX200_PCE_DEFPVID_PVID        Default Port VID Index */
++//	{0x121C,	 0,	16,	0x28}, /* XRX200_PCE_PSTAT               PCE Port StatusRegister */
++//	{0x121C,	 0,	16,	0x28}, /* XRX200_PCE_PSTAT_LRNCNT        Learning Count */
++//	{0x1220,	 0,	16,	0x28}, /* XRX200_PCE_PIER                Parser and ClassificationEngine Port Interrupt Enable Register */
++//	{0x1220,	 5,	 1,	0x28}, /* XRX200_PCE_PIER_CLDRP          Classification Drop Interrupt Enable */
++//	{0x1220,	 4,	 1,	0x28}, /* XRX200_PCE_PIER_PTDRP          Port Drop Interrupt Enable */
++//	{0x1220,	 3,	 1,	0x28}, /* XRX200_PCE_PIER_VLAN           VLAN Violation Interrupt Enable */
++//	{0x1220,	 2,	 1,	0x28}, /* XRX200_PCE_PIER_WOL            Wake-on-LAN Interrupt Enable */
++//	{0x1220,	 1,	 1,	0x28}, /* XRX200_PCE_PIER_LOCK           Port Limit Alert Interrupt Enable */
++//	{0x1220,	 0,	 1,	0x28}, /* XRX200_PCE_PIER_LIM            Port Lock Alert Interrupt Enable */
++//	{0x1224,	 0,	16,	0x28}, /* XRX200_PCE_PISR                Parser and ClassificationEngine Port Interrupt Status Register */
++//	{0x1224,	 5,	 1,	0x28}, /* XRX200_PCE_PISR_CLDRP          Classification Drop Interrupt */
++//	{0x1224,	 4,	 1,	0x28}, /* XRX200_PCE_PISR_PTDRP          Port Drop Interrupt */
++//	{0x1224,	 3,	 1,	0x28}, /* XRX200_PCE_PISR_VLAN           VLAN Violation Interrupt */
++//	{0x1224,	 2,	 1,	0x28}, /* XRX200_PCE_PISR_WOL            Wake-on-LAN Interrupt */
++//	{0x1224,	 1,	 1,	0x28}, /* XRX200_PCE_PISR_LOCK           Port Lock Alert Interrupt */
++//	{0x1224,	 0,	 1,	0x28}, /* XRX200_PCE_PISR_LIMIT          Port Limitation Alert Interrupt */
++//	{0x1600,	 0,	16,	0x1c}, /* XRX200_PCE_TCM_CTRL            Three-colorMarker Control Register */
++//	{0x1600,	 0,	 1,	0x1c}, /* XRX200_PCE_TCM_CTRL_TCMEN      Three-color Marker metering instance enable */
++//	{0x1604,	 0,	16,	0x1c}, /* XRX200_PCE_TCM_STAT            Three-colorMarker Status Register */
++//	{0x1604,	 1,	 1,	0x1c}, /* XRX200_PCE_TCM_STAT_AL1        Three-color Marker Alert 1 Status */
++//	{0x1604,	 0,	 1,	0x1c}, /* XRX200_PCE_TCM_STAT_AL0        Three-color Marker Alert 0 Status */
++//	{0x1608,	 0,	16,	0x1c}, /* XRX200_PCE_TCM_CBS             Three-color MarkerCommitted Burst Size Register */
++//	{0x1608,	 0,	10,	0x1c}, /* XRX200_PCE_TCM_CBS_CBS         Committed Burst Size */
++//	{0x160C,	 0,	16,	0x1c}, /* XRX200_PCE_TCM_EBS             Three-color MarkerExcess Burst Size Register */
++//	{0x160C,	 0,	10,	0x1c}, /* XRX200_PCE_TCM_EBS_EBS         Excess Burst Size */
++//	{0x1610,	 0,	16,	0x1c}, /* XRX200_PCE_TCM_IBS             Three-color MarkerInstantaneous Burst Size Register */
++//	{0x1610,	 0,	 2,	0x1c}, /* XRX200_PCE_TCM_IBS_IBS         Instantaneous Burst Size */
++//	{0x1614,	 0,	16,	0x1c}, /* XRX200_PCE_TCM_CIR_MANT        Three-colorMarker Constant Information Rate Mantissa Register */
++//	{0x1614,	 0,	10,	0x1c}, /* XRX200_PCE_TCM_CIR_MANT_MANT   Rate Counter Mantissa */
++//	{0x1618,	 0,	16,	0x1c}, /* XRX200_PCE_TCM_CIR_EXP         Three-colorMarker Constant Information Rate Exponent Register */
++//	{0x1618,	 0,	 4,	0x1c}, /* XRX200_PCE_TCM_CIR_EXP_EXP     Rate Counter Exponent */
++//	{0x2300,	 0,	16,	0x00}, /* XRX200_MAC_TEST                MAC Test Register */
++//	{0x2300,	 0,	16,	0x00}, /* XRX200_MAC_TEST_JTP            Jitter Test Pattern */
++//	{0x2304,	 0,	16,	0x00}, /* XRX200_MAC_PFAD_CFG            MAC Pause FrameSource Address Configuration Register */
++//	{0x2304,	 0,	 1,	0x00}, /* XRX200_MAC_PFAD_CFG_SAMOD      Source Address Mode */
++//	{0x2308,	 0,	16,	0x00}, /* XRX200_MAC_PFSA_0              Pause Frame SourceAddress Part 0  */
++//	{0x2308,	 0,	16,	0x00}, /* XRX200_MAC_PFSA_0_PFAD         Pause Frame Source Address Part 0 */
++//	{0x230C,	 0,	16,	0x00}, /* XRX200_MAC_PFSA_1              Pause Frame SourceAddress Part 1  */
++//	{0x230C,	 0,	16,	0x00}, /* XRX200_MAC_PFSA_1_PFAD         Pause Frame Source Address Part 1 */
++//	{0x2310,	 0,	16,	0x00}, /* XRX200_MAC_PFSA_2              Pause Frame SourceAddress Part 2  */
++//	{0x2310,	 0,	16,	0x00}, /* XRX200_MAC_PFSA_2_PFAD         Pause Frame Source Address Part 2 */
++//	{0x2314,	 0,	16,	0x00}, /* XRX200_MAC_FLEN                MAC Frame Length Register */
++//	{0x2314,	 0,	14,	0x00}, /* XRX200_MAC_FLEN_LEN            Maximum Frame Length */
++//	{0x2318,	 0,	16,	0x00}, /* XRX200_MAC_VLAN_ETYPE_0        MAC VLAN EthertypeRegister 0 */
++//	{0x2318,	 0,	16,	0x00}, /* XRX200_MAC_VLAN_ETYPE_0_OUTER  Ethertype */
++//	{0x231C,	 0,	16,	0x00}, /* XRX200_MAC_VLAN_ETYPE_1        MAC VLAN EthertypeRegister 1 */
++//	{0x231C,	 0,	16,	0x00}, /* XRX200_MAC_VLAN_ETYPE_1_INNER  Ethertype */
++//	{0x2320,	 0,	16,	0x00}, /* XRX200_MAC_IER                 MAC Interrupt EnableRegister */
++//	{0x2320,	 0,	 8,	0x00}, /* XRX200_MAC_IER_MACIEN          MAC Interrupt Enable */
++//	{0x2324,	 0,	16,	0x00}, /* XRX200_MAC_ISR                 MAC Interrupt StatusRegister */
++//	{0x2324,	 0,	 8,	0x00}, /* XRX200_MAC_ISR_MACINT          MAC Interrupt */
++//	{0x2400,	 0,	16,	0x30}, /* XRX200_MAC_PSTAT               MAC Port Status Register */
++//	{0x2400,	11,	 1,	0x30}, /* XRX200_MAC_PSTAT_PACT          PHY Active Status */
++	{0x2400,	10,	 1,	0x30}, /* XRX200_MAC_PSTAT_GBIT          Gigabit Speed Status */
++	{0x2400,	 9,	 1,	0x30}, /* XRX200_MAC_PSTAT_MBIT          Megabit Speed Status */
++	{0x2400,	 8,	 1,	0x30}, /* XRX200_MAC_PSTAT_FDUP          Full Duplex Status */
++//	{0x2400,	 7,	 1,	0x30}, /* XRX200_MAC_PSTAT_RXPAU         Receive Pause Status */
++//	{0x2400,	 6,	 1,	0x30}, /* XRX200_MAC_PSTAT_TXPAU         Transmit Pause Status */
++//	{0x2400,	 5,	 1,	0x30}, /* XRX200_MAC_PSTAT_RXPAUEN       Receive Pause Enable Status */
++//	{0x2400,	 4,	 1,	0x30}, /* XRX200_MAC_PSTAT_TXPAUEN       Transmit Pause Enable Status */
++	{0x2400,	 3,	 1,	0x30}, /* XRX200_MAC_PSTAT_LSTAT         Link Status */
++//	{0x2400,	 2,	 1,	0x30}, /* XRX200_MAC_PSTAT_CRS           Carrier Sense Status */
++//	{0x2400,	 1,	 1,	0x30}, /* XRX200_MAC_PSTAT_TXLPI         Transmit Low-power Idle Status */
++//	{0x2400,	 0,	 1,	0x30}, /* XRX200_MAC_PSTAT_RXLPI         Receive Low-power Idle Status */
++//	{0x2404,	 0,	16,	0x30}, /* XRX200_MAC_PISR                MAC Interrupt Status Register */
++//	{0x2404,	13,	 1,	0x30}, /* XRX200_MAC_PISR_PACT           PHY Active Status */
++//	{0x2404,	12,	 1,	0x30}, /* XRX200_MAC_PISR_SPEED          Megabit Speed Status */
++//	{0x2404,	11,	 1,	0x30}, /* XRX200_MAC_PISR_FDUP           Full Duplex Status */
++//	{0x2404,	10,	 1,	0x30}, /* XRX200_MAC_PISR_RXPAUEN        Receive Pause Enable Status */
++//	{0x2404,	 9,	 1,	0x30}, /* XRX200_MAC_PISR_TXPAUEN        Transmit Pause Enable Status */
++//	{0x2404,	 8,	 1,	0x30}, /* XRX200_MAC_PISR_LPIOFF         Receive Low-power Idle Mode is left */
++//	{0x2404,	 7,	 1,	0x30}, /* XRX200_MAC_PISR_LPION          Receive Low-power Idle Mode is entered */
++//	{0x2404,	 6,	 1,	0x30}, /* XRX200_MAC_PISR_JAM            Jam Status Detected */
++//	{0x2404,	 5,	 1,	0x30}, /* XRX200_MAC_PISR_TOOSHORT       Too Short Frame Error Detected */
++//	{0x2404,	 4,	 1,	0x30}, /* XRX200_MAC_PISR_TOOLONG        Too Long Frame Error Detected */
++//	{0x2404,	 3,	 1,	0x30}, /* XRX200_MAC_PISR_LENERR         Length Mismatch Error Detected */
++//	{0x2404,	 2,	 1,	0x30}, /* XRX200_MAC_PISR_FCSERR         Frame Checksum Error Detected */
++//	{0x2404,	 1,	 1,	0x30}, /* XRX200_MAC_PISR_TXPAUSE        Pause Frame Transmitted */
++//	{0x2404,	 0,	 1,	0x30}, /* XRX200_MAC_PISR_RXPAUSE        Pause Frame Received */
++//	{0x2408,	 0,	16,	0x30}, /* XRX200_MAC_PIER                MAC Interrupt Enable Register */
++//	{0x2408,	13,	 1,	0x30}, /* XRX200_MAC_PIER_PACT           PHY Active Status */
++//	{0x2408,	12,	 1,	0x30}, /* XRX200_MAC_PIER_SPEED          Megabit Speed Status */
++//	{0x2408,	11,	 1,	0x30}, /* XRX200_MAC_PIER_FDUP           Full Duplex Status */
++//	{0x2408,	10,	 1,	0x30}, /* XRX200_MAC_PIER_RXPAUEN        Receive Pause Enable Status */
++//	{0x2408,	 9,	 1,	0x30}, /* XRX200_MAC_PIER_TXPAUEN        Transmit Pause Enable Status */
++//	{0x2408,	 8,	 1,	0x30}, /* XRX200_MAC_PIER_LPIOFF         Low-power Idle Off Interrupt Mask */
++//	{0x2408,	 7,	 1,	0x30}, /* XRX200_MAC_PIER_LPION          Low-power Idle On Interrupt Mask */
++//	{0x2408,	 6,	 1,	0x30}, /* XRX200_MAC_PIER_JAM            Jam Status Interrupt Mask */
++//	{0x2408,	 5,	 1,	0x30}, /* XRX200_MAC_PIER_TOOSHORT       Too Short Frame Error Interrupt Mask */
++//	{0x2408,	 4,	 1,	0x30}, /* XRX200_MAC_PIER_TOOLONG        Too Long Frame Error Interrupt Mask */
++//	{0x2408,	 3,	 1,	0x30}, /* XRX200_MAC_PIER_LENERR         Length Mismatch Error Interrupt Mask */
++//	{0x2408,	 2,	 1,	0x30}, /* XRX200_MAC_PIER_FCSERR         Frame Checksum Error Interrupt Mask */
++//	{0x2408,	 1,	 1,	0x30}, /* XRX200_MAC_PIER_TXPAUSE        Transmit Pause Frame Interrupt Mask */
++//	{0x2408,	 0,	 1,	0x30}, /* XRX200_MAC_PIER_RXPAUSE        Receive Pause Frame Interrupt Mask */
++//	{0x240C,	 0,	16,	0x30}, /* XRX200_MAC_CTRL_0              MAC Control Register0 */
++//	{0x240C,	13,	 2,	0x30}, /* XRX200_MAC_CTRL_0_LCOL         Late Collision Control */
++//	{0x240C,	12,	 1,	0x30}, /* XRX200_MAC_CTRL_0_BM           Burst Mode Control */
++//	{0x240C,	11,	 1,	0x30}, /* XRX200_MAC_CTRL_0_APADEN       Automatic VLAN Padding Enable */
++//	{0x240C,	10,	 1,	0x30}, /* XRX200_MAC_CTRL_0_VPAD2EN      Stacked VLAN Padding Enable */
++//	{0x240C,	 9,	 1,	0x30}, /* XRX200_MAC_CTRL_0_VPADEN       VLAN Padding Enable */
++//	{0x240C,	 8,	 1,	0x30}, /* XRX200_MAC_CTRL_0_PADEN        Padding Enable */
++//	{0x240C,	 7,	 1,	0x30}, /* XRX200_MAC_CTRL_0_FCS          Transmit FCS Control */
++	{0x240C,	 4,	 3,	0x30}, /* XRX200_MAC_CTRL_0_FCON         Flow Control Mode */
++//	{0x240C,	 2,	 2,	0x30}, /* XRX200_MAC_CTRL_0_FDUP         Full Duplex Control */
++//	{0x240C,	 0,	 2,	0x30}, /* XRX200_MAC_CTRL_0_GMII         GMII/MII interface mode selection */
++//	{0x2410,	 0,	16,	0x30}, /* XRX200_MAC_CTRL_1              MAC Control Register1 */
++//	{0x2410,	 8,	 1,	0x30}, /* XRX200_MAC_CTRL_1_SHORTPRE     Short Preamble Control */
++//	{0x2410,	 0,	 4,	0x30}, /* XRX200_MAC_CTRL_1_IPG          Minimum Inter Packet Gap Size */
++//	{0x2414,	 0,	16,	0x30}, /* XRX200_MAC_CTRL_2              MAC Control Register2 */
++//	{0x2414,	 3,	 1,	0x30}, /* XRX200_MAC_CTRL_2_MLEN         Maximum Untagged Frame Length */
++//	{0x2414,	 2,	 1,	0x30}, /* XRX200_MAC_CTRL_2_LCHKL        Frame Length Check Long Enable */
++//	{0x2414,	 0,	 2,	0x30}, /* XRX200_MAC_CTRL_2_LCHKS        Frame Length Check Short Enable */
++//	{0x2418,	 0,	16,	0x30}, /* XRX200_MAC_CTRL_3              MAC Control Register3 */
++//	{0x2418,	 0,	 4,	0x30}, /* XRX200_MAC_CTRL_3_RCNT         Retry Count */
++//	{0x241C,	 0,	16,	0x30}, /* XRX200_MAC_CTRL_4              MAC Control Register4 */
++//	{0x241C,	 7,	 1,	0x30}, /* XRX200_MAC_CTRL_4_LPIEN        LPI Mode Enable */
++//	{0x241C,	 0,	 7,	0x30}, /* XRX200_MAC_CTRL_4_WAIT         LPI Wait Time */
++//	{0x2420,	 0,	16,	0x30}, /* XRX200_MAC_CTRL_5_PJPS         MAC Control Register5 */
++//	{0x2420,	 1,	 1,	0x30}, /* XRX200_MAC_CTRL_5_PJPS_NOBP    Prolonged Jam pattern size during no-backpressure state */
++//	{0x2420,	 0,	 1,	0x30}, /* XRX200_MAC_CTRL_5_PJPS_BP      Prolonged Jam pattern size during backpressure state */
++//	{0x2424,	 0,	16,	0x30}, /* XRX200_MAC_CTRL_6_XBUF         Transmit and Receive Buffer Control Register */
++//	{0x2424,	 9,	 3,	0x30}, /* XRX200_MAC_CTRL_6_RBUF_DLY_WP  Delay */
++//	{0x2424,	 8,	 1,	0x30}, /* XRX200_MAC_CTRL_6_RBUF_INIT    Receive Buffer Initialization */
++//	{0x2424,	 6,	 1,	0x30}, /* XRX200_MAC_CTRL_6_RBUF_BYPASS  Bypass the Receive Buffer */
++//	{0x2424,	 3,	 3,	0x30}, /* XRX200_MAC_CTRL_6_XBUF_DLY_WP  Delay */
++//	{0x2424,	 2,	 1,	0x30}, /* XRX200_MAC_CTRL_6_XBUF_INIT    Initialize the Transmit Buffer */
++//	{0x2424,	 0,	 1,	0x30}, /* XRX200_MAC_CTRL_6_XBUF_BYPASS  Bypass the Transmit Buffer */
++//	{0x2428,	 0,	16,	0x30}, /* XRX200_MAC_BUFST_XBUF          MAC Receive and Transmit Buffer Status Register */
++//	{0x2428,	 3,	 1,	0x30}, /* XRX200_MAC_BUFST_RBUF_UFL      Receive Buffer Underflow Indicator */
++//	{0x2428,	 2,	 1,	0x30}, /* XRX200_MAC_BUFST_RBUF_OFL      Receive Buffer Overflow Indicator */
++//	{0x2428,	 1,	 1,	0x30}, /* XRX200_MAC_BUFST_XBUF_UFL      Transmit Buffer Underflow Indicator */
++//	{0x2428,	 0,	 1,	0x30}, /* XRX200_MAC_BUFST_XBUF_OFL      Transmit Buffer Overflow Indicator */
++//	{0x242C,	 0,	16,	0x30}, /* XRX200_MAC_TESTEN              MAC Test Enable Register */
++//	{0x242C,	 2,	 1,	0x30}, /* XRX200_MAC_TESTEN_JTEN         Jitter Test Enable */
++//	{0x242C,	 1,	 1,	0x30}, /* XRX200_MAC_TESTEN_TXER         Transmit Error Insertion */
++//	{0x242C,	 0,	 1,	0x30}, /* XRX200_MAC_TESTEN_LOOP         MAC Loopback Enable */
++//	{0x2900,	 0,	16,	0x00}, /* XRX200_FDMA_CTRL               Ethernet Switch Fetch DMA Control Register */
++//	{0x2900,	 7,	 5,	0x00}, /* XRX200_FDMA_CTRL_LPI_THRESHOLD Low Power Idle Threshold */
++//	{0x2900,	 4,	 3,	0x00}, /* XRX200_FDMA_CTRL_LPI_MODE      Low Power Idle Mode */
++//	{0x2900,	 2,	 2,	0x00}, /* XRX200_FDMA_CTRL_EGSTAG        Egress Special Tag Size */
++//	{0x2900,	 1,	 1,	0x00}, /* XRX200_FDMA_CTRL_IGSTAG        Ingress Special Tag Size */
++//	{0x2900,	 0,	 1,	0x00}, /* XRX200_FDMA_CTRL_EXCOL         Excessive Collision Handling */
++//	{0x2904,	 0,	16,	0x00}, /* XRX200_FDMA_STETYPE            Special Tag Ethertype Control Register */
++//	{0x2904,	 0,	16,	0x00}, /* XRX200_FDMA_STETYPE_ETYPE      Special Tag Ethertype */
++//	{0x2908,	 0,	16,	0x00}, /* XRX200_FDMA_VTETYPE            VLAN Tag Ethertype Control Register */
++//	{0x2908,	 0,	16,	0x00}, /* XRX200_FDMA_VTETYPE_ETYPE      VLAN Tag Ethertype */
++//	{0x290C,	 0,	16,	0x00}, /* XRX200_FDMA_STAT_0             FDMA Status Register0 */
++//	{0x290C,	 0,	16,	0x00}, /* XRX200_FDMA_STAT_0_FSMS        FSM states status */
++//	{0x2910,	 0,	16,	0x00}, /* XRX200_FDMA_IER                Fetch DMA Global Interrupt Enable Register */
++//	{0x2910,	14,	 1,	0x00}, /* XRX200_FDMA_IER_PCKD           Packet Drop Interrupt Enable */
++//	{0x2910,	13,	 1,	0x00}, /* XRX200_FDMA_IER_PCKR           Packet Ready Interrupt Enable */
++//	{0x2910,	 0,	 8,	0x00}, /* XRX200_FDMA_IER_PCKT           Packet Sent Interrupt Enable */
++//	{0x2914,	 0,	16,	0x00}, /* XRX200_FDMA_ISR                Fetch DMA Global Interrupt Status Register */
++//	{0x2914,	14,	 1,	0x00}, /* XRX200_FDMA_ISR_PCKTD          Packet Drop */
++//	{0x2914,	13,	 1,	0x00}, /* XRX200_FDMA_ISR_PCKR           Packet is Ready for Transmission */
++//	{0x2914,	 0,	 8,	0x00}, /* XRX200_FDMA_ISR_PCKT           Packet Sent Event */
++//	{0x2A00,	 0,	16,	0x18}, /* XRX200_FDMA_PCTRL              Ethernet Switch Fetch DMA Port Control Register */
++//	{0x2A00,	 3,	 2,	0x18}, /* XRX200_FDMA_PCTRL_VLANMOD      VLAN Modification Enable */
++//	{0x2A00,	 2,	 1,	0x18}, /* XRX200_FDMA_PCTRL_DSCPRM       DSCP Re-marking Enable */
++//	{0x2A00,	 1,	 1,	0x18}, /* XRX200_FDMA_PCTRL_STEN         Special Tag Insertion Enable */
++//	{0x2A00,	 0,	 1,	0x18}, /* XRX200_FDMA_PCTRL_EN           FDMA Port Enable */
++//	{0x2A04,	 0,	16,	0x18}, /* XRX200_FDMA_PRIO               Ethernet Switch Fetch DMA Port Priority Register */
++//	{0x2A04,	 0,	 2,	0x18}, /* XRX200_FDMA_PRIO_PRIO          FDMA PRIO */
++//	{0x2A08,	 0,	16,	0x18}, /* XRX200_FDMA_PSTAT0             Ethernet Switch Fetch DMA Port Status Register 0 */
++//	{0x2A08,	15,	 1,	0x18}, /* XRX200_FDMA_PSTAT0_PKT_AVAIL   Port Egress Packet Available */
++//	{0x2A08,	14,	 1,	0x18}, /* XRX200_FDMA_PSTAT0_POK         Port Status OK */
++//	{0x2A08,	 0,	 6,	0x18}, /* XRX200_FDMA_PSTAT0_PSEG        Port Egress Segment Count */
++//	{0x2A0C,	 0,	16,	0x18}, /* XRX200_FDMA_PSTAT1_HDR         Ethernet Switch Fetch DMA Port Status Register 1 */
++//	{0x2A0C,	 0,	10,	0x18}, /* XRX200_FDMA_PSTAT1_HDR_PTR     Header Pointer */
++//	{0x2A10,	 0,	16,	0x18}, /* XRX200_FDMA_TSTAMP0            Egress TimeStamp Register 0 */
++//	{0x2A10,	 0,	16,	0x18}, /* XRX200_FDMA_TSTAMP0_TSTL       Time Stamp [15:0] */
++//	{0x2A14,	 0,	16,	0x18}, /* XRX200_FDMA_TSTAMP1            Egress TimeStamp Register 1 */
++//	{0x2A14,	 0,	16,	0x18}, /* XRX200_FDMA_TSTAMP1_TSTH       Time Stamp [31:16] */
++//	{0x2D00,	 0,	16,	0x00}, /* XRX200_SDMA_CTRL               Ethernet Switch Store DMA Control Register */
++//	{0x2D00,	 0,	 1,	0x00}, /* XRX200_SDMA_CTRL_TSTEN         Time Stamp Enable */
++//	{0x2D04,	 0,	16,	0x00}, /* XRX200_SDMA_FCTHR1             SDMA Flow Control Threshold1 Register */
++//	{0x2D04,	 0,	10,	0x00}, /* XRX200_SDMA_FCTHR1_THR1        Threshold 1 */
++//	{0x2D08,	 0,	16,	0x00}, /* XRX200_SDMA_FCTHR2             SDMA Flow Control Threshold2 Register */
++//	{0x2D08,	 0,	10,	0x00}, /* XRX200_SDMA_FCTHR2_THR2        Threshold 2 */
++//	{0x2D0C,	 0,	16,	0x00}, /* XRX200_SDMA_FCTHR3             SDMA Flow Control Threshold3 Register */
++//	{0x2D0C,	 0,	10,	0x00}, /* XRX200_SDMA_FCTHR3_THR3        Threshold 3 */
++//	{0x2D10,	 0,	16,	0x00}, /* XRX200_SDMA_FCTHR4             SDMA Flow Control Threshold4 Register */
++//	{0x2D10,	 0,	10,	0x00}, /* XRX200_SDMA_FCTHR4_THR4        Threshold 4 */
++//	{0x2D14,	 0,	16,	0x00}, /* XRX200_SDMA_FCTHR5             SDMA Flow Control Threshold5 Register */
++//	{0x2D14,	 0,	10,	0x00}, /* XRX200_SDMA_FCTHR5_THR5        Threshold 5 */
++//	{0x2D18,	 0,	16,	0x00}, /* XRX200_SDMA_FCTHR6             SDMA Flow Control Threshold6 Register */
++//	{0x2D18,	 0,	10,	0x00}, /* XRX200_SDMA_FCTHR6_THR6        Threshold 6 */
++//	{0x2D1C,	 0,	16,	0x00}, /* XRX200_SDMA_FCTHR7             SDMA Flow Control Threshold7 Register */
++//	{0x2D1C,	 0,	11,	0x00}, /* XRX200_SDMA_FCTHR7_THR7        Threshold 7 */
++//	{0x2D20,	 0,	16,	0x00}, /* XRX200_SDMA_STAT_0             SDMA Status Register0 */
++//	{0x2D20,	 4,	 3,	0x00}, /* XRX200_SDMA_STAT_0_BPS_FILL    Back Pressure Status */
++//	{0x2D20,	 2,	 2,	0x00}, /* XRX200_SDMA_STAT_0_BPS_PNT     Back Pressure Status */
++//	{0x2D20,	 0,	 2,	0x00}, /* XRX200_SDMA_STAT_0_DROP        Back Pressure Status */
++//	{0x2D24,	 0,	16,	0x00}, /* XRX200_SDMA_STAT_1             SDMA Status Register1 */
++//	{0x2D24,	 0,	10,	0x00}, /* XRX200_SDMA_STAT_1_FILL        Buffer Filling Level */
++//	{0x2D28,	 0,	16,	0x00}, /* XRX200_SDMA_STAT_2             SDMA Status Register2 */
++//	{0x2D28,	 0,	16,	0x00}, /* XRX200_SDMA_STAT_2_FSMS        FSM states status */
++//	{0x2D2C,	 0,	16,	0x00}, /* XRX200_SDMA_IER                SDMA Interrupt Enable Register */
++//	{0x2D2C,	15,	 1,	0x00}, /* XRX200_SDMA_IER_BPEX           Buffer Pointers Exceeded */
++//	{0x2D2C,	14,	 1,	0x00}, /* XRX200_SDMA_IER_BFULL          Buffer Full */
++//	{0x2D2C,	13,	 1,	0x00}, /* XRX200_SDMA_IER_FERR           Frame Error */
++//	{0x2D2C,	 0,	 8,	0x00}, /* XRX200_SDMA_IER_FRX            Frame Received Successfully */
++//	{0x2D30,	 0,	16,	0x00}, /* XRX200_SDMA_ISR                SDMA Interrupt Status Register */
++//	{0x2D30,	15,	 1,	0x00}, /* XRX200_SDMA_ISR_BPEX           Packet Descriptors Exceeded */
++//	{0x2D30,	14,	 1,	0x00}, /* XRX200_SDMA_ISR_BFULL          Buffer Full */
++//	{0x2D30,	13,	 1,	0x00}, /* XRX200_SDMA_ISR_FERR           Frame Error */
++//	{0x2D30,	 0,	 8,	0x00}, /* XRX200_SDMA_ISR_FRX            Frame Received Successfully */
++//	{0x2F00,	 0,	16,	0x18}, /* XRX200_SDMA_PCTRL              Ethernet Switch Store DMA Port Control Register */
++//	{0x2F00,	13,	 2,	0x18}, /* XRX200_SDMA_PCTRL_DTHR         Drop Threshold Selection */
++//	{0x2F00,	11,	 2,	0x18}, /* XRX200_SDMA_PCTRL_PTHR         Pause Threshold Selection */
++//	{0x2F00,	10,	 1,	0x18}, /* XRX200_SDMA_PCTRL_PHYEFWD      Forward PHY Error Frames */
++//	{0x2F00,	 9,	 1,	0x18}, /* XRX200_SDMA_PCTRL_ALGFWD       Forward Alignment Error Frames */
++//	{0x2F00,	 8,	 1,	0x18}, /* XRX200_SDMA_PCTRL_LENFWD       Forward Length Errored Frames */
++//	{0x2F00,	 7,	 1,	0x18}, /* XRX200_SDMA_PCTRL_OSFWD        Forward Oversized Frames */
++//	{0x2F00,	 6,	 1,	0x18}, /* XRX200_SDMA_PCTRL_USFWD        Forward Undersized Frames */
++//	{0x2F00,	 5,	 1,	0x18}, /* XRX200_SDMA_PCTRL_FCSIGN       Ignore FCS Errors */
++//	{0x2F00,	 4,	 1,	0x18}, /* XRX200_SDMA_PCTRL_FCSFWD       Forward FCS Errored Frames */
++//	{0x2F00,	 3,	 1,	0x18}, /* XRX200_SDMA_PCTRL_PAUFWD       Pause Frame Forwarding */
++//	{0x2F00,	 2,	 1,	0x18}, /* XRX200_SDMA_PCTRL_MFCEN        Metering Flow Control Enable */
++//	{0x2F00,	 1,	 1,	0x18}, /* XRX200_SDMA_PCTRL_FCEN         Flow Control Enable */
++//	{0x2F00,	 0,	 1,	0x18}, /* XRX200_SDMA_PCTRL_PEN          Port Enable */
++//	{0x2F04,	 0,	16,	0x18}, /* XRX200_SDMA_PRIO               Ethernet Switch Store DMA Port Priority Register */
++//	{0x2F04,	 0,	 2,	0x18}, /* XRX200_SDMA_PRIO_PRIO          SDMA PRIO */
++//	{0x2F08,	 0,	16,	0x18}, /* XRX200_SDMA_PSTAT0_HDR         Ethernet Switch Store DMA Port Status Register 0 */
++//	{0x2F08,	 0,	10,	0x18}, /* XRX200_SDMA_PSTAT0_HDR_PTR     Port Ingress Queue Header Pointer */
++//	{0x2F0C,	 0,	16,	0x18}, /* XRX200_SDMA_PSTAT1             Ethernet Switch Store DMA Port Status Register 1 */
++//	{0x2F0C,	 0,	10,	0x18}, /* XRX200_SDMA_PSTAT1_PPKT        Port Ingress Packet Count */
++//	{0x2F10,	 0,	16,	0x18}, /* XRX200_SDMA_TSTAMP0            Ingress TimeStamp Register 0 */
++//	{0x2F10,	 0,	16,	0x18}, /* XRX200_SDMA_TSTAMP0_TSTL       Time Stamp [15:0] */
++//	{0x2F14,	 0,	16,	0x18}, /* XRX200_SDMA_TSTAMP1            Ingress TimeStamp Register 1 */
++//	{0x2F14,	 0,	16,	0x18}, /* XRX200_SDMA_TSTAMP1_TSTH       Time Stamp [31:16] */
++};
++
++
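For readers skimming the descriptor table that ends above: each entry describes one register field of the xrx200 switch. Below is a minimal sketch of how such a descriptor might be consumed, assuming the four columns are { byte offset, bit shift, field width, per-port byte stride }; the helper names are invented for illustration and are not part of the patch.

struct xrx200sw_reg {
	unsigned int offset;	/* register byte offset */
	unsigned int shift;	/* lowest bit of the field */
	unsigned int size;	/* field width in bits */
	unsigned int mult;	/* per-port byte stride, 0 for global registers */
};

/* register address of the field for a given switch port */
static unsigned int xrx200sw_field_addr(const struct xrx200sw_reg *r, int port)
{
	return r->offset + r->mult * port;	/* e.g. MAC_CTRL_0_FCON: 0x240C + 0x30 * port */
}

/* bit mask selecting the field inside that register */
static unsigned int xrx200sw_field_mask(const struct xrx200sw_reg *r)
{
	return ((1u << r->size) - 1) << r->shift;	/* FCON: 3 bits at shift 4 -> 0x70 */
}
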
diff --git a/target/linux/lantiq/patches-4.4/0026-NET-multi-phy-support.patch b/target/linux/lantiq/patches-4.4/0026-NET-multi-phy-support.patch
new file mode 100644
index 0000000..c19ddd3
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0026-NET-multi-phy-support.patch
@@ -0,0 +1,53 @@
+From c6feeeb407a3b8a6597ae377ba4dd138e185e3dd Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Sun, 27 Jul 2014 09:38:50 +0100
+Subject: [PATCH 26/36] NET: multi phy support
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/net/phy/phy.c |    9 ++++++---
+ include/linux/phy.h   |    1 +
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -888,7 +888,8 @@ void phy_state_machine(struct work_struc
+ 		/* If the link is down, give up on negotiation for now */
+ 		if (!phydev->link) {
+ 			phydev->state = PHY_NOLINK;
+-			netif_carrier_off(phydev->attached_dev);
++			if (!phydev->no_auto_carrier_off)
++				netif_carrier_off(phydev->attached_dev);
+ 			phydev->adjust_link(phydev->attached_dev);
+ 			break;
+ 		}
+@@ -971,7 +972,8 @@ void phy_state_machine(struct work_struc
+ 			netif_carrier_on(phydev->attached_dev);
+ 		} else {
+ 			phydev->state = PHY_NOLINK;
+-			netif_carrier_off(phydev->attached_dev);
++			if (!phydev->no_auto_carrier_off)
++				netif_carrier_off(phydev->attached_dev);
+ 		}
+ 
+ 		phydev->adjust_link(phydev->attached_dev);
+@@ -983,7 +985,8 @@ void phy_state_machine(struct work_struc
+ 	case PHY_HALTED:
+ 		if (phydev->link) {
+ 			phydev->link = 0;
+-			netif_carrier_off(phydev->attached_dev);
++			if (!phydev->no_auto_carrier_off)
++				netif_carrier_off(phydev->attached_dev);
+ 			phydev->adjust_link(phydev->attached_dev);
+ 			do_suspend = true;
+ 		}
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -377,6 +377,7 @@ struct phy_device {
+ 	bool is_pseudo_fixed_link;
+ 	bool has_fixups;
+ 	bool suspended;
++	bool no_auto_carrier_off;
+ 
+ 	enum phy_state state;
+ 
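The new no_auto_carrier_off flag defaults to false, so stock PHY behaviour is unchanged; a MAC driver that manages several PHYs behind one net_device (presumably the xrx200 driver above is the intended user) can opt out of the automatic carrier-off after attaching. A minimal sketch, not part of the patch, with invented names such as my_adjust_link:

#include <linux/phy.h>

static void my_adjust_link(struct net_device *dev)
{
	/* per-port link handling would live here */
}

static int my_attach_phy(struct net_device *dev, const char *phy_id)
{
	struct phy_device *phydev;

	phydev = phy_connect(dev, phy_id, &my_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	/* keep the shared carrier up even if this PHY loses its link */
	phydev->no_auto_carrier_off = true;
	return 0;
}
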
diff --git a/target/linux/lantiq/patches-4.4/0028-NET-lantiq-various-etop-fixes.patch b/target/linux/lantiq/patches-4.4/0028-NET-lantiq-various-etop-fixes.patch
new file mode 100644
index 0000000..3301264
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0028-NET-lantiq-various-etop-fixes.patch
@@ -0,0 +1,907 @@
+From 870ed9cae083ff8a60a739ef7e74c5a1800533be Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Tue, 9 Sep 2014 22:45:34 +0200
+Subject: [PATCH 28/36] NET: lantiq: various etop fixes
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/net/ethernet/lantiq_etop.c |  555 +++++++++++++++++++++++++-----------
+ 1 file changed, 389 insertions(+), 166 deletions(-)
+
+--- a/drivers/net/ethernet/lantiq_etop.c
++++ b/drivers/net/ethernet/lantiq_etop.c
+@@ -11,7 +11,7 @@
+  *   You should have received a copy of the GNU General Public License
+  *   along with this program; if not, see <http://www.gnu.org/licenses/>.
+  *
+- *   Copyright (C) 2011 John Crispin <blogic at openwrt.org>
++ *   Copyright (C) 2011-12 John Crispin <blogic at openwrt.org>
+  */
+ 
+ #include <linux/kernel.h>
+@@ -30,11 +30,16 @@
+ #include <linux/mm.h>
+ #include <linux/platform_device.h>
+ #include <linux/ethtool.h>
++#include <linux/if_vlan.h>
+ #include <linux/init.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/module.h>
++#include <linux/clk.h>
++#include <linux/of_net.h>
++#include <linux/of_irq.h>
++#include <linux/of_platform.h>
+ 
+ #include <asm/checksum.h>
+ 
+@@ -42,7 +47,7 @@
+ #include <xway_dma.h>
+ #include <lantiq_platform.h>
+ 
+-#define LTQ_ETOP_MDIO		0x11804
++#define LTQ_ETOP_MDIO_ACC	0x11804
+ #define MDIO_REQUEST		0x80000000
+ #define MDIO_READ		0x40000000
+ #define MDIO_ADDR_MASK		0x1f
+@@ -51,44 +56,91 @@
+ #define MDIO_REG_OFFSET		0x10
+ #define MDIO_VAL_MASK		0xffff
+ 
+-#define PPE32_CGEN		0x800
+-#define LQ_PPE32_ENET_MAC_CFG	0x1840
++#define LTQ_ETOP_MDIO_CFG       0x11800
++#define MDIO_CFG_MASK           0x6
++
++#define LTQ_ETOP_CFG            0x11808
++#define LTQ_ETOP_IGPLEN         0x11820
++#define LTQ_ETOP_MAC_CFG	0x11840
+ 
+ #define LTQ_ETOP_ENETS0		0x11850
+ #define LTQ_ETOP_MAC_DA0	0x1186C
+ #define LTQ_ETOP_MAC_DA1	0x11870
+-#define LTQ_ETOP_CFG		0x16020
+-#define LTQ_ETOP_IGPLEN		0x16080
++
++#define MAC_CFG_MASK		0xfff
++#define MAC_CFG_CGEN		(1 << 11)
++#define MAC_CFG_DUPLEX		(1 << 2)
++#define MAC_CFG_SPEED		(1 << 1)
++#define MAC_CFG_LINK		(1 << 0)
+ 
+ #define MAX_DMA_CHAN		0x8
+ #define MAX_DMA_CRC_LEN		0x4
+ #define MAX_DMA_DATA_LEN	0x600
+ 
+ #define ETOP_FTCU		BIT(28)
+-#define ETOP_MII_MASK		0xf
+-#define ETOP_MII_NORMAL		0xd
+-#define ETOP_MII_REVERSE	0xe
+ #define ETOP_PLEN_UNDER		0x40
+-#define ETOP_CGEN		0x800
++#define ETOP_CFG_MII0		0x01
+ 
+-/* use 2 static channels for TX/RX */
+-#define LTQ_ETOP_TX_CHANNEL	1
+-#define LTQ_ETOP_RX_CHANNEL	6
+-#define IS_TX(x)		(x == LTQ_ETOP_TX_CHANNEL)
+-#define IS_RX(x)		(x == LTQ_ETOP_RX_CHANNEL)
++#define ETOP_CFG_MASK           0xfff
++#define ETOP_CFG_FEN0		(1 << 8)
++#define ETOP_CFG_SEN0		(1 << 6)
++#define ETOP_CFG_OFF1		(1 << 3)
++#define ETOP_CFG_REMII0		(1 << 1)
++#define ETOP_CFG_OFF0		(1 << 0)
++
++#define LTQ_GBIT_MDIO_CTL	0xCC
++#define LTQ_GBIT_MDIO_DATA	0xd0
++#define LTQ_GBIT_GCTL0		0x68
++#define LTQ_GBIT_PMAC_HD_CTL	0x8c
++#define LTQ_GBIT_P0_CTL		0x4
++#define LTQ_GBIT_PMAC_RX_IPG	0xa8
++#define LTQ_GBIT_RGMII_CTL	0x78
++
++#define PMAC_HD_CTL_AS		(1 << 19)
++#define PMAC_HD_CTL_RXSH	(1 << 22)
++
++/* Switch Enable (0=disable, 1=enable) */
++#define GCTL0_SE		0x80000000
++/* Disable MDIO auto polling (0=disable, 1=enable) */
++#define PX_CTL_DMDIO		0x00400000
++
++/* MDC clock divider, clock = 25MHz/((MDC_CLOCK + 1) * 2) */
++#define MDC_CLOCK_MASK		0xff000000
++#define MDC_CLOCK_OFFSET	24
++
++/* register information for the gbit's MDIO bus */
++#define MDIO_XR9_REQUEST	0x00008000
++#define MDIO_XR9_READ		0x00000800
++#define MDIO_XR9_WRITE		0x00000400
++#define MDIO_XR9_REG_MASK	0x1f
++#define MDIO_XR9_ADDR_MASK	0x1f
++#define MDIO_XR9_RD_MASK	0xffff
++#define MDIO_XR9_REG_OFFSET	0
++#define MDIO_XR9_ADDR_OFFSET	5
++#define MDIO_XR9_WR_OFFSET	16
+ 
++#define LTQ_DMA_ETOP	((of_machine_is_compatible("lantiq,ase")) ? \
++			(INT_NUM_IM3_IRL0) : (INT_NUM_IM2_IRL0))
++
++/* the newer xway SoCs have an embedded 3/7 port gbit multiplexer */
+ #define ltq_etop_r32(x)		ltq_r32(ltq_etop_membase + (x))
+ #define ltq_etop_w32(x, y)	ltq_w32(x, ltq_etop_membase + (y))
+ #define ltq_etop_w32_mask(x, y, z)	\
+ 		ltq_w32_mask(x, y, ltq_etop_membase + (z))
+ 
+-#define DRV_VERSION	"1.0"
++#define ltq_gbit_r32(x)		ltq_r32(ltq_gbit_membase + (x))
++#define ltq_gbit_w32(x, y)	ltq_w32(x, ltq_gbit_membase + (y))
++#define ltq_gbit_w32_mask(x, y, z)	\
++		ltq_w32_mask(x, y, ltq_gbit_membase + (z))
++
++#define DRV_VERSION	"1.2"
+ 
+ static void __iomem *ltq_etop_membase;
++static void __iomem *ltq_gbit_membase;
+ 
+ struct ltq_etop_chan {
+-	int idx;
+ 	int tx_free;
++	int irq;
+ 	struct net_device *netdev;
+ 	struct napi_struct napi;
+ 	struct ltq_dma_channel dma;
+@@ -98,22 +150,35 @@ struct ltq_etop_chan {
+ struct ltq_etop_priv {
+ 	struct net_device *netdev;
+ 	struct platform_device *pdev;
+-	struct ltq_eth_data *pldata;
+ 	struct resource *res;
+ 
+ 	struct mii_bus *mii_bus;
+ 	struct phy_device *phydev;
+ 
+-	struct ltq_etop_chan ch[MAX_DMA_CHAN];
+-	int tx_free[MAX_DMA_CHAN >> 1];
++	struct ltq_etop_chan txch;
++	struct ltq_etop_chan rxch;
++
++	int tx_irq;
++	int rx_irq;
++
++	unsigned char mac[6];
++	int mii_mode;
+ 
+ 	spinlock_t lock;
++
++	struct clk *clk_ppe;
++	struct clk *clk_switch;
++	struct clk *clk_ephy;
++	struct clk *clk_ephycgu;
+ };
+ 
++static int ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr,
++				int phy_reg, u16 phy_data);
++
+ static int
+ ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
+ {
+-	ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
++	ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
+ 	if (!ch->skb[ch->dma.desc])
+ 		return -ENOMEM;
+ 	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
+@@ -148,8 +213,11 @@ ltq_etop_hw_receive(struct ltq_etop_chan
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+ 	skb_put(skb, len);
++	skb->dev = ch->netdev;
+ 	skb->protocol = eth_type_trans(skb, ch->netdev);
+ 	netif_receive_skb(skb);
++	ch->netdev->stats.rx_packets++;
++	ch->netdev->stats.rx_bytes += len;
+ }
+ 
+ static int
+@@ -157,8 +225,10 @@ ltq_etop_poll_rx(struct napi_struct *nap
+ {
+ 	struct ltq_etop_chan *ch = container_of(napi,
+ 				struct ltq_etop_chan, napi);
++	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+ 	int rx = 0;
+ 	int complete = 0;
++	unsigned long flags;
+ 
+ 	while ((rx < budget) && !complete) {
+ 		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+@@ -172,7 +242,9 @@ ltq_etop_poll_rx(struct napi_struct *nap
+ 	}
+ 	if (complete || !rx) {
+ 		napi_complete(&ch->napi);
++		spin_lock_irqsave(&priv->lock, flags);
+ 		ltq_dma_ack_irq(&ch->dma);
++		spin_unlock_irqrestore(&priv->lock, flags);
+ 	}
+ 	return rx;
+ }
+@@ -184,12 +256,14 @@ ltq_etop_poll_tx(struct napi_struct *nap
+ 		container_of(napi, struct ltq_etop_chan, napi);
+ 	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+ 	struct netdev_queue *txq =
+-		netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
++		netdev_get_tx_queue(ch->netdev, ch->dma.nr >> 1);
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&priv->lock, flags);
+ 	while ((ch->dma.desc_base[ch->tx_free].ctl &
+ 			(LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
++		ch->netdev->stats.tx_packets++;
++		ch->netdev->stats.tx_bytes += ch->skb[ch->tx_free]->len;
+ 		dev_kfree_skb_any(ch->skb[ch->tx_free]);
+ 		ch->skb[ch->tx_free] = NULL;
+ 		memset(&ch->dma.desc_base[ch->tx_free], 0,
+@@ -202,7 +276,9 @@ ltq_etop_poll_tx(struct napi_struct *nap
+ 	if (netif_tx_queue_stopped(txq))
+ 		netif_tx_start_queue(txq);
+ 	napi_complete(&ch->napi);
++	spin_lock_irqsave(&priv->lock, flags);
+ 	ltq_dma_ack_irq(&ch->dma);
++	spin_unlock_irqrestore(&priv->lock, flags);
+ 	return 1;
+ }
+ 
+@@ -210,9 +286,10 @@ static irqreturn_t
+ ltq_etop_dma_irq(int irq, void *_priv)
+ {
+ 	struct ltq_etop_priv *priv = _priv;
+-	int ch = irq - LTQ_DMA_CH0_INT;
+-
+-	napi_schedule(&priv->ch[ch].napi);
++	if (irq == priv->txch.dma.irq)
++		napi_schedule(&priv->txch.napi);
++	else
++		napi_schedule(&priv->rxch.napi);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -224,7 +301,7 @@ ltq_etop_free_channel(struct net_device
+ 	ltq_dma_free(&ch->dma);
+ 	if (ch->dma.irq)
+ 		free_irq(ch->dma.irq, priv);
+-	if (IS_RX(ch->idx)) {
++	if (ch == &priv->txch) {
+ 		int desc;
+ 		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
+ 			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+@@ -235,65 +312,133 @@ static void
+ ltq_etop_hw_exit(struct net_device *dev)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+-	int i;
+ 
+-	ltq_pmu_disable(PMU_PPE);
+-	for (i = 0; i < MAX_DMA_CHAN; i++)
+-		if (IS_TX(i) || IS_RX(i))
+-			ltq_etop_free_channel(dev, &priv->ch[i]);
++	clk_disable(priv->clk_ppe);
++
++	if (of_machine_is_compatible("lantiq,ar9"))
++		clk_disable(priv->clk_switch);
++
++	if (of_machine_is_compatible("lantiq,ase")) {
++		clk_disable(priv->clk_ephy);
++		clk_disable(priv->clk_ephycgu);
++	}
++
++	ltq_etop_free_channel(dev, &priv->txch);
++	ltq_etop_free_channel(dev, &priv->rxch);
++}
++
++static void
++ltq_etop_gbit_init(struct net_device *dev)
++{
++	struct ltq_etop_priv *priv = netdev_priv(dev);
++
++	clk_enable(priv->clk_switch);
++
++	/* enable gbit port0 on the SoC */
++	ltq_gbit_w32_mask((1 << 17), (1 << 18), LTQ_GBIT_P0_CTL);
++
++	ltq_gbit_w32_mask(0, GCTL0_SE, LTQ_GBIT_GCTL0);
++	/* disable MDIO auto polling mode */
++	ltq_gbit_w32_mask(0, PX_CTL_DMDIO, LTQ_GBIT_P0_CTL);
++	/* set 1522 packet size */
++	ltq_gbit_w32_mask(0x300, 0, LTQ_GBIT_GCTL0);
++	/* disable pmac & dmac headers */
++	ltq_gbit_w32_mask(PMAC_HD_CTL_AS | PMAC_HD_CTL_RXSH, 0,
++		LTQ_GBIT_PMAC_HD_CTL);
++	/* Due to a traffic halt when the burst length is 8,
++		replace the default IPG value with 0x3B */
++	ltq_gbit_w32(0x3B, LTQ_GBIT_PMAC_RX_IPG);
++	/* set mdc clock to 2.5 MHz: 25 MHz / ((4 + 1) * 2) */
++	ltq_gbit_w32_mask(MDC_CLOCK_MASK, 4 << MDC_CLOCK_OFFSET,
++		LTQ_GBIT_RGMII_CTL);
+ }
+ 
+ static int
+ ltq_etop_hw_init(struct net_device *dev)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+-	int i;
++	int mii_mode = priv->mii_mode;
+ 
+-	ltq_pmu_enable(PMU_PPE);
++	clk_enable(priv->clk_ppe);
++
++	if (of_machine_is_compatible("lantiq,ar9")) {
++		ltq_etop_gbit_init(dev);
++		/* force the etops link to the gbit to MII */
++		/* force the etop's link to the gbit switch to MII */
++	}
++	ltq_etop_w32_mask(MDIO_CFG_MASK, 0, LTQ_ETOP_MDIO_CFG);
++	ltq_etop_w32_mask(MAC_CFG_MASK, MAC_CFG_CGEN | MAC_CFG_DUPLEX |
++			MAC_CFG_SPEED | MAC_CFG_LINK, LTQ_ETOP_MAC_CFG);
+ 
+-	switch (priv->pldata->mii_mode) {
++	switch (mii_mode) {
+ 	case PHY_INTERFACE_MODE_RMII:
+-		ltq_etop_w32_mask(ETOP_MII_MASK,
+-			ETOP_MII_REVERSE, LTQ_ETOP_CFG);
++		ltq_etop_w32_mask(ETOP_CFG_MASK, ETOP_CFG_REMII0 | ETOP_CFG_OFF1 |
++			ETOP_CFG_SEN0 | ETOP_CFG_FEN0, LTQ_ETOP_CFG);
+ 		break;
+ 
+ 	case PHY_INTERFACE_MODE_MII:
+-		ltq_etop_w32_mask(ETOP_MII_MASK,
+-			ETOP_MII_NORMAL, LTQ_ETOP_CFG);
++		ltq_etop_w32_mask(ETOP_CFG_MASK, ETOP_CFG_OFF1 |
++			ETOP_CFG_SEN0 | ETOP_CFG_FEN0, LTQ_ETOP_CFG);
+ 		break;
+ 
+ 	default:
++		if (of_machine_is_compatible("lantiq,ase")) {
++			clk_enable(priv->clk_ephy);
++			/* disable external MII */
++			ltq_etop_w32_mask(0, ETOP_CFG_MII0, LTQ_ETOP_CFG);
++			/* enable clock for internal PHY */
++			clk_enable(priv->clk_ephycgu);
++			/* we need to write this magic to the internal phy to
++			   make it work */
++			ltq_etop_mdio_wr(NULL, 0x8, 0x12, 0xC020);
++			pr_info("Selected EPHY mode\n");
++			break;
++		}
+ 		netdev_err(dev, "unknown mii mode %d\n",
+-			priv->pldata->mii_mode);
++			mii_mode);
+ 		return -ENOTSUPP;
+ 	}
+ 
+-	/* enable crc generation */
+-	ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
++	return 0;
++}
++
++static int
++ltq_etop_dma_init(struct net_device *dev)
++{
++	struct ltq_etop_priv *priv = netdev_priv(dev);
++	int tx = priv->tx_irq - LTQ_DMA_ETOP;
++	int rx = priv->rx_irq - LTQ_DMA_ETOP;
++	int err;
+ 
+ 	ltq_dma_init_port(DMA_PORT_ETOP);
+ 
+-	for (i = 0; i < MAX_DMA_CHAN; i++) {
+-		int irq = LTQ_DMA_CH0_INT + i;
+-		struct ltq_etop_chan *ch = &priv->ch[i];
+-
+-		ch->idx = ch->dma.nr = i;
+-
+-		if (IS_TX(i)) {
+-			ltq_dma_alloc_tx(&ch->dma);
+-			request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
+-		} else if (IS_RX(i)) {
+-			ltq_dma_alloc_rx(&ch->dma);
+-			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
+-					ch->dma.desc++)
+-				if (ltq_etop_alloc_skb(ch))
+-					return -ENOMEM;
+-			ch->dma.desc = 0;
+-			request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
++	priv->txch.dma.nr = tx;
++	ltq_dma_alloc_tx(&priv->txch.dma);
++	err = request_irq(priv->tx_irq, ltq_etop_dma_irq, 0, "eth_tx", priv);
++	if (err) {
++		netdev_err(dev, "failed to allocate tx irq\n");
++		goto err_out;
++	}
++	priv->txch.dma.irq = priv->tx_irq;
++
++	priv->rxch.dma.nr = rx;
++	ltq_dma_alloc_rx(&priv->rxch.dma);
++	for (priv->rxch.dma.desc = 0; priv->rxch.dma.desc < LTQ_DESC_NUM;
++			priv->rxch.dma.desc++) {
++		if (ltq_etop_alloc_skb(&priv->rxch)) {
++			netdev_err(dev, "failed to allocate skbs\n");
++			err = -ENOMEM;
++			goto err_out;
+ 		}
+-		ch->dma.irq = irq;
+ 	}
+-	return 0;
++	priv->rxch.dma.desc = 0;
++	err = request_irq(priv->rx_irq, ltq_etop_dma_irq, 0, "eth_rx", priv);
++	if (err)
++		netdev_err(dev, "failed to allocate rx irq\n");
++	else
++		priv->rxch.dma.irq = priv->rx_irq;
++err_out:
++	return err;
+ }
+ 
+ static void
+@@ -309,7 +454,10 @@ ltq_etop_get_settings(struct net_device
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+ 
+-	return phy_ethtool_gset(priv->phydev, cmd);
++	if (priv->phydev)
++		return phy_ethtool_gset(priv->phydev, cmd);
++	else
++		return 0;
+ }
+ 
+ static int
+@@ -317,7 +465,10 @@ ltq_etop_set_settings(struct net_device
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+ 
+-	return phy_ethtool_sset(priv->phydev, cmd);
++	if (priv->phydev)
++		return phy_ethtool_sset(priv->phydev, cmd);
++	else
++		return 0;
+ }
+ 
+ static int
+@@ -325,7 +476,10 @@ ltq_etop_nway_reset(struct net_device *d
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+ 
+-	return phy_start_aneg(priv->phydev);
++	if (priv->phydev)
++		return phy_start_aneg(priv->phydev);
++	else
++		return 0;
+ }
+ 
+ static const struct ethtool_ops ltq_etop_ethtool_ops = {
+@@ -336,6 +490,39 @@ static const struct ethtool_ops ltq_etop
+ };
+ 
+ static int
++ltq_etop_mdio_wr_xr9(struct mii_bus *bus, int phy_addr,
++		int phy_reg, u16 phy_data)
++{
++	u32 val = MDIO_XR9_REQUEST | MDIO_XR9_WRITE |
++		(phy_data << MDIO_XR9_WR_OFFSET) |
++		((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
++		((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
++
++	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
++		;
++	ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
++	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
++		;
++	return 0;
++}
++
++static int
++ltq_etop_mdio_rd_xr9(struct mii_bus *bus, int phy_addr, int phy_reg)
++{
++	u32 val = MDIO_XR9_REQUEST | MDIO_XR9_READ |
++		((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
++		((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
++
++	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
++		;
++	ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
++	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
++		;
++	val = ltq_gbit_r32(LTQ_GBIT_MDIO_DATA) & MDIO_XR9_RD_MASK;
++	return val;
++}
++
++static int
+ ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
+ {
+ 	u32 val = MDIO_REQUEST |
+@@ -343,9 +530,9 @@ ltq_etop_mdio_wr(struct mii_bus *bus, in
+ 		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
+ 		phy_data;
+ 
+-	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
++	while (ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_REQUEST)
+ 		;
+-	ltq_etop_w32(val, LTQ_ETOP_MDIO);
++	ltq_etop_w32(val, LTQ_ETOP_MDIO_ACC);
+ 	return 0;
+ }
+ 
+@@ -356,12 +543,12 @@ ltq_etop_mdio_rd(struct mii_bus *bus, in
+ 		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
+ 		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);
+ 
+-	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
++	while (ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_REQUEST)
+ 		;
+-	ltq_etop_w32(val, LTQ_ETOP_MDIO);
+-	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
++	ltq_etop_w32(val, LTQ_ETOP_MDIO_ACC);
++	while (ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_REQUEST)
+ 		;
+-	val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
++	val = ltq_etop_r32(LTQ_ETOP_MDIO_ACC) & MDIO_VAL_MASK;
+ 	return val;
+ }
+ 
+@@ -376,14 +563,18 @@ ltq_etop_mdio_probe(struct net_device *d
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+ 	struct phy_device *phydev = NULL;
+-	int phy_addr;
++	u32 phy_supported =  (SUPPORTED_10baseT_Half
++			| SUPPORTED_10baseT_Full
++			| SUPPORTED_100baseT_Half
++			| SUPPORTED_100baseT_Full
++			| SUPPORTED_Autoneg
++			| SUPPORTED_MII
++			| SUPPORTED_TP);
+ 
+-	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+-		if (priv->mii_bus->phy_map[phy_addr]) {
+-			phydev = priv->mii_bus->phy_map[phy_addr];
+-			break;
+-		}
+-	}
++	if (of_machine_is_compatible("lantiq,ase"))
++		phydev = priv->mii_bus->phy_map[8];
++	else
++		phydev = priv->mii_bus->phy_map[0];
+ 
+ 	if (!phydev) {
+ 		netdev_err(dev, "no PHY found\n");
+@@ -391,21 +582,18 @@ ltq_etop_mdio_probe(struct net_device *d
+ 	}
+ 
+ 	phydev = phy_connect(dev, dev_name(&phydev->dev),
+-			     &ltq_etop_mdio_link, priv->pldata->mii_mode);
++			&ltq_etop_mdio_link, priv->mii_mode);
+ 
+ 	if (IS_ERR(phydev)) {
+ 		netdev_err(dev, "Could not attach to PHY\n");
+ 		return PTR_ERR(phydev);
+ 	}
+ 
+-	phydev->supported &= (SUPPORTED_10baseT_Half
+-			      | SUPPORTED_10baseT_Full
+-			      | SUPPORTED_100baseT_Half
+-			      | SUPPORTED_100baseT_Full
+-			      | SUPPORTED_Autoneg
+-			      | SUPPORTED_MII
+-			      | SUPPORTED_TP);
++	if (of_machine_is_compatible("lantiq,ar9"))
++		phy_supported |= SUPPORTED_1000baseT_Half
++			| SUPPORTED_1000baseT_Full;
+ 
++	phydev->supported &= phy_supported;
+ 	phydev->advertising = phydev->supported;
+ 	priv->phydev = phydev;
+ 	pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
+@@ -430,8 +618,13 @@ ltq_etop_mdio_init(struct net_device *de
+ 	}
+ 
+ 	priv->mii_bus->priv = dev;
+-	priv->mii_bus->read = ltq_etop_mdio_rd;
+-	priv->mii_bus->write = ltq_etop_mdio_wr;
++	if (of_machine_is_compatible("lantiq,ar9")) {
++		priv->mii_bus->read = ltq_etop_mdio_rd_xr9;
++		priv->mii_bus->write = ltq_etop_mdio_wr_xr9;
++	} else {
++		priv->mii_bus->read = ltq_etop_mdio_rd;
++		priv->mii_bus->write = ltq_etop_mdio_wr;
++	}
+ 	priv->mii_bus->name = "ltq_mii";
+ 	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ 		priv->pdev->name, priv->pdev->id);
+@@ -480,17 +673,19 @@ static int
+ ltq_etop_open(struct net_device *dev)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+-	int i;
++	unsigned long flags;
+ 
+-	for (i = 0; i < MAX_DMA_CHAN; i++) {
+-		struct ltq_etop_chan *ch = &priv->ch[i];
++	napi_enable(&priv->txch.napi);
++	napi_enable(&priv->rxch.napi);
++
++	spin_lock_irqsave(&priv->lock, flags);
++	ltq_dma_open(&priv->txch.dma);
++	ltq_dma_open(&priv->rxch.dma);
++	spin_unlock_irqrestore(&priv->lock, flags);
++
++	if (priv->phydev)
++		phy_start(priv->phydev);
+ 
+-		if (!IS_TX(i) && (!IS_RX(i)))
+-			continue;
+-		ltq_dma_open(&ch->dma);
+-		napi_enable(&ch->napi);
+-	}
+-	phy_start(priv->phydev);
+ 	netif_tx_start_all_queues(dev);
+ 	return 0;
+ }
+@@ -499,18 +694,19 @@ static int
+ ltq_etop_stop(struct net_device *dev)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+-	int i;
++	unsigned long flags;
+ 
+ 	netif_tx_stop_all_queues(dev);
+-	phy_stop(priv->phydev);
+-	for (i = 0; i < MAX_DMA_CHAN; i++) {
+-		struct ltq_etop_chan *ch = &priv->ch[i];
+-
+-		if (!IS_RX(i) && !IS_TX(i))
+-			continue;
+-		napi_disable(&ch->napi);
+-		ltq_dma_close(&ch->dma);
+-	}
++	if (priv->phydev)
++		phy_stop(priv->phydev);
++	napi_disable(&priv->txch.napi);
++	napi_disable(&priv->rxch.napi);
++
++	spin_lock_irqsave(&priv->lock, flags);
++	ltq_dma_close(&priv->txch.dma);
++	ltq_dma_close(&priv->rxch.dma);
++	spin_unlock_irqrestore(&priv->lock, flags);
++
+ 	return 0;
+ }
+ 
+@@ -520,16 +716,16 @@ ltq_etop_tx(struct sk_buff *skb, struct
+ 	int queue = skb_get_queue_mapping(skb);
+ 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+-	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
+-	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+-	int len;
++	struct ltq_dma_desc *desc =
++		&priv->txch.dma.desc_base[priv->txch.dma.desc];
+ 	unsigned long flags;
+ 	u32 byte_offset;
++	int len;
+ 
+ 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+ 
+-	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
+-		dev_kfree_skb_any(skb);
++	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) ||
++			priv->txch.skb[priv->txch.dma.desc]) {
+ 		netdev_err(dev, "tx ring full\n");
+ 		netif_tx_stop_queue(txq);
+ 		return NETDEV_TX_BUSY;
+@@ -537,7 +733,7 @@ ltq_etop_tx(struct sk_buff *skb, struct
+ 
+ 	/* dma needs to start on a 16 byte aligned address */
+ 	byte_offset = CPHYSADDR(skb->data) % 16;
+-	ch->skb[ch->dma.desc] = skb;
++	priv->txch.skb[priv->txch.dma.desc] = skb;
+ 
+ 	dev->trans_start = jiffies;
+ 
+@@ -547,11 +743,11 @@ ltq_etop_tx(struct sk_buff *skb, struct
+ 	wmb();
+ 	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
+ 		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
+-	ch->dma.desc++;
+-	ch->dma.desc %= LTQ_DESC_NUM;
++	priv->txch.dma.desc++;
++	priv->txch.dma.desc %= LTQ_DESC_NUM;
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+-	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
++	if (priv->txch.dma.desc_base[priv->txch.dma.desc].ctl & LTQ_DMA_OWN)
+ 		netif_tx_stop_queue(txq);
+ 
+ 	return NETDEV_TX_OK;
+@@ -566,8 +762,10 @@ ltq_etop_change_mtu(struct net_device *d
+ 		struct ltq_etop_priv *priv = netdev_priv(dev);
+ 		unsigned long flags;
+ 
++		int max = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
++
+ 		spin_lock_irqsave(&priv->lock, flags);
+-		ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu,
++		ltq_etop_w32((ETOP_PLEN_UNDER << 16) | max,
+ 			LTQ_ETOP_IGPLEN);
+ 		spin_unlock_irqrestore(&priv->lock, flags);
+ 	}
+@@ -638,6 +836,9 @@ ltq_etop_init(struct net_device *dev)
+ 	if (err)
+ 		goto err_hw;
+ 	ltq_etop_change_mtu(dev, 1500);
++	err = ltq_etop_dma_init(dev);
++	if (err)
++		goto err_hw;
+ 
+ 	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
+ 	if (!is_valid_ether_addr(mac.sa_data)) {
+@@ -655,9 +856,10 @@ ltq_etop_init(struct net_device *dev)
+ 		dev->addr_assign_type = NET_ADDR_RANDOM;
+ 
+ 	ltq_etop_set_multicast_list(dev);
+-	err = ltq_etop_mdio_init(dev);
+-	if (err)
+-		goto err_netdev;
++	if (!ltq_etop_mdio_init(dev))
++		dev->ethtool_ops = &ltq_etop_ethtool_ops;
++	else
++		pr_warn("etop: mdio probe failed\n");
+ 	return 0;
+ 
+ err_netdev:
+@@ -677,6 +879,9 @@ ltq_etop_tx_timeout(struct net_device *d
+ 	err = ltq_etop_hw_init(dev);
+ 	if (err)
+ 		goto err_hw;
++	err = ltq_etop_dma_init(dev);
++	if (err)
++		goto err_hw;
+ 	dev->trans_start = jiffies;
+ 	netif_wake_queue(dev);
+ 	return;
+@@ -700,14 +905,18 @@ static const struct net_device_ops ltq_e
+ 	.ndo_tx_timeout = ltq_etop_tx_timeout,
+ };
+ 
+-static int __init
+-ltq_etop_probe(struct platform_device *pdev)
++static int ltq_etop_probe(struct platform_device *pdev)
+ {
+ 	struct net_device *dev;
+ 	struct ltq_etop_priv *priv;
+-	struct resource *res;
++	struct resource *res, *gbit_res, irqres[2];
+ 	int err;
+-	int i;
++
++	err = of_irq_to_resource_table(pdev->dev.of_node, irqres, 2);
++	if (err != 2) {
++		dev_err(&pdev->dev, "failed to get etop irqs\n");
++		return -EINVAL;
++	}
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	if (!res) {
+@@ -733,30 +942,58 @@ ltq_etop_probe(struct platform_device *p
+ 		goto err_out;
+ 	}
+ 
+-	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
+-	if (!dev) {
+-		err = -ENOMEM;
+-		goto err_out;
++	if (of_machine_is_compatible("lantiq,ar9")) {
++		gbit_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++		if (!gbit_res) {
++			dev_err(&pdev->dev, "failed to get gbit resource\n");
++			err = -ENOENT;
++			goto err_out;
++		}
++		ltq_gbit_membase = devm_ioremap_nocache(&pdev->dev,
++			gbit_res->start, resource_size(gbit_res));
++		if (!ltq_gbit_membase) {
++			dev_err(&pdev->dev, "failed to remap gigabit switch %d\n",
++				pdev->id);
++			err = -ENOMEM;
++			goto err_out;
++		}
+ 	}
++
++	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
+ 	strcpy(dev->name, "eth%d");
+ 	dev->netdev_ops = &ltq_eth_netdev_ops;
+-	dev->ethtool_ops = &ltq_etop_ethtool_ops;
+ 	priv = netdev_priv(dev);
+ 	priv->res = res;
+ 	priv->pdev = pdev;
+-	priv->pldata = dev_get_platdata(&pdev->dev);
+ 	priv->netdev = dev;
++	priv->tx_irq = irqres[0].start;
++	priv->rx_irq = irqres[1].start;
++	priv->mii_mode = of_get_phy_mode(pdev->dev.of_node);
++	of_get_mac_address_mtd(pdev->dev.of_node, priv->mac);
++
++	priv->clk_ppe = clk_get(&pdev->dev, NULL);
++	if (IS_ERR(priv->clk_ppe))
++		return PTR_ERR(priv->clk_ppe);
++	if (of_machine_is_compatible("lantiq,ar9")) {
++		priv->clk_switch = clk_get(&pdev->dev, "switch");
++		if (IS_ERR(priv->clk_switch))
++			return PTR_ERR(priv->clk_switch);
++	}
++	if (of_machine_is_compatible("lantiq,ase")) {
++		priv->clk_ephy = clk_get(&pdev->dev, "ephy");
++		if (IS_ERR(priv->clk_ephy))
++			return PTR_ERR(priv->clk_ephy);
++		priv->clk_ephycgu = clk_get(&pdev->dev, "ephycgu");
++		if (IS_ERR(priv->clk_ephycgu))
++			return PTR_ERR(priv->clk_ephycgu);
++	}
++
+ 	spin_lock_init(&priv->lock);
+ 
+-	for (i = 0; i < MAX_DMA_CHAN; i++) {
+-		if (IS_TX(i))
+-			netif_napi_add(dev, &priv->ch[i].napi,
+-				ltq_etop_poll_tx, 8);
+-		else if (IS_RX(i))
+-			netif_napi_add(dev, &priv->ch[i].napi,
+-				ltq_etop_poll_rx, 32);
+-		priv->ch[i].netdev = dev;
+-	}
++	netif_napi_add(dev, &priv->txch.napi, ltq_etop_poll_tx, 8);
++	netif_napi_add(dev, &priv->rxch.napi, ltq_etop_poll_rx, 32);
++	priv->txch.netdev = dev;
++	priv->rxch.netdev = dev;
+ 
+ 	err = register_netdev(dev);
+ 	if (err)
+@@ -785,31 +1022,22 @@ ltq_etop_remove(struct platform_device *
+ 	return 0;
+ }
+ 
++static const struct of_device_id ltq_etop_match[] = {
++	{ .compatible = "lantiq,etop-xway" },
++	{},
++};
++MODULE_DEVICE_TABLE(of, ltq_etop_match);
++
+ static struct platform_driver ltq_mii_driver = {
++	.probe = ltq_etop_probe,
+ 	.remove = ltq_etop_remove,
+ 	.driver = {
+ 		.name = "ltq_etop",
++		.of_match_table = ltq_etop_match,
+ 	},
+ };
+ 
+-int __init
+-init_ltq_etop(void)
+-{
+-	int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
+-
+-	if (ret)
+-		pr_err("ltq_etop: Error registering platform driver!");
+-	return ret;
+-}
+-
+-static void __exit
+-exit_ltq_etop(void)
+-{
+-	platform_driver_unregister(&ltq_mii_driver);
+-}
+-
+-module_init(init_ltq_etop);
+-module_exit(exit_ltq_etop);
++module_platform_driver(ltq_mii_driver);
+ 
+ MODULE_AUTHOR("John Crispin <blogic at openwrt.org>");
+ MODULE_DESCRIPTION("Lantiq SoC ETOP");
diff --git a/target/linux/lantiq/patches-4.4/0030-GPIO-add-named-gpio-exports.patch b/target/linux/lantiq/patches-4.4/0030-GPIO-add-named-gpio-exports.patch
new file mode 100644
index 0000000..2e27918
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0030-GPIO-add-named-gpio-exports.patch
@@ -0,0 +1,166 @@
+From cc809a441d8f2924f785eb863dfa6aef47a25b0b Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Tue, 12 Aug 2014 20:49:27 +0200
+Subject: [PATCH 30/36] GPIO: add named gpio exports
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/gpio/gpiolib-of.c     |   68 +++++++++++++++++++++++++++++++++++++++++
+ drivers/gpio/gpiolib.c        |   11 +++++--
+ include/asm-generic/gpio.h    |    5 +++
+ include/linux/gpio/consumer.h |    8 +++++
+ 4 files changed, 90 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -23,6 +23,8 @@
+ #include <linux/pinctrl/pinctrl.h>
+ #include <linux/slab.h>
+ #include <linux/gpio/machine.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
+ 
+ #include "gpiolib.h"
+ 
+@@ -450,3 +452,69 @@ void of_gpiochip_remove(struct gpio_chip
+ 	gpiochip_remove_pin_ranges(chip);
+ 	of_node_put(chip->of_node);
+ }
++
++static struct of_device_id gpio_export_ids[] = {
++	{ .compatible = "gpio-export" },
++	{ /* sentinel */ }
++};
++
++static int __init of_gpio_export_probe(struct platform_device *pdev)
++{
++	struct device_node *np = pdev->dev.of_node;
++	struct device_node *cnp;
++	u32 val;
++	int nb = 0;
++
++	for_each_child_of_node(np, cnp) {
++		const char *name = NULL;
++		int gpio;
++		bool dmc;
++		int max_gpio = 1;
++		int i;
++
++		of_property_read_string(cnp, "gpio-export,name", &name);
++
++		if (!name)
++			max_gpio = of_gpio_count(cnp);
++
++		for (i = 0; i < max_gpio; i++) {
++			unsigned flags = 0;
++			enum of_gpio_flags of_flags;
++
++			gpio = of_get_gpio_flags(cnp, i, &of_flags);
++
++			if (of_flags == OF_GPIO_ACTIVE_LOW)
++				flags |= GPIOF_ACTIVE_LOW;
++
++			if (!of_property_read_u32(cnp, "gpio-export,output", &val))
++				flags |= val ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
++			else
++				flags |= GPIOF_IN;
++
++			if (devm_gpio_request_one(&pdev->dev, gpio, flags, name ? name : of_node_full_name(np)))
++				continue;
++
++			dmc = of_property_read_bool(cnp, "gpio-export,direction_may_change");
++			gpio_export_with_name(gpio, dmc, name);
++			nb++;
++		}
++	}
++
++	dev_info(&pdev->dev, "%d gpio(s) exported\n", nb);
++
++	return 0;
++}
++
++static struct platform_driver gpio_export_driver = {
++	.driver		= {
++		.name		= "gpio-export",
++		.owner	= THIS_MODULE,
++		.of_match_table	= of_match_ptr(gpio_export_ids),
++	},
++};
++
++static int __init of_gpio_export_init(void)
++{
++	return platform_driver_probe(&gpio_export_driver, of_gpio_export_probe);
++}
++device_initcall(of_gpio_export_init);
+--- a/include/asm-generic/gpio.h
++++ b/include/asm-generic/gpio.h
+@@ -122,6 +122,12 @@ static inline int gpio_export(unsigned g
+ 	return gpiod_export(gpio_to_desc(gpio), direction_may_change);
+ }
+ 
++int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name);
++static inline int gpio_export_with_name(unsigned gpio, bool direction_may_change, const char *name)
++{
++	return __gpiod_export(gpio_to_desc(gpio), direction_may_change, name);
++}
++
+ static inline int gpio_export_link(struct device *dev, const char *name,
+ 				   unsigned gpio)
+ {
+--- a/include/linux/gpio/consumer.h
++++ b/include/linux/gpio/consumer.h
+@@ -427,6 +427,7 @@ static inline struct gpio_desc *devm_get
+ 
+ #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
+ 
++int _gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name);
+ int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
+ int gpiod_export_link(struct device *dev, const char *name,
+ 		      struct gpio_desc *desc);
+@@ -434,6 +435,13 @@ void gpiod_unexport(struct gpio_desc *de
+ 
+ #else  /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+ 
++static inline int _gpiod_export(struct gpio_desc *desc,
++			       bool direction_may_change,
++			       const char *name)
++{
++	return -ENOSYS;
++}
++
+ static inline int gpiod_export(struct gpio_desc *desc,
+ 			       bool direction_may_change)
+ {
+--- a/drivers/gpio/gpiolib-sysfs.c
++++ b/drivers/gpio/gpiolib-sysfs.c
+@@ -544,7 +544,7 @@ static struct class gpio_class = {
+  *
+  * Returns zero on success, else an error.
+  */
+-int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
++int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name)
+ {
+ 	struct gpio_chip	*chip;
+ 	struct gpiod_data	*data;
+@@ -604,6 +604,8 @@ int gpiod_export(struct gpio_desc *desc,
+ 	offset = gpio_chip_hwgpio(desc);
+ 	if (chip->names && chip->names[offset])
+ 		ioname = chip->names[offset];
++	if (name)
++		ioname = name;
+ 
+ 	dev = device_create_with_groups(&gpio_class, chip->dev,
+ 					MKDEV(0, 0), data, gpio_groups,
+@@ -625,6 +627,12 @@ err_unlock:
+ 	gpiod_dbg(desc, "%s: status %d\n", __func__, status);
+ 	return status;
+ }
++EXPORT_SYMBOL_GPL(__gpiod_export);
++
++int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
++{
++	return __gpiod_export(desc, direction_may_change, NULL);
++}
+ EXPORT_SYMBOL_GPL(gpiod_export);
+ 
+ static int match_export(struct device *dev, const void *desc)
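Besides the gpio-export device-tree nodes handled by the probe above, the underlying gpio_export_with_name() helper can also be called directly from platform code. A minimal sketch, not part of the patch; the GPIO number and label are invented for illustration:

#include <linux/gpio.h>

/* shows up as /sys/class/gpio/power:green instead of /sys/class/gpio/gpio42 */
static int demo_export_power_led(struct device *dev)
{
	int err;

	err = devm_gpio_request_one(dev, 42, GPIOF_OUT_INIT_LOW, "power:green");
	if (err)
		return err;

	return gpio_export_with_name(42, false, "power:green");
}
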
diff --git a/target/linux/lantiq/patches-4.4/0031-I2C-MIPS-lantiq-add-FALC-ON-i2c-bus-master.patch b/target/linux/lantiq/patches-4.4/0031-I2C-MIPS-lantiq-add-FALC-ON-i2c-bus-master.patch
new file mode 100644
index 0000000..a9ae084
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0031-I2C-MIPS-lantiq-add-FALC-ON-i2c-bus-master.patch
@@ -0,0 +1,1034 @@
+From f17e50f67fa3c77624edf2ca03fae0d50f0ce39b Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Thu, 7 Aug 2014 18:26:42 +0200
+Subject: [PATCH 31/36] I2C: MIPS: lantiq: add FALC-ON i2c bus master
+
+This patch adds the driver needed to make the I2C bus work on FALC-ON SoCs.
+
+Signed-off-by: Thomas Langer <thomas.langer at lantiq.com>
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/i2c/busses/Kconfig      |   10 +
+ drivers/i2c/busses/Makefile     |    1 +
+ drivers/i2c/busses/i2c-lantiq.c |  747 +++++++++++++++++++++++++++++++++++++++
+ drivers/i2c/busses/i2c-lantiq.h |  234 ++++++++++++
+ 4 files changed, 992 insertions(+)
+ create mode 100644 drivers/i2c/busses/i2c-lantiq.c
+ create mode 100644 drivers/i2c/busses/i2c-lantiq.h
+
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -639,6 +639,16 @@ config I2C_MESON
+ 	  If you say yes to this option, support will be included for the
+ 	  I2C interface on the Amlogic Meson family of SoCs.
+ 
++config I2C_LANTIQ
++	tristate "Lantiq I2C interface"
++	depends on LANTIQ && SOC_FALCON
++	help
++	  If you say yes to this option, support will be included for the
++	  Lantiq I2C core.
++
++	  This driver can also be built as a module. If so, the module
++	  will be called i2c-lantiq.
++
+ config I2C_MPC
+ 	tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
+ 	depends on PPC
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -59,6 +59,7 @@ obj-$(CONFIG_I2C_IMX)		+= i2c-imx.o
+ obj-$(CONFIG_I2C_IOP3XX)	+= i2c-iop3xx.o
+ obj-$(CONFIG_I2C_JZ4780)	+= i2c-jz4780.o
+ obj-$(CONFIG_I2C_KEMPLD)	+= i2c-kempld.o
++obj-$(CONFIG_I2C_LANTIQ)	+= i2c-lantiq.o
+ obj-$(CONFIG_I2C_LPC2K)		+= i2c-lpc2k.o
+ obj-$(CONFIG_I2C_MESON)		+= i2c-meson.o
+ obj-$(CONFIG_I2C_MPC)		+= i2c-mpc.o
+--- /dev/null
++++ b/drivers/i2c/busses/i2c-lantiq.c
+@@ -0,0 +1,747 @@
++
++/*
++ * Lantiq I2C bus adapter
++ *
++ * Parts based on i2c-designware.c and other i2c drivers from Linux 2.6.33
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ * Copyright (C) 2012 Thomas Langer <thomas.langer at lantiq.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/slab.h> /* for kzalloc, kfree */
++#include <linux/i2c.h>
++#include <linux/errno.h>
++#include <linux/completion.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/io.h>
++#include <linux/of_irq.h>
++#include <linux/of.h>
++
++#include <lantiq_soc.h>
++#include "i2c-lantiq.h"
++
++/*
++ * CURRENT ISSUES:
++ * - no high speed support
++ * - ten bit mode is not tested (no slave devices)
++ */
++
++/* access macros */
++#define i2c_r32(reg)	\
++	__raw_readl(&(priv->membase)->reg)
++#define i2c_w32(val, reg)	\
++	__raw_writel(val, &(priv->membase)->reg)
++#define i2c_w32_mask(clear, set, reg)	\
++	i2c_w32((i2c_r32(reg) & ~(clear)) | (set), reg)
++
++#define DRV_NAME "i2c-lantiq"
++#define DRV_VERSION "1.00"
++
++#define LTQ_I2C_BUSY_TIMEOUT		20 /* ms */
++
++#ifdef DEBUG
++#define LTQ_I2C_XFER_TIMEOUT		(25*HZ)
++#else
++#define LTQ_I2C_XFER_TIMEOUT		HZ
++#endif
++
++#define LTQ_I2C_IMSC_DEFAULT_MASK	(I2C_IMSC_I2C_P_INT_EN | \
++					 I2C_IMSC_I2C_ERR_INT_EN)
++
++#define LTQ_I2C_ARB_LOST		(1 << 0)
++#define LTQ_I2C_NACK			(1 << 1)
++#define LTQ_I2C_RX_UFL			(1 << 2)
++#define LTQ_I2C_RX_OFL			(1 << 3)
++#define LTQ_I2C_TX_UFL			(1 << 4)
++#define LTQ_I2C_TX_OFL			(1 << 5)
++
++struct ltq_i2c {
++	struct mutex mutex;
++
++
++	/* active clock settings */
++	unsigned int input_clock;	/* clock input for i2c hardware block */
++	unsigned int i2c_clock;		/* approximated bus clock in kHz */
++
++	struct clk *clk_gate;
++	struct clk *clk_input;
++
++
++	/* resources (memory and interrupts) */
++	int irq_lb;				/* last burst irq */
++
++	struct lantiq_reg_i2c __iomem *membase;	/* base of mapped registers */
++
++	struct i2c_adapter adap;
++	struct device *dev;
++
++	struct completion cmd_complete;
++
++
++	/* message transfer data */
++	struct i2c_msg *current_msg;	/* current message */
++	int msgs_num;		/* number of messages to handle */
++	u8 *msg_buf;		/* current buffer */
++	u32 msg_buf_len;	/* remaining length of current buffer */
++	int msg_err;		/* error status of the current transfer */
++
++
++	/* master status codes */
++	enum {
++		STATUS_IDLE,
++		STATUS_ADDR,	/* address phase */
++		STATUS_WRITE,
++		STATUS_READ,
++		STATUS_READ_END,
++		STATUS_STOP
++	} status;
++};
++
++static irqreturn_t ltq_i2c_isr(int irq, void *dev_id);
++
++static inline void enable_burst_irq(struct ltq_i2c *priv)
++{
++	i2c_w32_mask(0, I2C_IMSC_LBREQ_INT_EN | I2C_IMSC_BREQ_INT_EN, imsc);
++}
++static inline void disable_burst_irq(struct ltq_i2c *priv)
++{
++	i2c_w32_mask(I2C_IMSC_LBREQ_INT_EN | I2C_IMSC_BREQ_INT_EN, 0, imsc);
++}
++
++static void prepare_msg_send_addr(struct ltq_i2c *priv)
++{
++	struct i2c_msg *msg = priv->current_msg;
++	int rd = !!(msg->flags & I2C_M_RD);	/* extends to 0 or 1 */
++	u16 addr = msg->addr;
++
++	/* new i2c_msg */
++	priv->msg_buf = msg->buf;
++	priv->msg_buf_len = msg->len;
++	if (rd)
++		priv->status = STATUS_READ;
++	else
++		priv->status = STATUS_WRITE;
++
++	/* send slave address */
++	if (msg->flags & I2C_M_TEN) {
++		i2c_w32(0xf0 | ((addr & 0x300) >> 7) | rd, txd);
++		i2c_w32(addr & 0xff, txd);
++	} else {
++		i2c_w32((addr & 0x7f) << 1 | rd, txd);
++	}
++}
++
++static void ltq_i2c_set_tx_len(struct ltq_i2c *priv)
++{
++	struct i2c_msg *msg = priv->current_msg;
++	int len = (msg->flags & I2C_M_TEN) ? 2 : 1;
++
++	pr_debug("set_tx_len %cX\n", (msg->flags & I2C_M_RD) ? 'R' : 'T');
++
++	priv->status = STATUS_ADDR;
++
++	if (!(msg->flags & I2C_M_RD))
++		len += msg->len;
++	else
++		/* set maximum received packet size (before rx int!) */
++		i2c_w32(msg->len, mrps_ctrl);
++	i2c_w32(len, tps_ctrl);
++	enable_burst_irq(priv);
++}
++
++static int ltq_i2c_hw_set_clock(struct i2c_adapter *adap)
++{
++	struct ltq_i2c *priv = i2c_get_adapdata(adap);
++	unsigned int input_clock = clk_get_rate(priv->clk_input);
++	u32 dec, inc = 1;
++
++	/* clock changed? */
++	if (priv->input_clock == input_clock)
++		return 0;
++
++	/*
++	 * this formula is only an approximation, found by the recommended
++	 * values in the "I2C Architecture Specification 1.7.1"
++	 */
++	dec = input_clock / (priv->i2c_clock * 2);
++	if (dec <= 6)
++		return -ENXIO;
++
++	i2c_w32(0, fdiv_high_cfg);
++	i2c_w32((inc << I2C_FDIV_CFG_INC_OFFSET) |
++		(dec << I2C_FDIV_CFG_DEC_OFFSET),
++		fdiv_cfg);
++
++	dev_info(priv->dev, "setup clocks (in %d kHz, bus %d kHz, dec=%d)\n",
++		input_clock, priv->i2c_clock, dec);
++
++	priv->input_clock = input_clock;
++	return 0;
++}
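++
++/*
++ * Worked example for the formula above (illustrative numbers only): with a
++ * 100 MHz input clock and a 100 kHz target bus clock the divider comes out
++ * as dec = 100 MHz / (2 * 100 kHz) = 500 with inc = 1, well above the
++ * dec <= 6 range rejected with -ENXIO.
++ */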
++
++static int ltq_i2c_hw_init(struct i2c_adapter *adap)
++{
++	int ret = 0;
++	struct ltq_i2c *priv = i2c_get_adapdata(adap);
++
++	/* disable bus */
++	i2c_w32_mask(I2C_RUN_CTRL_RUN_EN, 0, run_ctrl);
++
++#ifndef DEBUG
++	/* set normal operation clock divider */
++	i2c_w32(1 << I2C_CLC_RMC_OFFSET, clc);
++#else
++	/* for debugging a higher divider value! */
++	i2c_w32(0xF0 << I2C_CLC_RMC_OFFSET, clc);
++#endif
++
++	/* setup clock */
++	ret = ltq_i2c_hw_set_clock(adap);
++	if (ret != 0) {
++		dev_warn(priv->dev, "invalid clock settings\n");
++		return ret;
++	}
++
++	/* configure fifo */
++	i2c_w32(I2C_FIFO_CFG_TXFC | /* tx fifo as flow controller */
++		I2C_FIFO_CFG_RXFC | /* rx fifo as flow controller */
++		I2C_FIFO_CFG_TXFA_TXFA2 | /* tx fifo 4-byte aligned */
++		I2C_FIFO_CFG_RXFA_RXFA2 | /* rx fifo 4-byte aligned */
++		I2C_FIFO_CFG_TXBS_TXBS0 | /* tx fifo burst size is 1 word */
++		I2C_FIFO_CFG_RXBS_RXBS0,  /* rx fifo burst size is 1 word */
++		fifo_cfg);
++
++	/* configure address */
++	i2c_w32(I2C_ADDR_CFG_SOPE_EN |	/* generate stop when no more data in
++					   the fifo */
++		I2C_ADDR_CFG_SONA_EN |	/* generate stop when NA received */
++		I2C_ADDR_CFG_MnS_EN |	/* we are master device */
++		0,			/* our slave address (not used!) */
++		addr_cfg);
++
++	/* enable bus */
++	i2c_w32_mask(0, I2C_RUN_CTRL_RUN_EN, run_ctrl);
++
++	return 0;
++}
++
++static int ltq_i2c_wait_bus_not_busy(struct ltq_i2c *priv)
++{
++	unsigned long timeout;
++
++	timeout = jiffies + msecs_to_jiffies(LTQ_I2C_BUSY_TIMEOUT);
++
++	do {
++		u32 stat = i2c_r32(bus_stat);
++
++		if ((stat & I2C_BUS_STAT_BS_MASK) == I2C_BUS_STAT_BS_FREE)
++			return 0;
++
++		cond_resched();
++	} while (!time_after_eq(jiffies, timeout));
++
++	dev_err(priv->dev, "timeout waiting for bus ready\n");
++	return -ETIMEDOUT;
++}
++
++static void ltq_i2c_tx(struct ltq_i2c *priv, int last)
++{
++	if (priv->msg_buf_len && priv->msg_buf) {
++		i2c_w32(*priv->msg_buf, txd);
++
++		if (--priv->msg_buf_len)
++			priv->msg_buf++;
++		else
++			priv->msg_buf = NULL;
++	} else {
++		last = 1;
++	}
++
++	if (last)
++		disable_burst_irq(priv);
++}
++
++static void ltq_i2c_rx(struct ltq_i2c *priv, int last)
++{
++	u32 fifo_stat, timeout;
++	if (priv->msg_buf_len && priv->msg_buf) {
++		timeout = 5000000;
++		do {
++			fifo_stat = i2c_r32(ffs_stat);
++		} while (!fifo_stat && --timeout);
++		if (!timeout) {
++			last = 1;
++			pr_debug("\nrx timeout\n");
++			goto err;
++		}
++		while (fifo_stat) {
++			*priv->msg_buf = i2c_r32(rxd);
++			if (--priv->msg_buf_len) {
++				priv->msg_buf++;
++			} else {
++				priv->msg_buf = NULL;
++				last = 1;
++				break;
++			}
++			/*
++			 * do not read more than burst size, otherwise no "last
++			 * burst" is generated and the transaction is blocked!
++			 */
++			fifo_stat = 0;
++		}
++	} else {
++		last = 1;
++	}
++err:
++	if (last) {
++		disable_burst_irq(priv);
++
++		if (priv->status == STATUS_READ_END) {
++			/*
++			 * do the STATUS_STOP and complete() here, as sometimes
++			 * the tx_end is already seen before this is finished
++			 */
++			priv->status = STATUS_STOP;
++			complete(&priv->cmd_complete);
++		} else {
++			i2c_w32(I2C_ENDD_CTRL_SETEND, endd_ctrl);
++			priv->status = STATUS_READ_END;
++		}
++	}
++}
++
++static void ltq_i2c_xfer_init(struct ltq_i2c *priv)
++{
++	/* enable interrupts */
++	i2c_w32(LTQ_I2C_IMSC_DEFAULT_MASK, imsc);
++
++	/* trigger transfer of first msg */
++	ltq_i2c_set_tx_len(priv);
++}
++
++static void dump_msgs(struct i2c_msg msgs[], int num, int rx)
++{
++#if defined(DEBUG)
++	int i, j;
++	pr_debug("Messages %d %s\n", num, rx ? "out" : "in");
++	for (i = 0; i < num; i++) {
++		pr_debug("%2d %cX Msg(%d) addr=0x%X: ", i,
++			(msgs[i].flags & I2C_M_RD) ? 'R' : 'T',
++			msgs[i].len, msgs[i].addr);
++		if (!(msgs[i].flags & I2C_M_RD) || rx) {
++			for (j = 0; j < msgs[i].len; j++)
++				pr_debug("%02X ", msgs[i].buf[j]);
++		}
++		pr_debug("\n");
++	}
++#endif
++}
++
++static void ltq_i2c_release_bus(struct ltq_i2c *priv)
++{
++	if ((i2c_r32(bus_stat) & I2C_BUS_STAT_BS_MASK) == I2C_BUS_STAT_BS_BM)
++		i2c_w32(I2C_ENDD_CTRL_SETEND, endd_ctrl);
++}
++
++static int ltq_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
++			   int num)
++{
++	struct ltq_i2c *priv = i2c_get_adapdata(adap);
++	int ret;
++
++	dev_dbg(priv->dev, "xfer %d messages\n", num);
++	dump_msgs(msgs, num, 0);
++
++	mutex_lock(&priv->mutex);
++
++	init_completion(&priv->cmd_complete);
++	priv->current_msg = msgs;
++	priv->msgs_num = num;
++	priv->msg_err = 0;
++	priv->status = STATUS_IDLE;
++
++	/* wait for the bus to become ready */
++	ret = ltq_i2c_wait_bus_not_busy(priv);
++	if (ret)
++		goto done;
++
++	while (priv->msgs_num) {
++		/* start the transfers */
++		ltq_i2c_xfer_init(priv);
++
++		/* wait for transfers to complete */
++		ret = wait_for_completion_interruptible_timeout(
++			&priv->cmd_complete, LTQ_I2C_XFER_TIMEOUT);
++		if (ret == 0) {
++			dev_err(priv->dev, "controller timed out\n");
++			ltq_i2c_hw_init(adap);
++			ret = -ETIMEDOUT;
++			goto done;
++		} else if (ret < 0)
++			goto done;
++
++		if (priv->msg_err) {
++			if (priv->msg_err & LTQ_I2C_NACK)
++				ret = -ENXIO;
++			else
++				ret = -EREMOTEIO;
++			goto done;
++		}
++		if (--priv->msgs_num)
++			priv->current_msg++;
++	}
++	/* no error? */
++	ret = num;
++
++done:
++	ltq_i2c_release_bus(priv);
++
++	mutex_unlock(&priv->mutex);
++
++	if (ret >= 0)
++		dump_msgs(msgs, num, 1);
++
++	pr_debug("XFER ret %d\n", ret);
++	return ret;
++}
++
++static irqreturn_t ltq_i2c_isr_burst(int irq, void *dev_id)
++{
++	struct ltq_i2c *priv = dev_id;
++	struct i2c_msg *msg = priv->current_msg;
++	int last = (irq == priv->irq_lb);
++
++	if (last)
++		pr_debug("LB ");
++	else
++		pr_debug("B ");
++
++	if (msg->flags & I2C_M_RD) {
++		switch (priv->status) {
++		case STATUS_ADDR:
++			pr_debug("X");
++			prepare_msg_send_addr(priv);
++			disable_burst_irq(priv);
++			break;
++		case STATUS_READ:
++		case STATUS_READ_END:
++			pr_debug("R");
++			ltq_i2c_rx(priv, last);
++			break;
++		default:
++			disable_burst_irq(priv);
++			pr_warn("Status R %d\n", priv->status);
++			break;
++		}
++	} else {
++		switch (priv->status) {
++		case STATUS_ADDR:
++			pr_debug("x");
++			prepare_msg_send_addr(priv);
++			break;
++		case STATUS_WRITE:
++			pr_debug("w");
++			ltq_i2c_tx(priv, last);
++			break;
++		default:
++			disable_burst_irq(priv);
++			pr_warn("Status W %d\n", priv->status);
++			break;
++		}
++	}
++
++	i2c_w32(I2C_ICR_BREQ_INT_CLR | I2C_ICR_LBREQ_INT_CLR, icr);
++	return IRQ_HANDLED;
++}
++
++static void ltq_i2c_isr_prot(struct ltq_i2c *priv)
++{
++	u32 i_pro = i2c_r32(p_irqss);
++
++	pr_debug("i2c-p");
++
++	/* not acknowledge */
++	if (i_pro & I2C_P_IRQSS_NACK) {
++		priv->msg_err |= LTQ_I2C_NACK;
++		pr_debug(" nack");
++	}
++
++	/* arbitration lost */
++	if (i_pro & I2C_P_IRQSS_AL) {
++		priv->msg_err |= LTQ_I2C_ARB_LOST;
++		pr_debug(" arb-lost");
++	}
++	/* tx -> rx switch */
++	if (i_pro & I2C_P_IRQSS_RX)
++		pr_debug(" rx");
++
++	/* tx end */
++	if (i_pro & I2C_P_IRQSS_TX_END)
++		pr_debug(" txend");
++	pr_debug("\n");
++
++	if (!priv->msg_err) {
++		/* tx -> rx switch */
++		if (i_pro & I2C_P_IRQSS_RX) {
++			priv->status = STATUS_READ;
++			enable_burst_irq(priv);
++		}
++		if (i_pro & I2C_P_IRQSS_TX_END) {
++			if (priv->status == STATUS_READ)
++				priv->status = STATUS_READ_END;
++			else {
++				disable_burst_irq(priv);
++				priv->status = STATUS_STOP;
++			}
++		}
++	}
++
++	i2c_w32(i_pro, p_irqsc);
++}
++
++static irqreturn_t ltq_i2c_isr(int irq, void *dev_id)
++{
++	u32 i_raw, i_err = 0;
++	struct ltq_i2c *priv = dev_id;
++
++	i_raw = i2c_r32(mis);
++	pr_debug("i_raw 0x%08X\n", i_raw);
++
++	/* error interrupt */
++	if (i_raw & I2C_RIS_I2C_ERR_INT_INTOCC) {
++		i_err = i2c_r32(err_irqss);
++		pr_debug("i_err 0x%08X bus_stat 0x%04X\n",
++			i_err, i2c_r32(bus_stat));
++
++		/* tx fifo overflow (8) */
++		if (i_err & I2C_ERR_IRQSS_TXF_OFL)
++			priv->msg_err |= LTQ_I2C_TX_OFL;
++
++		/* tx fifo underflow (4) */
++		if (i_err & I2C_ERR_IRQSS_TXF_UFL)
++			priv->msg_err |= LTQ_I2C_TX_UFL;
++
++		/* rx fifo overflow (2) */
++		if (i_err & I2C_ERR_IRQSS_RXF_OFL)
++			priv->msg_err |= LTQ_I2C_RX_OFL;
++
++		/* rx fifo underflow (1) */
++		if (i_err & I2C_ERR_IRQSS_RXF_UFL)
++			priv->msg_err |= LTQ_I2C_RX_UFL;
++
++		i2c_w32(i_err, err_irqsc);
++	}
++
++	/* protocol interrupt */
++	if (i_raw & I2C_RIS_I2C_P_INT_INTOCC)
++		ltq_i2c_isr_prot(priv);
++
++	if ((priv->msg_err) || (priv->status == STATUS_STOP))
++		complete(&priv->cmd_complete);
++
++	return IRQ_HANDLED;
++}
++
++static u32 ltq_i2c_functionality(struct i2c_adapter *adap)
++{
++	return	I2C_FUNC_I2C |
++		I2C_FUNC_10BIT_ADDR |
++		I2C_FUNC_SMBUS_EMUL;
++}
++
++static struct i2c_algorithm ltq_i2c_algorithm = {
++	.master_xfer	= ltq_i2c_xfer,
++	.functionality	= ltq_i2c_functionality,
++};
++
++static int __devinit ltq_i2c_probe(struct platform_device *pdev)
++{
++	struct device_node *node = pdev->dev.of_node;
++	struct ltq_i2c *priv;
++	struct i2c_adapter *adap;
++	struct resource *mmres, irqres[4];
++	int ret = 0;
++
++	dev_dbg(&pdev->dev, "probing\n");
++
++	mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	ret = of_irq_to_resource_table(node, irqres, 4);
++	if (!mmres || (ret != 4)) {
++		dev_err(&pdev->dev, "no resources\n");
++		return -ENODEV;
++	}
++
++	/* allocate private data */
++	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
++	if (!priv) {
++		dev_err(&pdev->dev, "can't allocate private data\n");
++		return -ENOMEM;
++	}
++
++	adap = &priv->adap;
++	i2c_set_adapdata(adap, priv);
++	adap->owner = THIS_MODULE;
++	adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
++	strlcpy(adap->name, DRV_NAME "-adapter", sizeof(adap->name));
++	adap->algo = &ltq_i2c_algorithm;
++
++	if (of_property_read_u32(node, "clock-frequency", &priv->i2c_clock)) {
++		dev_warn(&pdev->dev, "No I2C speed selected, using 100kHz\n");
++		priv->i2c_clock = 100000;
++	}
++
++	init_completion(&priv->cmd_complete);
++	mutex_init(&priv->mutex);
++
++	priv->membase = devm_request_and_ioremap(&pdev->dev, mmres);
++	if (priv->membase == NULL)
++		return -ENOMEM;
++
++	priv->dev = &pdev->dev;
++	priv->irq_lb = irqres[0].start;
++
++	ret = devm_request_irq(&pdev->dev, irqres[0].start, ltq_i2c_isr_burst,
++		0x0, "i2c lb", priv);
++	if (ret) {
++		dev_err(&pdev->dev, "can't get last burst IRQ %d\n",
++			irqres[0].start);
++		return -ENODEV;
++	}
++
++	ret = devm_request_irq(&pdev->dev, irqres[1].start, ltq_i2c_isr_burst,
++		0x0, "i2c b", priv);
++	if (ret) {
++		dev_err(&pdev->dev, "can't get burst IRQ %d\n",
++			irqres[1].start);
++		return -ENODEV;
++	}
++
++	ret = devm_request_irq(&pdev->dev, irqres[2].start, ltq_i2c_isr,
++		0x0, "i2c err", priv);
++	if (ret) {
++		dev_err(&pdev->dev, "can't get error IRQ %d\n",
++			irqres[2].start);
++		return -ENODEV;
++	}
++
++	ret = devm_request_irq(&pdev->dev, irqres[3].start, ltq_i2c_isr,
++		0x0, "i2c p", priv);
++	if (ret) {
++		dev_err(&pdev->dev, "can't get protocol IRQ %d\n",
++			irqres[3].start);
++		return -ENODEV;
++	}
++
++	dev_dbg(&pdev->dev, "mapped io-space to %p\n", priv->membase);
++	dev_dbg(&pdev->dev, "use IRQs %d, %d, %d, %d\n", irqres[0].start,
++		irqres[1].start, irqres[2].start, irqres[3].start);
++
++	priv->clk_gate = devm_clk_get(&pdev->dev, NULL);
++	if (IS_ERR(priv->clk_gate)) {
++		dev_err(&pdev->dev, "failed to get i2c clk\n");
++		return -ENOENT;
++	}
++
++	/* this is a static clock, which has no refcounting */
++	priv->clk_input = clk_get_fpi();
++	if (IS_ERR(priv->clk_input)) {
++		dev_err(&pdev->dev, "failed to get fpi clk\n");
++		return -ENOENT;
++	}
++
++	clk_activate(priv->clk_gate);
++
++	/* add our adapter to the i2c stack */
++	ret = i2c_add_numbered_adapter(adap);
++	if (ret) {
++		dev_err(&pdev->dev, "can't register I2C adapter\n");
++		goto out;
++	}
++
++	platform_set_drvdata(pdev, priv);
++	i2c_set_adapdata(adap, priv);
++
++	/* print module version information */
++	dev_dbg(&pdev->dev, "module id=%u revision=%u\n",
++		(i2c_r32(id) & I2C_ID_ID_MASK) >> I2C_ID_ID_OFFSET,
++		(i2c_r32(id) & I2C_ID_REV_MASK) >> I2C_ID_REV_OFFSET);
++
++	/* initialize HW */
++	ret = ltq_i2c_hw_init(adap);
++	if (ret) {
++		dev_err(&pdev->dev, "can't configure adapter\n");
++		i2c_del_adapter(adap);
++		platform_set_drvdata(pdev, NULL);
++	} else {
++		dev_info(&pdev->dev, "version %s\n", DRV_VERSION);
++	}
++
++	of_i2c_register_devices(adap);
++
++out:
++	/* if init failed, we need to deactivate the clock gate */
++	if (ret)
++		clk_deactivate(priv->clk_gate);
++
++	return ret;
++}
++
++static int __devexit ltq_i2c_remove(struct platform_device *pdev)
++{
++	struct ltq_i2c *priv = platform_get_drvdata(pdev);
++
++	/* disable bus */
++	i2c_w32_mask(I2C_RUN_CTRL_RUN_EN, 0, run_ctrl);
++
++	/* power down the core */
++	clk_deactivate(priv->clk_gate);
++
++	/* remove driver */
++	i2c_del_adapter(&priv->adap);
++	/* note: priv is devm-allocated and is freed automatically */
++
++	dev_dbg(&pdev->dev, "removed\n");
++	platform_set_drvdata(pdev, NULL);
++
++	return 0;
++}
++static const struct of_device_id ltq_i2c_match[] = {
++	{ .compatible = "lantiq,lantiq-i2c" },
++	{},
++};
++MODULE_DEVICE_TABLE(of, ltq_i2c_match);
++
++static struct platform_driver ltq_i2c_driver = {
++	.probe	= ltq_i2c_probe,
++	.remove	= __devexit_p(ltq_i2c_remove),
++	.driver	= {
++		.name	= DRV_NAME,
++		.owner	= THIS_MODULE,
++		.of_match_table = ltq_i2c_match,
++	},
++};
++
++module_platform_driver(ltq_i2c_driver);
++
++MODULE_DESCRIPTION("Lantiq I2C bus adapter");
++MODULE_AUTHOR("Thomas Langer <thomas.langer at lantiq.com>");
++MODULE_ALIAS("platform:" DRV_NAME);
++MODULE_LICENSE("GPL");
++MODULE_VERSION(DRV_VERSION);
+--- /dev/null
++++ b/drivers/i2c/busses/i2c-lantiq.h
+@@ -0,0 +1,234 @@
++#ifndef I2C_LANTIQ_H
++#define I2C_LANTIQ_H
++
++/* I2C register structure */
++struct lantiq_reg_i2c {
++	/* I2C Kernel Clock Control Register */
++	unsigned int clc; /* 0x00000000 */
++	/* Reserved */
++	unsigned int res_0; /* 0x00000004 */
++	/* I2C Identification Register */
++	unsigned int id; /* 0x00000008 */
++	/* Reserved */
++	unsigned int res_1; /* 0x0000000C */
++	/*
++	 * I2C RUN Control Register
++	 * This register enables and disables the I2C peripheral. Before
++	 * enabling, the I2C has to be configured properly. After enabling
++	 * no configuration is possible
++	 */
++	unsigned int run_ctrl; /* 0x00000010 */
++	/*
++	 * I2C End Data Control Register
++	 * This register is used to either turn around the data transmission
++	 * direction or to address another slave without sending a stop
++	 * condition. Also the software can stop the slave-transmitter by
++	 * sending a not-acknowledge when working as master-receiver or even
++	 * stop data transmission immediately when operating as
++	 * master-transmitter. Writing to the bits of this control
++	 * register is only effective when in MASTER RECEIVES BYTES, MASTER
++	 * TRANSMITS BYTES, MASTER RESTART or SLAVE RECEIVE BYTES state
++	 */
++	unsigned int endd_ctrl; /* 0x00000014 */
++	/*
++	 * I2C Fractional Divider Configuration Register
++	 * This register is used to program the fractional divider of the I2C
++	 * bus. Before the peripheral is switched on by setting the RUN-bit the
++	 * two (fixed) values for the two operating frequencies are programmed
++	 * into these (configuration) registers. The Register FDIV_HIGH_CFG has
++	 * the same layout as I2C_FDIV_CFG.
++	 */
++	unsigned int fdiv_cfg; /* 0x00000018 */
++	/*
++	 * I2C Fractional Divider (highspeed mode) Configuration Register
++	 * This register is used to program the fractional divider of the I2C
++	 * bus. Before the peripheral is switched on by setting the RUN-bit the
++	 * two (fixed) values for the two operating frequencies are programmed
++	 * into these (configuration) registers. The Register FDIV_CFG has the
++	 * same layout as I2C_FDIV_CFG.
++	 */
++	unsigned int fdiv_high_cfg; /* 0x0000001C */
++	/* I2C Address Configuration Register */
++	unsigned int addr_cfg; /* 0x00000020 */
++	/* I2C Bus Status Register
++	 * This register gives status information about the I2C. This additional
++	 * information can be used by the software to start proper actions.
++	 */
++	unsigned int bus_stat; /* 0x00000024 */
++	/* I2C FIFO Configuration Register */
++	unsigned int fifo_cfg; /* 0x00000028 */
++	/* I2C Maximum Received Packet Size Register */
++	unsigned int mrps_ctrl; /* 0x0000002C */
++	/* I2C Received Packet Size Status Register */
++	unsigned int rps_stat; /* 0x00000030 */
++	/* I2C Transmit Packet Size Register */
++	unsigned int tps_ctrl; /* 0x00000034 */
++	/* I2C Filled FIFO Stages Status Register */
++	unsigned int ffs_stat; /* 0x00000038 */
++	/* Reserved */
++	unsigned int res_2; /* 0x0000003C */
++	/* I2C Timing Configuration Register */
++	unsigned int tim_cfg; /* 0x00000040 */
++	/* Reserved */
++	unsigned int res_3[7]; /* 0x00000044 */
++	/* I2C Error Interrupt Request Source Mask Register */
++	unsigned int err_irqsm; /* 0x00000060 */
++	/* I2C Error Interrupt Request Source Status Register */
++	unsigned int err_irqss; /* 0x00000064 */
++	/* I2C Error Interrupt Request Source Clear Register */
++	unsigned int err_irqsc; /* 0x00000068 */
++	/* Reserved */
++	unsigned int res_4; /* 0x0000006C */
++	/* I2C Protocol Interrupt Request Source Mask Register */
++	unsigned int p_irqsm; /* 0x00000070 */
++	/* I2C Protocol Interrupt Request Source Status Register */
++	unsigned int p_irqss; /* 0x00000074 */
++	/* I2C Protocol Interrupt Request Source Clear Register */
++	unsigned int p_irqsc; /* 0x00000078 */
++	/* Reserved */
++	unsigned int res_5; /* 0x0000007C */
++	/* I2C Raw Interrupt Status Register */
++	unsigned int ris; /* 0x00000080 */
++	/* I2C Interrupt Mask Control Register */
++	unsigned int imsc; /* 0x00000084 */
++	/* I2C Masked Interrupt Status Register */
++	unsigned int mis; /* 0x00000088 */
++	/* I2C Interrupt Clear Register */
++	unsigned int icr; /* 0x0000008C */
++	/* I2C Interrupt Set Register */
++	unsigned int isr; /* 0x00000090 */
++	/* I2C DMA Enable Register */
++	unsigned int dmae; /* 0x00000094 */
++	/* Reserved */
++	unsigned int res_6[8154]; /* 0x00000098 */
++	/* I2C Transmit Data Register */
++	unsigned int txd; /* 0x00008000 */
++	/* Reserved */
++	unsigned int res_7[4095]; /* 0x00008004 */
++	/* I2C Receive Data Register */
++	unsigned int rxd; /* 0x0000C000 */
++	/* Reserved */
++	unsigned int res_8[4095]; /* 0x0000C004 */
++};
++
++/*
++ * Clock Divider for Normal Run Mode
++ * Max 8-bit divider value. If RMC is 0, the module is disabled. Note: As long
++ * as the new divider value RMC is not valid, the register returns 0x0000 00xx
++ * on reading.
++ */
++#define I2C_CLC_RMC_MASK 0x0000FF00
++/* field offset */
++#define I2C_CLC_RMC_OFFSET 8
++
++/* Fields of "I2C Identification Register" */
++/* Module ID */
++#define I2C_ID_ID_MASK 0x0000FF00
++/* field offset */
++#define I2C_ID_ID_OFFSET 8
++/* Revision */
++#define I2C_ID_REV_MASK 0x000000FF
++/* field offset */
++#define I2C_ID_REV_OFFSET 0
++
++/* Fields of "I2C Interrupt Mask Control Register" */
++/* Enable */
++#define I2C_IMSC_BREQ_INT_EN 0x00000008
++/* Enable */
++#define I2C_IMSC_LBREQ_INT_EN 0x00000004
++
++/* Fields of "I2C Fractional Divider Configuration Register" */
++/* field offset */
++#define I2C_FDIV_CFG_INC_OFFSET 16
++
++/* Fields of "I2C Interrupt Mask Control Register" */
++/* Enable */
++#define I2C_IMSC_I2C_P_INT_EN 0x00000020
++/* Enable */
++#define I2C_IMSC_I2C_ERR_INT_EN 0x00000010
++
++/* Fields of "I2C Error Interrupt Request Source Status Register" */
++/* TXF_OFL */
++#define I2C_ERR_IRQSS_TXF_OFL 0x00000008
++/* TXF_UFL */
++#define I2C_ERR_IRQSS_TXF_UFL 0x00000004
++/* RXF_OFL */
++#define I2C_ERR_IRQSS_RXF_OFL 0x00000002
++/* RXF_UFL */
++#define I2C_ERR_IRQSS_RXF_UFL 0x00000001
++
++/* Fields of "I2C Raw Interrupt Status Register" */
++/* Read: Interrupt occurred. */
++#define I2C_RIS_I2C_ERR_INT_INTOCC 0x00000010
++/* Read: Interrupt occurred. */
++#define I2C_RIS_I2C_P_INT_INTOCC 0x00000020
++
++/* Fields of "I2C FIFO Configuration Register" */
++/* TX FIFO Flow Control */
++#define I2C_FIFO_CFG_TXFC 0x00020000
++/* RX FIFO Flow Control */
++#define I2C_FIFO_CFG_RXFC 0x00010000
++/* Word aligned (character alignment of four characters) */
++#define I2C_FIFO_CFG_TXFA_TXFA2 0x00002000
++/* Word aligned (character alignment of four characters) */
++#define I2C_FIFO_CFG_RXFA_RXFA2 0x00000200
++/* 1 word */
++#define I2C_FIFO_CFG_TXBS_TXBS0 0x00000000
++
++/* Fields of "I2C FIFO Configuration Register" */
++/* 1 word */
++#define I2C_FIFO_CFG_RXBS_RXBS0 0x00000000
++/* Stop on Packet End Enable */
++#define I2C_ADDR_CFG_SOPE_EN 0x00200000
++/* Stop on Not Acknowledge Enable */
++#define I2C_ADDR_CFG_SONA_EN 0x00100000
++/* Enable */
++#define I2C_ADDR_CFG_MnS_EN 0x00080000
++
++/* Fields of "I2C Interrupt Clear Register" */
++/* Clear */
++#define I2C_ICR_BREQ_INT_CLR 0x00000008
++/* Clear */
++#define I2C_ICR_LBREQ_INT_CLR 0x00000004
++
++/* Fields of "I2C Fractional Divider Configuration Register" */
++/* field offset */
++#define I2C_FDIV_CFG_DEC_OFFSET 0
++
++/* Fields of "I2C Bus Status Register" */
++/* Bus Status */
++#define I2C_BUS_STAT_BS_MASK 0x00000003
++/* Read from I2C Bus. */
++#define I2C_BUS_STAT_RNW_READ 0x00000004
++/* I2C Bus is free. */
++#define I2C_BUS_STAT_BS_FREE 0x00000000
++/*
++ * The device is working as master and has claimed control of the
++ * I2C bus (busy master).
++ */
++#define I2C_BUS_STAT_BS_BM 0x00000002
++
++/* Fields of "I2C RUN Control Register" */
++/* Enable */
++#define I2C_RUN_CTRL_RUN_EN 0x00000001
++
++/* Fields of "I2C End Data Control Register" */
++/*
++ * Set End of Transmission
++ * Note: Do not write '1' to this bit when bus is free. This will cause an
++ * abort after the first byte when a new transfer is started.
++ */
++#define I2C_ENDD_CTRL_SETEND 0x00000002
++
++/* Fields of "I2C Protocol Interrupt Request Source Status Register" */
++/* NACK */
++#define I2C_P_IRQSS_NACK 0x00000010
++/* AL */
++#define I2C_P_IRQSS_AL 0x00000008
++/* RX */
++#define I2C_P_IRQSS_RX 0x00000040
++/* TX_END */
++#define I2C_P_IRQSS_TX_END 0x00000020
++
++
++#endif /* I2C_LANTIQ_H */
diff --git a/target/linux/lantiq/patches-4.4/0032-USB-fix-roothub-for-IFXHCD.patch b/target/linux/lantiq/patches-4.4/0032-USB-fix-roothub-for-IFXHCD.patch
new file mode 100644
index 0000000..1577cc8
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0032-USB-fix-roothub-for-IFXHCD.patch
@@ -0,0 +1,31 @@
+From 326714a47233e4a524afa0c8398276fddf0dbd4d Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Thu, 6 Dec 2012 19:59:53 +0100
+Subject: [PATCH 32/36] USB: fix roothub for IFXHCD
+
+---
+ arch/mips/lantiq/Kconfig |    1 +
+ drivers/usb/core/hub.c   |    2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/mips/lantiq/Kconfig
++++ b/arch/mips/lantiq/Kconfig
+@@ -3,6 +3,7 @@ if LANTIQ
+ config SOC_TYPE_XWAY
+ 	bool
+ 	select PINCTRL_XWAY
++	select USB_ARCH_HAS_HCD
+ 	default n
+ 
+ choice
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4321,7 +4321,7 @@ hub_port_init(struct usb_hub *hub, struc
+ 		udev->ttport = hdev->ttport;
+ 	} else if (udev->speed != USB_SPEED_HIGH
+ 			&& hdev->speed == USB_SPEED_HIGH) {
+-		if (!hub->tt.hub) {
++		if (hdev->parent && !hub->tt.hub) {
+ 			dev_err(&udev->dev, "parent hub has no TT\n");
+ 			retval = -EINVAL;
+ 			goto fail;
diff --git a/target/linux/lantiq/patches-4.4/0033-SPI-MIPS-lantiq-adds-spi-xway.patch b/target/linux/lantiq/patches-4.4/0033-SPI-MIPS-lantiq-adds-spi-xway.patch
new file mode 100644
index 0000000..051a189
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0033-SPI-MIPS-lantiq-adds-spi-xway.patch
@@ -0,0 +1,1049 @@
+From e75df4f96373e5d16f8ca13aa031e54cdcfeda62 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Wed, 13 Mar 2013 09:29:37 +0100
+Subject: [PATCH 33/36] SPI: MIPS: lantiq: adds spi-xway
+
+This patch adds support for the SPI core found on several Lantiq SoCs.
+The Driver has been runtime tested in combination with m25p80 Flash Devices
+on Amazon_SE and VR9.
+
+Signed-off-by: Daniel Schwierzeck <daniel.schwierzeck at googlemail.com>
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ drivers/spi/Kconfig    |    8 +
+ drivers/spi/Makefile   |    1 +
+ drivers/spi/spi-xway.c |  977 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 986 insertions(+)
+ create mode 100644 drivers/spi/spi-xway.c
+
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -663,6 +663,14 @@ config SPI_NUC900
+ 	help
+ 	  SPI driver for Nuvoton NUC900 series ARM SoCs
+ 
++config SPI_XWAY
++	tristate "Lantiq SPI controller"
++	depends on LANTIQ
++	select SPI_BITBANG
++	help
++	  This driver supports the Lantiq SoC SPI controller in master
++	  mode.
++
+ #
+ # Add new SPI master controllers in alphabetical order above this line
+ #
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -93,4 +93,5 @@ obj-$(CONFIG_SPI_XCOMM)		+= spi-xcomm.o
+ obj-$(CONFIG_SPI_XILINX)		+= spi-xilinx.o
+ obj-$(CONFIG_SPI_XLP)			+= spi-xlp.o
+ obj-$(CONFIG_SPI_XTENSA_XTFPGA)		+= spi-xtensa-xtfpga.o
++obj-$(CONFIG_SPI_XWAY)			+= spi-xway.o
+ obj-$(CONFIG_SPI_ZYNQMP_GQSPI)		+= spi-zynqmp-gqspi.o
+--- /dev/null
++++ b/drivers/spi/spi-xway.c
+@@ -0,0 +1,1003 @@
++/*
++ * Lantiq SoC SPI controller
++ *
++ * Copyright (C) 2011 Daniel Schwierzeck <daniel.schwierzeck at googlemail.com>
++ * Copyright (C) 2012 John Crispin <blogic at openwrt.org>
++ *
++ * This program is free software; you can distribute it and/or modify it
++ * under the terms of the GNU General Public License (Version 2) as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/workqueue.h>
++#include <linux/platform_device.h>
++#include <linux/io.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/interrupt.h>
++#include <linux/completion.h>
++#include <linux/spinlock.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/spi_bitbang.h>
++#include <linux/of_irq.h>
++
++#include <lantiq_soc.h>
++
++#define LTQ_SPI_CLC		0x00	/* Clock control */
++#define LTQ_SPI_PISEL		0x04	/* Port input select */
++#define LTQ_SPI_ID		0x08	/* Identification */
++#define LTQ_SPI_CON		0x10	/* Control */
++#define LTQ_SPI_STAT		0x14	/* Status */
++#define LTQ_SPI_WHBSTATE	0x18	/* Write HW modified state */
++#define LTQ_SPI_TB		0x20	/* Transmit buffer */
++#define LTQ_SPI_RB		0x24	/* Receive buffer */
++#define LTQ_SPI_RXFCON		0x30	/* Receive FIFO control */
++#define LTQ_SPI_TXFCON		0x34	/* Transmit FIFO control */
++#define LTQ_SPI_FSTAT		0x38	/* FIFO status */
++#define LTQ_SPI_BRT		0x40	/* Baudrate timer */
++#define LTQ_SPI_BRSTAT		0x44	/* Baudrate timer status */
++#define LTQ_SPI_SFCON		0x60	/* Serial frame control */
++#define LTQ_SPI_SFSTAT		0x64	/* Serial frame status */
++#define LTQ_SPI_GPOCON		0x70	/* General purpose output control */
++#define LTQ_SPI_GPOSTAT		0x74	/* General purpose output status */
++#define LTQ_SPI_FGPO		0x78	/* Forced general purpose output */
++#define LTQ_SPI_RXREQ		0x80	/* Receive request */
++#define LTQ_SPI_RXCNT		0x84	/* Receive count */
++#define LTQ_SPI_DMACON		0xEC	/* DMA control */
++#define LTQ_SPI_IRNEN		0xF4	/* Interrupt node enable */
++#define LTQ_SPI_IRNICR		0xF8	/* Interrupt node interrupt capture */
++#define LTQ_SPI_IRNCR		0xFC	/* Interrupt node control */
++
++#define LTQ_SPI_CLC_SMC_SHIFT	16	/* Clock divider for sleep mode */
++#define LTQ_SPI_CLC_SMC_MASK	0xFF
++#define LTQ_SPI_CLC_RMC_SHIFT	8	/* Clock divider for normal run mode */
++#define LTQ_SPI_CLC_RMC_MASK	0xFF
++#define LTQ_SPI_CLC_DISS	BIT(1)	/* Disable status bit */
++#define LTQ_SPI_CLC_DISR	BIT(0)	/* Disable request bit */
++
++#define LTQ_SPI_ID_TXFS_SHIFT	24	/* Implemented TX FIFO size */
++#define LTQ_SPI_ID_TXFS_MASK	0x3F
++#define LTQ_SPI_ID_RXFS_SHIFT	16	/* Implemented RX FIFO size */
++#define LTQ_SPI_ID_RXFS_MASK	0x3F
++#define LTQ_SPI_ID_REV_MASK	0x1F	/* Hardware revision number */
++#define LTQ_SPI_ID_CFG		BIT(5)	/* DMA interface support */
++
++#define LTQ_SPI_CON_BM_SHIFT	16	/* Data width selection */
++#define LTQ_SPI_CON_BM_MASK	0x1F
++#define LTQ_SPI_CON_EM		BIT(24)	/* Echo mode */
++#define LTQ_SPI_CON_IDLE	BIT(23)	/* Idle bit value */
++#define LTQ_SPI_CON_ENBV	BIT(22)	/* Enable byte valid control */
++#define LTQ_SPI_CON_RUEN	BIT(12)	/* Receive underflow error enable */
++#define LTQ_SPI_CON_TUEN	BIT(11)	/* Transmit underflow error enable */
++#define LTQ_SPI_CON_AEN		BIT(10)	/* Abort error enable */
++#define LTQ_SPI_CON_REN		BIT(9)	/* Receive overflow error enable */
++#define LTQ_SPI_CON_TEN		BIT(8)	/* Transmit overflow error enable */
++#define LTQ_SPI_CON_LB		BIT(7)	/* Loopback control */
++#define LTQ_SPI_CON_PO		BIT(6)	/* Clock polarity control */
++#define LTQ_SPI_CON_PH		BIT(5)	/* Clock phase control */
++#define LTQ_SPI_CON_HB		BIT(4)	/* Heading control */
++#define LTQ_SPI_CON_RXOFF	BIT(1)	/* Switch receiver off */
++#define LTQ_SPI_CON_TXOFF	BIT(0)	/* Switch transmitter off */
++
++#define LTQ_SPI_STAT_RXBV_MASK	0x7
++#define LTQ_SPI_STAT_RXBV_SHIFT	28
++#define LTQ_SPI_STAT_BSY	BIT(13)	/* Busy flag */
++#define LTQ_SPI_STAT_RUE	BIT(12)	/* Receive underflow error flag */
++#define LTQ_SPI_STAT_TUE	BIT(11)	/* Transmit underflow error flag */
++#define LTQ_SPI_STAT_AE		BIT(10)	/* Abort error flag */
++#define LTQ_SPI_STAT_RE		BIT(9)	/* Receive error flag */
++#define LTQ_SPI_STAT_TE		BIT(8)	/* Transmit error flag */
++#define LTQ_SPI_STAT_MS		BIT(1)	/* Master/slave select bit */
++#define LTQ_SPI_STAT_EN		BIT(0)	/* Enable bit */
++
++#define LTQ_SPI_WHBSTATE_SETTUE	BIT(15)	/* Set transmit underflow error flag */
++#define LTQ_SPI_WHBSTATE_SETAE	BIT(14)	/* Set abort error flag */
++#define LTQ_SPI_WHBSTATE_SETRE	BIT(13)	/* Set receive error flag */
++#define LTQ_SPI_WHBSTATE_SETTE	BIT(12)	/* Set transmit error flag */
++#define LTQ_SPI_WHBSTATE_CLRTUE	BIT(11)	/* Clear transmit underflow error
++						flag */
++#define LTQ_SPI_WHBSTATE_CLRAE	BIT(10)	/* Clear abort error flag */
++#define LTQ_SPI_WHBSTATE_CLRRE	BIT(9)	/* Clear receive error flag */
++#define LTQ_SPI_WHBSTATE_CLRTE	BIT(8)	/* Clear transmit error flag */
++#define LTQ_SPI_WHBSTATE_SETME	BIT(7)	/* Set mode error flag */
++#define LTQ_SPI_WHBSTATE_CLRME	BIT(6)	/* Clear mode error flag */
++#define LTQ_SPI_WHBSTATE_SETRUE	BIT(5)	/* Set receive underflow error flag */
++#define LTQ_SPI_WHBSTATE_CLRRUE	BIT(4)	/* Clear receive underflow error flag */
++#define LTQ_SPI_WHBSTATE_SETMS	BIT(3)	/* Set master select bit */
++#define LTQ_SPI_WHBSTATE_CLRMS	BIT(2)	/* Clear master select bit */
++#define LTQ_SPI_WHBSTATE_SETEN	BIT(1)	/* Set enable bit (operational mode) */
++#define LTQ_SPI_WHBSTATE_CLREN	BIT(0)	/* Clear enable bit (config mode) */
++#define LTQ_SPI_WHBSTATE_CLR_ERRORS	0x0F50
++
++#define LTQ_SPI_RXFCON_RXFITL_SHIFT	8 /* FIFO interrupt trigger level */
++#define LTQ_SPI_RXFCON_RXFITL_MASK	0x3F
++#define LTQ_SPI_RXFCON_RXFLU		BIT(1)	/* FIFO flush */
++#define LTQ_SPI_RXFCON_RXFEN		BIT(0)	/* FIFO enable */
++
++#define LTQ_SPI_TXFCON_TXFITL_SHIFT	8 /* FIFO interrupt trigger level */
++#define LTQ_SPI_TXFCON_TXFITL_MASK	0x3F
++#define LTQ_SPI_TXFCON_TXFLU		BIT(1)	/* FIFO flush */
++#define LTQ_SPI_TXFCON_TXFEN		BIT(0)	/* FIFO enable */
++
++#define LTQ_SPI_FSTAT_RXFFL_MASK	0x3f
++#define LTQ_SPI_FSTAT_RXFFL_SHIFT	0
++#define LTQ_SPI_FSTAT_TXFFL_MASK	0x3f
++#define LTQ_SPI_FSTAT_TXFFL_SHIFT	8
++
++#define LTQ_SPI_GPOCON_ISCSBN_SHIFT	8
++#define LTQ_SPI_GPOCON_INVOUTN_SHIFT	0
++
++#define LTQ_SPI_FGPO_SETOUTN_SHIFT	8
++#define LTQ_SPI_FGPO_CLROUTN_SHIFT	0
++
++#define LTQ_SPI_RXREQ_RXCNT_MASK	0xFFFF	/* Receive count value */
++#define LTQ_SPI_RXCNT_TODO_MASK		0xFFFF	/* Recevie to-do value */
++
++#define LTQ_SPI_IRNEN_F		BIT(3)	/* Frame end interrupt request */
++#define LTQ_SPI_IRNEN_E		BIT(2)	/* Error end interrupt request */
++#define LTQ_SPI_IRNEN_T		BIT(0)  /* Transmit end interrupt request */
++#define LTQ_SPI_IRNEN_R		BIT(1)  /* Receive end interrupt request */
++#define LTQ_SPI_IRNEN_T_XWAY	BIT(1)  /* Transmit end interrupt request */
++#define LTQ_SPI_IRNEN_R_XWAY	BIT(0)  /* Receive end interrupt request */
++#define LTQ_SPI_IRNEN_ALL	0xF
++
++struct ltq_spi {
++	struct spi_bitbang	bitbang;
++	struct completion	done;
++	spinlock_t		lock;
++
++	struct device		*dev;
++	void __iomem		*base;
++	struct clk		*fpiclk;
++	struct clk		*spiclk;
++
++	int			status;
++	int			irq[3];
++
++	const u8		*tx;
++	u8			*rx;
++	u32			tx_cnt;
++	u32			rx_cnt;
++	u32			len;
++	struct spi_transfer	*curr_transfer;
++
++	u32 (*get_tx) (struct ltq_spi *);
++
++	u16			txfs;
++	u16			rxfs;
++	unsigned		dma_support:1;
++	unsigned		cfg_mode:1;
++
++	u32			irnen_t;
++	u32			irnen_r;
++};
++
++static inline struct ltq_spi *ltq_spi_to_hw(struct spi_device *spi)
++{
++	return spi_master_get_devdata(spi->master);
++}
++
++static inline u32 ltq_spi_reg_read(struct ltq_spi *hw, u32 reg)
++{
++	return ioread32be(hw->base + reg);
++}
++
++static inline void ltq_spi_reg_write(struct ltq_spi *hw, u32 val, u32 reg)
++{
++	iowrite32be(val, hw->base + reg);
++}
++
++static inline void ltq_spi_reg_setbit(struct ltq_spi *hw, u32 bits, u32 reg)
++{
++	u32 val;
++
++	val = ltq_spi_reg_read(hw, reg);
++	val |= bits;
++	ltq_spi_reg_write(hw, val, reg);
++}
++
++static inline void ltq_spi_reg_clearbit(struct ltq_spi *hw, u32 bits, u32 reg)
++{
++	u32 val;
++
++	val = ltq_spi_reg_read(hw, reg);
++	val &= ~bits;
++	ltq_spi_reg_write(hw, val, reg);
++}
++
++static void ltq_spi_hw_enable(struct ltq_spi *hw)
++{
++	u32 clc;
++
++	/* Power-up module */
++	clk_enable(hw->spiclk);
++
++	/*
++	 * Set clock divider for run mode to 1 to
++	 * run at same frequency as FPI bus
++	 */
++	clc = (1 << LTQ_SPI_CLC_RMC_SHIFT);
++	ltq_spi_reg_write(hw, clc, LTQ_SPI_CLC);
++}
++
++static void ltq_spi_hw_disable(struct ltq_spi *hw)
++{
++	/* Set clock divider to 0 and set module disable bit */
++	ltq_spi_reg_write(hw, LTQ_SPI_CLC_DISS, LTQ_SPI_CLC);
++
++	/* Power-down module */
++	clk_disable(hw->spiclk);
++}
++
++static void ltq_spi_reset_fifos(struct ltq_spi *hw)
++{
++	u32 val;
++
++	/*
++	 * Enable and flush FIFOs. Set interrupt trigger level to
++	 * half of FIFO count implemented in hardware.
++	 */
++	if (hw->txfs > 1) {
++		val = hw->txfs << (LTQ_SPI_TXFCON_TXFITL_SHIFT - 1);
++		val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
++		ltq_spi_reg_write(hw, val, LTQ_SPI_TXFCON);
++	}
++
++	if (hw->rxfs > 1) {
++		val = hw->rxfs << (LTQ_SPI_RXFCON_RXFITL_SHIFT - 1);
++		val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
++		ltq_spi_reg_write(hw, val, LTQ_SPI_RXFCON);
++	}
++}
++
++static inline int ltq_spi_wait_ready(struct ltq_spi *hw)
++{
++	u32 stat;
++	unsigned long timeout;
++
++	timeout = jiffies + msecs_to_jiffies(200);
++
++	do {
++		stat = ltq_spi_reg_read(hw, LTQ_SPI_STAT);
++		if (!(stat & LTQ_SPI_STAT_BSY))
++			return 0;
++
++		cond_resched();
++	} while (!time_after_eq(jiffies, timeout));
++
++	dev_err(hw->dev, "SPI wait ready timed out stat: %x\n", stat);
++
++	return -ETIMEDOUT;
++}
++
++static void ltq_spi_config_mode_set(struct ltq_spi *hw)
++{
++	if (hw->cfg_mode)
++		return;
++
++	/*
++	 * Putting the SPI module in config mode is only safe if no
++	 * transfer is in progress as indicated by busy flag STATE.BSY.
++	 */
++	if (ltq_spi_wait_ready(hw)) {
++		ltq_spi_reset_fifos(hw);
++		hw->status = -ETIMEDOUT;
++	}
++	ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
++
++	hw->cfg_mode = 1;
++}
++
++static void ltq_spi_run_mode_set(struct ltq_spi *hw)
++{
++	if (!hw->cfg_mode)
++		return;
++
++	ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
++
++	hw->cfg_mode = 0;
++}
++
++static u32 ltq_spi_tx_word_u8(struct ltq_spi *hw)
++{
++	const u8 *tx = hw->tx;
++	u32 data = *tx++;
++
++	hw->tx_cnt++;
++	hw->tx++;
++
++	return data;
++}
++
++static u32 ltq_spi_tx_word_u16(struct ltq_spi *hw)
++{
++	const u16 *tx = (u16 *) hw->tx;
++	u32 data = *tx++;
++
++	hw->tx_cnt += 2;
++	hw->tx += 2;
++
++	return data;
++}
++
++static u32 ltq_spi_tx_word_u32(struct ltq_spi *hw)
++{
++	const u32 *tx = (u32 *) hw->tx;
++	u32 data = *tx++;
++
++	hw->tx_cnt += 4;
++	hw->tx += 4;
++
++	return data;
++}
++
++static void ltq_spi_bits_per_word_set(struct spi_device *spi)
++{
++	struct ltq_spi *hw = ltq_spi_to_hw(spi);
++	u32 bm;
++	u8 bits_per_word = spi->bits_per_word;
++
++	/*
++	 * Use either default value of SPI device or value
++	 * from current transfer.
++	 */
++	if (hw->curr_transfer && hw->curr_transfer->bits_per_word)
++		bits_per_word = hw->curr_transfer->bits_per_word;
++
++	if (bits_per_word <= 8)
++		hw->get_tx = ltq_spi_tx_word_u8;
++	else if (bits_per_word <= 16)
++		hw->get_tx = ltq_spi_tx_word_u16;
++	else if (bits_per_word <= 32)
++		hw->get_tx = ltq_spi_tx_word_u32;
++
++	/* CON.BM value = bits_per_word - 1 */
++	bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_SHIFT;
++
++	ltq_spi_reg_clearbit(hw, LTQ_SPI_CON_BM_MASK <<
++			     LTQ_SPI_CON_BM_SHIFT, LTQ_SPI_CON);
++	ltq_spi_reg_setbit(hw, bm, LTQ_SPI_CON);
++}
++
++static void ltq_spi_speed_set(struct spi_device *spi)
++{
++	struct ltq_spi *hw = ltq_spi_to_hw(spi);
++	u32 br, max_speed_hz, spi_clk;
++	u32 speed_hz = spi->max_speed_hz;
++
++	/*
++	 * Use either default value of SPI device or value
++	 * from current transfer.
++	 */
++	if (hw->curr_transfer && hw->curr_transfer->speed_hz)
++		speed_hz = hw->curr_transfer->speed_hz;
++
++	/*
++	 * SPI module clock is derived from FPI bus clock dependent on
++	 * divider value in CLC.RMS which is always set to 1.
++	 */
++	spi_clk = clk_get_rate(hw->fpiclk);
++
++	/*
++	 * Maximum SPI clock frequency in master mode is half of
++	 * SPI module clock frequency. Maximum reload value of
++	 * baudrate generator BR is 2^16.
++	 */
++	max_speed_hz = spi_clk / 2;
++	if (speed_hz >= max_speed_hz)
++		br = 0;
++	else
++		br = (max_speed_hz / speed_hz) - 1;
++
++	if (br > 0xFFFF)
++		br = 0xFFFF;
++
++	ltq_spi_reg_write(hw, br, LTQ_SPI_BRT);
++}
++
++static void ltq_spi_clockmode_set(struct spi_device *spi)
++{
++	struct ltq_spi *hw = ltq_spi_to_hw(spi);
++	u32 con;
++
++	con = ltq_spi_reg_read(hw, LTQ_SPI_CON);
++
++	/*
++	 * SPI mode mapping in CON register:
++	 * Mode CPOL CPHA CON.PO CON.PH
++	 *  0    0    0      0      1
++	 *  1    0    1      0      0
++	 *  2    1    0      1      1
++	 *  3    1    1      1      0
++	 */
++	if (spi->mode & SPI_CPHA)
++		con &= ~LTQ_SPI_CON_PH;
++	else
++		con |= LTQ_SPI_CON_PH;
++
++	if (spi->mode & SPI_CPOL)
++		con |= LTQ_SPI_CON_PO;
++	else
++		con &= ~LTQ_SPI_CON_PO;
++
++	/* Set heading control */
++	if (spi->mode & SPI_LSB_FIRST)
++		con &= ~LTQ_SPI_CON_HB;
++	else
++		con |= LTQ_SPI_CON_HB;
++
++	ltq_spi_reg_write(hw, con, LTQ_SPI_CON);
++}
++
++static void ltq_spi_xmit_set(struct ltq_spi *hw, struct spi_transfer *t)
++{
++	u32 con;
++
++	con = ltq_spi_reg_read(hw, LTQ_SPI_CON);
++
++	if (t) {
++		if (t->tx_buf && t->rx_buf) {
++			con &= ~(LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF);
++		} else if (t->rx_buf) {
++			con &= ~LTQ_SPI_CON_RXOFF;
++			con |= LTQ_SPI_CON_TXOFF;
++		} else if (t->tx_buf) {
++			con &= ~LTQ_SPI_CON_TXOFF;
++			con |= LTQ_SPI_CON_RXOFF;
++		}
++	} else
++		con |= (LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF);
++
++	ltq_spi_reg_write(hw, con, LTQ_SPI_CON);
++}
++
++static void ltq_spi_internal_cs_activate(struct spi_device *spi)
++{
++	struct ltq_spi *hw = ltq_spi_to_hw(spi);
++	u32 fgpo;
++
++	fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_CLROUTN_SHIFT));
++	ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
++}
++
++static void ltq_spi_internal_cs_deactivate(struct spi_device *spi)
++{
++	struct ltq_spi *hw = ltq_spi_to_hw(spi);
++	u32 fgpo;
++
++	fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_SETOUTN_SHIFT));
++	ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
++}
++
++static void ltq_spi_chipselect(struct spi_device *spi, int cs)
++{
++	struct ltq_spi *hw = ltq_spi_to_hw(spi);
++
++	if (ltq_spi_wait_ready(hw))
++		dev_err(&spi->dev, "wait failed\n");
++
++	switch (cs) {
++	case BITBANG_CS_ACTIVE:
++		ltq_spi_bits_per_word_set(spi);
++		ltq_spi_speed_set(spi);
++		ltq_spi_clockmode_set(spi);
++		ltq_spi_run_mode_set(hw);
++		ltq_spi_internal_cs_activate(spi);
++		break;
++
++	case BITBANG_CS_INACTIVE:
++		ltq_spi_internal_cs_deactivate(spi);
++		ltq_spi_config_mode_set(hw);
++		break;
++	}
++}
++
++static int ltq_spi_setup_transfer(struct spi_device *spi,
++				  struct spi_transfer *t)
++{
++	struct ltq_spi *hw = ltq_spi_to_hw(spi);
++	u8 bits_per_word = spi->bits_per_word;
++
++	hw->curr_transfer = t;
++
++	if (t && t->bits_per_word)
++		bits_per_word = t->bits_per_word;
++
++	if (bits_per_word > 32)
++		return -EINVAL;
++
++	return 0;
++}
++
++static int ltq_spi_setup(struct spi_device *spi)
++{
++	struct ltq_spi *hw = ltq_spi_to_hw(spi);
++	u32 gpocon, fgpo;
++
++	/* Set default word length to 8 if not set */
++	if (!spi->bits_per_word)
++		spi->bits_per_word = 8;
++
++	if (spi->bits_per_word > 32)
++		return -EINVAL;
++
++	/*
++	 * Up to six GPIOs can be connected to the SPI module
++	 * via GPIO alternate function to control the chip select lines.
++	 */
++	gpocon = (1 << (spi->chip_select +
++			LTQ_SPI_GPOCON_ISCSBN_SHIFT));
++
++	if (spi->mode & SPI_CS_HIGH)
++		gpocon |= (1 << spi->chip_select);
++
++	fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_SETOUTN_SHIFT));
++
++	ltq_spi_reg_setbit(hw, gpocon, LTQ_SPI_GPOCON);
++	ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
++
++	return 0;
++}
++
++static void ltq_spi_cleanup(struct spi_device *spi)
++{
++
++}
++
++static void ltq_spi_txfifo_write(struct ltq_spi *hw)
++{
++	u32 fstat, data;
++	u16 fifo_space;
++
++	/* Determine how many FIFO entries are free for TX data */
++	fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
++	fifo_space = hw->txfs - ((fstat >> LTQ_SPI_FSTAT_TXFFL_SHIFT) &
++					LTQ_SPI_FSTAT_TXFFL_MASK);
++
++	if (!fifo_space)
++		return;
++
++	while (hw->tx_cnt < hw->len && fifo_space) {
++		data = hw->get_tx(hw);
++		ltq_spi_reg_write(hw, data, LTQ_SPI_TB);
++		fifo_space--;
++	}
++}
++
++static void ltq_spi_rxfifo_read(struct ltq_spi *hw)
++{
++	u32 fstat, data, *rx32;
++	u16 fifo_fill;
++	u8 rxbv, shift, *rx8;
++
++	/* Determine how many FIFO entries are filled with RX data */
++	fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
++	fifo_fill = ((fstat >> LTQ_SPI_FSTAT_RXFFL_SHIFT)
++			& LTQ_SPI_FSTAT_RXFFL_MASK);
++
++	if (!fifo_fill)
++		return;
++
++	/*
++	 * The 32-bit FIFO entries are always used completely, independent of
++	 * the bits_per_word value. Thus four bytes have to be read at once
++	 * per FIFO entry.
++	 */
++	rx32 = (u32 *) hw->rx;
++	while (hw->len - hw->rx_cnt >= 4 && fifo_fill) {
++		*rx32++ = ltq_spi_reg_read(hw, LTQ_SPI_RB);
++		hw->rx_cnt += 4;
++		hw->rx += 4;
++		fifo_fill--;
++	}
++
++	/*
++	 * If there are remaining bytes, read byte count from STAT.RXBV
++	 * register and read the data byte-wise.
++	 */
++	while (fifo_fill && hw->rx_cnt < hw->len) {
++		rxbv = (ltq_spi_reg_read(hw, LTQ_SPI_STAT) >>
++			LTQ_SPI_STAT_RXBV_SHIFT) & LTQ_SPI_STAT_RXBV_MASK;
++		data = ltq_spi_reg_read(hw, LTQ_SPI_RB);
++
++		shift = (rxbv - 1) * 8;
++		rx8 = hw->rx;
++
++		while (rxbv) {
++			*rx8++ = (data >> shift) & 0xFF;
++			rxbv--;
++			shift -= 8;
++			hw->rx_cnt++;
++			hw->rx++;
++		}
++
++		fifo_fill--;
++	}
++}
++
++static void ltq_spi_rxreq_set(struct ltq_spi *hw)
++{
++	u32 rxreq, rxreq_max, rxtodo;
++	u32 fstat, fifo_fill;
++
++	rxtodo = ltq_spi_reg_read(hw, LTQ_SPI_RXCNT) & LTQ_SPI_RXCNT_TODO_MASK;
++
++	/*
++	 * Check if there is remaining data in the FIFO before starting a new
++	 * receive request. The controller might have processed some more data
++	 * since the last FIFO poll.
++	 */
++	fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
++	fifo_fill = ((fstat >> LTQ_SPI_FSTAT_RXFFL_SHIFT)
++			& LTQ_SPI_FSTAT_RXFFL_MASK);
++	if (fifo_fill)
++		return;
++
++	/*
++	 * In RX-only mode the serial clock is activated only after writing
++	 * the expected amount of RX bytes into RXREQ register.
++	 * To avoid receive overflows at high clocks it is better to request
++	 * only the amount of bytes that fits into all FIFOs. This value
++	 * depends on the FIFO size implemented in hardware.
++	 */
++	rxreq = hw->len - hw->rx_cnt;
++	rxreq_max = hw->rxfs << 2;
++	rxreq = min(rxreq_max, rxreq);
++
++	if (!rxtodo && rxreq)
++		ltq_spi_reg_write(hw, rxreq, LTQ_SPI_RXREQ);
++}
++
++static inline void ltq_spi_complete(struct ltq_spi *hw)
++{
++	complete(&hw->done);
++}
++
++irqreturn_t ltq_spi_tx_irq(int irq, void *data)
++{
++	struct ltq_spi *hw = data;
++	unsigned long flags;
++	int completed = 0;
++
++	spin_lock_irqsave(&hw->lock, flags);
++
++	if (hw->tx_cnt < hw->len)
++		ltq_spi_txfifo_write(hw);
++
++	if (hw->tx_cnt == hw->len)
++		completed = 1;
++
++	spin_unlock_irqrestore(&hw->lock, flags);
++
++	if (completed)
++		ltq_spi_complete(hw);
++
++	return IRQ_HANDLED;
++}
++
++irqreturn_t ltq_spi_rx_irq(int irq, void *data)
++{
++	struct ltq_spi *hw = data;
++	unsigned long flags;
++	int completed = 0;
++
++	spin_lock_irqsave(&hw->lock, flags);
++
++	if (hw->rx_cnt < hw->len) {
++		ltq_spi_rxfifo_read(hw);
++
++		if (hw->tx && hw->tx_cnt < hw->len)
++			ltq_spi_txfifo_write(hw);
++	}
++
++	if (hw->rx_cnt == hw->len)
++		completed = 1;
++	else if (!hw->tx)
++		ltq_spi_rxreq_set(hw);
++
++	spin_unlock_irqrestore(&hw->lock, flags);
++
++	if (completed)
++		ltq_spi_complete(hw);
++
++	return IRQ_HANDLED;
++}
++
++irqreturn_t ltq_spi_err_irq(int irq, void *data)
++{
++	struct ltq_spi *hw = data;
++	unsigned long flags;
++
++	spin_lock_irqsave(&hw->lock, flags);
++
++	/* Disable all interrupts */
++	ltq_spi_reg_clearbit(hw, LTQ_SPI_IRNEN_ALL, LTQ_SPI_IRNEN);
++
++	dev_err(hw->dev, "error %x\n", ltq_spi_reg_read(hw, LTQ_SPI_STAT));
++
++	/* Clear all error flags */
++	ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
++
++	/* Flush FIFOs */
++	ltq_spi_reg_setbit(hw, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
++	ltq_spi_reg_setbit(hw, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
++
++	hw->status = -EIO;
++	spin_unlock_irqrestore(&hw->lock, flags);
++
++	ltq_spi_complete(hw);
++
++	return IRQ_HANDLED;
++}
++
++static int ltq_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
++{
++	struct ltq_spi *hw = ltq_spi_to_hw(spi);
++	u32 irq_flags = 0;
++
++	hw->tx = t->tx_buf;
++	hw->rx = t->rx_buf;
++	hw->len = t->len;
++	hw->tx_cnt = 0;
++	hw->rx_cnt = 0;
++	hw->status = 0;
++	init_completion(&hw->done);
++
++	ltq_spi_xmit_set(hw, t);
++
++	/* Enable error interrupts */
++	ltq_spi_reg_setbit(hw, LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
++
++	if (hw->tx) {
++		/* Initially fill TX FIFO with as much data as possible */
++		ltq_spi_txfifo_write(hw);
++		irq_flags |= hw->irnen_t;
++
++		/* Always enable RX interrupt in Full Duplex mode */
++		if (hw->rx)
++			irq_flags |= hw->irnen_r;
++	} else if (hw->rx) {
++		/* Start RX clock */
++		ltq_spi_rxreq_set(hw);
++
++		/* Enable RX interrupt to receive data from RX FIFOs */
++		irq_flags |= hw->irnen_r;
++	}
++
++	/* Enable TX or RX interrupts */
++	ltq_spi_reg_setbit(hw, irq_flags, LTQ_SPI_IRNEN);
++	wait_for_completion(&hw->done);
++
++	/* Disable all interrupts */
++	ltq_spi_reg_clearbit(hw, LTQ_SPI_IRNEN_ALL, LTQ_SPI_IRNEN);
++
++	/*
++	 * Return length of current transfer for bitbang utility code if
++	 * no errors occurred during transmission.
++	 */
++	if (!hw->status)
++		hw->status = hw->len;
++
++	return hw->status;
++}
++
++static const struct ltq_spi_irq_map {
++	char *name;
++	irq_handler_t handler;
++} ltq_spi_irqs[] = {
++	{ "spi_rx", ltq_spi_rx_irq },
++	{ "spi_tx", ltq_spi_tx_irq },
++	{ "spi_err", ltq_spi_err_irq },
++};
++
++static int ltq_spi_probe(struct platform_device *pdev)
++{
++	struct resource irqres[3];
++	struct spi_master *master;
++	struct resource *r;
++	struct ltq_spi *hw;
++	int ret, i;
++	u32 data, id;
++
++	if (of_irq_to_resource_table(pdev->dev.of_node, irqres, 3) != 3) {
++		dev_err(&pdev->dev, "IRQ settings missing in device tree\n");
++		return -EINVAL;
++	}
++
++	master = spi_alloc_master(&pdev->dev, sizeof(struct ltq_spi));
++	if (!master) {
++		dev_err(&pdev->dev, "spi_alloc_master\n");
++		ret = -ENOMEM;
++		goto err;
++	}
++
++	hw = spi_master_get_devdata(master);
++
++	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++	if (r == NULL) {
++		dev_err(&pdev->dev, "platform_get_resource\n");
++		ret = -ENOENT;
++		goto err_master;
++	}
++
++	r = devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
++			pdev->name);
++	if (!r) {
++		dev_err(&pdev->dev, "failed to request memory region\n");
++		ret = -ENXIO;
++		goto err_master;
++	}
++
++	hw->base = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r));
++	if (!hw->base) {
++		dev_err(&pdev->dev, "failed to remap memory region\n");
++		ret = -ENXIO;
++		goto err_master;
++	}
++
++	memset(hw->irq, 0, sizeof(hw->irq));
++	for (i = 0; i < ARRAY_SIZE(ltq_spi_irqs); i++) {
++		hw->irq[i] = irqres[i].start;
++		ret = request_irq(hw->irq[i], ltq_spi_irqs[i].handler,
++				  0, ltq_spi_irqs[i].name, hw);
++		if (ret) {
++			dev_err(&pdev->dev, "failed to request %s irq (%d)\n",
++					ltq_spi_irqs[i].name, hw->irq[i]);
++			goto err_irq;
++		}
++	}
++
++	hw->fpiclk = clk_get_fpi();
++	if (IS_ERR(hw->fpiclk)) {
++		dev_err(&pdev->dev, "failed to get fpi clock\n");
++		ret = PTR_ERR(hw->fpiclk);
++		goto err_clk;
++	}
++
++	hw->spiclk = clk_get(&pdev->dev, NULL);
++	if (IS_ERR(hw->spiclk)) {
++		dev_err(&pdev->dev, "failed to get spi clock gate\n");
++		ret = PTR_ERR(hw->spiclk);
++		goto err_clk;
++	}
++
++	hw->bitbang.master = spi_master_get(master);
++	hw->bitbang.chipselect = ltq_spi_chipselect;
++	hw->bitbang.setup_transfer = ltq_spi_setup_transfer;
++	hw->bitbang.txrx_bufs = ltq_spi_txrx_bufs;
++
++	if (of_machine_is_compatible("lantiq,ase")) {
++		master->num_chipselect = 3;
++
++		hw->irnen_t = LTQ_SPI_IRNEN_T_XWAY;
++		hw->irnen_r = LTQ_SPI_IRNEN_R_XWAY;
++	} else {
++		master->num_chipselect = 6;
++
++		hw->irnen_t = LTQ_SPI_IRNEN_T;
++		hw->irnen_r = LTQ_SPI_IRNEN_R;
++	}
++
++	master->bus_num = pdev->id;
++	master->setup = ltq_spi_setup;
++	master->cleanup = ltq_spi_cleanup;
++	master->dev.of_node = pdev->dev.of_node;
++
++	hw->dev = &pdev->dev;
++	init_completion(&hw->done);
++	spin_lock_init(&hw->lock);
++
++	ltq_spi_hw_enable(hw);
++
++	/* Read module capabilities */
++	id = ltq_spi_reg_read(hw, LTQ_SPI_ID);
++	hw->txfs = (id >> LTQ_SPI_ID_TXFS_SHIFT) & LTQ_SPI_ID_TXFS_MASK;
++	hw->rxfs = (id >> LTQ_SPI_ID_RXFS_SHIFT) & LTQ_SPI_ID_RXFS_MASK;
++	hw->dma_support = (id & LTQ_SPI_ID_CFG) ? 1 : 0;
++
++	ltq_spi_config_mode_set(hw);
++
++	/* Enable error checking, disable TX/RX, set idle value high */
++	data = LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
++	    LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN |
++	    LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF | LTQ_SPI_CON_IDLE;
++	ltq_spi_reg_write(hw, data, LTQ_SPI_CON);
++
++	/* Enable master mode and clear error flags */
++	ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_SETMS |
++			  LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
++
++	/* Reset GPIO/CS registers */
++	ltq_spi_reg_write(hw, 0x0, LTQ_SPI_GPOCON);
++	ltq_spi_reg_write(hw, 0xFF00, LTQ_SPI_FGPO);
++
++	/* Enable and flush FIFOs */
++	ltq_spi_reset_fifos(hw);
++
++	ret = spi_bitbang_start(&hw->bitbang);
++	if (ret) {
++		dev_err(&pdev->dev, "spi_bitbang_start failed\n");
++		goto err_bitbang;
++	}
++
++	platform_set_drvdata(pdev, hw);
++
++	pr_info("Lantiq SoC SPI controller rev %u (TXFS %u, RXFS %u, DMA %u)\n",
++		id & LTQ_SPI_ID_REV_MASK, hw->txfs, hw->rxfs, hw->dma_support);
++
++	return 0;
++
++err_bitbang:
++	ltq_spi_hw_disable(hw);
++
++err_clk:
++	if (!IS_ERR_OR_NULL(hw->fpiclk))
++		clk_put(hw->fpiclk);
++	if (!IS_ERR_OR_NULL(hw->spiclk))
++		clk_put(hw->spiclk);
++
++err_irq:
++	/* the clocks have not been acquired yet when we get here directly */
++
++	for (; i > 0; i--)
++		free_irq(hw->irq[i - 1], hw);
++
++err_master:
++	spi_master_put(master);
++
++err:
++	return ret;
++}
++
++static int ltq_spi_remove(struct platform_device *pdev)
++{
++	struct ltq_spi *hw = platform_get_drvdata(pdev);
++	int i;
++
++	spi_bitbang_stop(&hw->bitbang);
++
++	platform_set_drvdata(pdev, NULL);
++
++	ltq_spi_config_mode_set(hw);
++	ltq_spi_hw_disable(hw);
++
++	for (i = 0; i < ARRAY_SIZE(hw->irq); i++)
++		if (0 < hw->irq[i])
++			free_irq(hw->irq[i], hw);
++
++	if (hw->fpiclk)
++		clk_put(hw->fpiclk);
++	if (hw->spiclk)
++		clk_put(hw->spiclk);
++
++	spi_master_put(hw->bitbang.master);
++
++	return 0;
++}
++
++static const struct of_device_id ltq_spi_match[] = {
++	{ .compatible = "lantiq,spi-xway" },
++	{},
++};
++MODULE_DEVICE_TABLE(of, ltq_spi_match);
++
++static struct platform_driver ltq_spi_driver = {
++	.probe = ltq_spi_probe,
++	.remove = ltq_spi_remove,
++	.driver = {
++		.name = "spi-xway",
++		.owner = THIS_MODULE,
++		.of_match_table = ltq_spi_match,
++	},
++};
++
++module_platform_driver(ltq_spi_driver);
++
++MODULE_DESCRIPTION("Lantiq SoC SPI controller driver");
++MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck at googlemail.com>");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:spi-xway");
diff --git a/target/linux/lantiq/patches-4.4/0034-reset-Fix-compile-when-reset-RESET_CONTROLLER-is-not.patch b/target/linux/lantiq/patches-4.4/0034-reset-Fix-compile-when-reset-RESET_CONTROLLER-is-not.patch
new file mode 100644
index 0000000..ddbe134
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0034-reset-Fix-compile-when-reset-RESET_CONTROLLER-is-not.patch
@@ -0,0 +1,45 @@
+From b1b9fca8c317afc3f2b78bb54f877e8a830a819d Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Fri, 9 Aug 2013 18:47:27 +0200
+Subject: [PATCH 34/36] reset: Fix compile when reset RESET_CONTROLLER is not
+ selected
+
+Drivers need to protect their reset api calls with #ifdef to avoid compile
+errors.
+
+This patch adds dummy wrappers in the same way that linux/of.h does it.
+
+Cc: linux-kernel at vger.kernel.org
+Cc: Philipp Zabel <p.zabel at pengutronix.de>
+Cc: Gabor Juhos <juhosg at openwrt.org>
+---
+ include/linux/reset-controller.h |   16 ++++++++++++++
+ include/linux/reset.h            |   43 ++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 59 insertions(+)
+
+--- a/include/linux/reset-controller.h
++++ b/include/linux/reset-controller.h
+@@ -48,7 +48,23 @@ struct reset_controller_dev {
+ 	unsigned int nr_resets;
+ };
+ 
++#if defined(CONFIG_RESET_CONTROLLER)
++
+ int reset_controller_register(struct reset_controller_dev *rcdev);
+ void reset_controller_unregister(struct reset_controller_dev *rcdev);
+ 
++#else
++
++static inline int reset_controller_register(struct reset_controller_dev *rcdev)
++{
++	return -ENOSYS;
++}
++
++static inline void reset_controller_unregister(struct reset_controller_dev *rcdev)
++{
++
++}
++
++#endif
++
+ #endif
diff --git a/target/linux/lantiq/patches-4.4/0035-owrt-lantiq-wifi-and-ethernet-eeprom-handling.patch b/target/linux/lantiq/patches-4.4/0035-owrt-lantiq-wifi-and-ethernet-eeprom-handling.patch
new file mode 100644
index 0000000..25852e2
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0035-owrt-lantiq-wifi-and-ethernet-eeprom-handling.patch
@@ -0,0 +1,630 @@
+From f8c5db89e793a4bc6c1e87bd7b3a5cec16b75bc3 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Wed, 10 Sep 2014 22:42:14 +0200
+Subject: [PATCH 35/36] owrt: lantiq: wifi and ethernet eeprom handling
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ arch/mips/include/asm/mach-lantiq/pci-ath-fixup.h  |    6 +
+ .../mips/include/asm/mach-lantiq/xway/lantiq_soc.h |    3 +
+ arch/mips/lantiq/xway/Makefile                     |    3 +
+ arch/mips/lantiq/xway/ath_eep.c                    |  282 ++++++++++++++++++++
+ arch/mips/lantiq/xway/eth_mac.c                    |   76 ++++++
+ arch/mips/lantiq/xway/pci-ath-fixup.c              |  109 ++++++++
+ arch/mips/lantiq/xway/rt_eep.c                     |   60 +++++
+ 7 files changed, 539 insertions(+)
+ create mode 100644 arch/mips/include/asm/mach-lantiq/pci-ath-fixup.h
+ create mode 100644 arch/mips/lantiq/xway/ath_eep.c
+ create mode 100644 arch/mips/lantiq/xway/eth_mac.c
+ create mode 100644 arch/mips/lantiq/xway/pci-ath-fixup.c
+ create mode 100644 arch/mips/lantiq/xway/rt_eep.c
+
+--- /dev/null
++++ b/arch/mips/include/asm/mach-lantiq/pci-ath-fixup.h
+@@ -0,0 +1,6 @@
++#ifndef _PCI_ATH_FIXUP
++#define _PCI_ATH_FIXUP
++
++void ltq_pci_ath_fixup(unsigned slot, u16 *cal_data) __init;
++
++#endif /* _PCI_ATH_FIXUP */
+--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
++++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+@@ -104,5 +104,8 @@ int xrx200_gphy_boot(struct device *dev,
+ extern void ltq_pmu_enable(unsigned int module);
+ extern void ltq_pmu_disable(unsigned int module);
+ 
++/* allow the ethernet driver to load a flash mapped mac addr */
++const u8* ltq_get_eth_mac(void);
++
+ #endif /* CONFIG_SOC_TYPE_XWAY */
+ #endif /* _LTQ_XWAY_H__ */
+--- a/arch/mips/lantiq/xway/Makefile
++++ b/arch/mips/lantiq/xway/Makefile
+@@ -2,4 +2,7 @@ obj-y := prom.o sysctrl.o clk.o reset.o
+ 
+ obj-y += vmmc.o tffs.o
+ 
++obj-y += eth_mac.o
++obj-$(CONFIG_PCI) += ath_eep.o rt_eep.o pci-ath-fixup.o
++
+ obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o
+--- /dev/null
++++ b/arch/mips/lantiq/xway/ath_eep.c
+@@ -0,0 +1,298 @@
++/*
++ *  Copyright (C) 2011 Luca Olivetti <luca at ventoso.org>
++ *  Copyright (C) 2011 John Crispin <blogic at openwrt.org>
++ *  Copyright (C) 2011 Andrej Vlašić <andrej.vlasic0 at gmail.com>
++ *  Copyright (C) 2013 Álvaro Fernández Rojas <noltari at gmail.com>
++ *  Copyright (C) 2013 Daniel Gimpelevich <daniel at gimpelevich.san-francisco.ca.us>
++ *  Copyright (C) 2015 Vittorio Gambaletta <openwrt at vittgam.net>
++ *
++ *  This program is free software; you can redistribute it and/or modify it
++ *  under the terms of the GNU General Public License version 2 as published
++ *  by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/etherdevice.h>
++#include <linux/ath5k_platform.h>
++#include <linux/ath9k_platform.h>
++#include <linux/pci.h>
++#include <linux/err.h>
++#include <linux/mtd/mtd.h>
++#include <pci-ath-fixup.h>
++#include <lantiq_soc.h>
++
++extern int (*ltq_pci_plat_dev_init)(struct pci_dev *dev);
++struct ath5k_platform_data ath5k_pdata;
++struct ath9k_platform_data ath9k_pdata = {
++	.led_pin = -1,
++};
++static u8 athxk_eeprom_mac[6];
++
++static int ath9k_pci_plat_dev_init(struct pci_dev *dev)
++{
++	dev->dev.platform_data = &ath9k_pdata;
++	return 0;
++}
++
++static int ath9k_eep_load;
++int __init of_ath9k_eeprom_probe(struct platform_device *pdev)
++{
++	struct device_node *np = pdev->dev.of_node, *mtd_np;
++	struct resource *eep_res, *mac_res = NULL;
++	void __iomem *eep, *mac;
++	int mac_offset, led_pin;
++	u32 mac_inc = 0, pci_slot = 0;
++	int i;
++	struct mtd_info *the_mtd;
++	size_t flash_readlen;
++	const __be32 *list;
++	const char *part;
++	phandle phandle;
++
++	if ((list = of_get_property(np, "ath,eep-flash", &i)) && i == 2 *
++			sizeof(*list) && (phandle = be32_to_cpup(list++)) &&
++			(mtd_np = of_find_node_by_phandle(phandle)) && ((part =
++			of_get_property(mtd_np, "label", NULL)) || (part =
++			mtd_np->name)) && (the_mtd = get_mtd_device_nm(part))
++			!= ERR_PTR(-ENODEV)) {
++		i = mtd_read(the_mtd, be32_to_cpup(list),
++				ATH9K_PLAT_EEP_MAX_WORDS << 1, &flash_readlen,
++				(void *) ath9k_pdata.eeprom_data);
++		if (!of_property_read_u32(np, "ath,mac-offset", &mac_offset)) {
++			size_t mac_readlen;
++			mtd_read(the_mtd, mac_offset, 6, &mac_readlen,
++				(void *) athxk_eeprom_mac);
++		}
++		put_mtd_device(the_mtd);
++		if ((sizeof(ath9k_pdata.eeprom_data) != flash_readlen) || i) {
++			dev_err(&pdev->dev, "failed to load eeprom from mtd\n");
++			return -ENODEV;
++		}
++	} else {
++		eep_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++		mac_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++
++		if (!eep_res) {
++			dev_err(&pdev->dev, "failed to load eeprom address\n");
++			return -ENODEV;
++		}
++		if (resource_size(eep_res) != ATH9K_PLAT_EEP_MAX_WORDS << 1) {
++			dev_err(&pdev->dev, "eeprom has an invalid size\n");
++			return -EINVAL;
++		}
++
++		eep = ioremap(eep_res->start, resource_size(eep_res));
++		memcpy_fromio(ath9k_pdata.eeprom_data, eep,
++				ATH9K_PLAT_EEP_MAX_WORDS << 1);
++	}
++
++	if (of_find_property(np, "ath,eep-swap", NULL))
++		for (i = 0; i < ATH9K_PLAT_EEP_MAX_WORDS; i++)
++			ath9k_pdata.eeprom_data[i] = swab16(ath9k_pdata.eeprom_data[i]);
++
++	if (of_find_property(np, "ath,eep-endian", NULL)) {
++		ath9k_pdata.endian_check = true;
++
++		dev_info(&pdev->dev, "endian check enabled.\n");
++	}
++
++	if (!is_valid_ether_addr(athxk_eeprom_mac)) {
++		if (mac_res) {
++			if (resource_size(mac_res) != 6) {
++				dev_err(&pdev->dev, "mac has an invalid size\n");
++				return -EINVAL;
++			}
++			mac = ioremap(mac_res->start, resource_size(mac_res));
++			memcpy_fromio(athxk_eeprom_mac, mac, 6);
++		} else if (ltq_get_eth_mac()) {
++			memcpy(athxk_eeprom_mac, ltq_get_eth_mac(), 6);
++		}
++	}
++	if (!is_valid_ether_addr(athxk_eeprom_mac)) {
++		dev_warn(&pdev->dev, "using random mac\n");
++		random_ether_addr(athxk_eeprom_mac);
++	}
++
++	if (!of_property_read_u32(np, "ath,mac-increment", &mac_inc))
++		athxk_eeprom_mac[5] += mac_inc;
++
++	ath9k_pdata.macaddr = athxk_eeprom_mac;
++	ltq_pci_plat_dev_init = ath9k_pci_plat_dev_init;
++
++	if (!of_property_read_u32(np, "ath,pci-slot", &pci_slot)) {
++		ltq_pci_ath_fixup(pci_slot, ath9k_pdata.eeprom_data);
++
++		dev_info(&pdev->dev, "pci slot: %u\n", pci_slot);
++		if (ath9k_eep_load) {
++			struct pci_dev *d = NULL;
++			while ((d = pci_get_device(PCI_VENDOR_ID_ATHEROS,
++					PCI_ANY_ID, d)) != NULL)
++				pci_fixup_device(pci_fixup_early, d);
++		}
++
++	}
++
++	if (!of_property_read_u32(np, "ath,led-pin", &led_pin)) {
++		ath9k_pdata.led_pin = led_pin;
++		dev_info(&pdev->dev, "using led pin %d.\n", led_pin);
++	}
++
++	if (of_property_read_bool(np, "ath,led-active-high")) {
++		ath9k_pdata.led_active_high = true;
++		dev_info(&pdev->dev, "inverted LED polarity\n");
++	}
++
++	if (of_property_read_bool(np, "ath,disable-2ghz")) {
++		ath9k_pdata.disable_2ghz = true;
++		dev_info(&pdev->dev, "disabled 2.4 GHz band\n");
++	}
++
++	if (of_property_read_bool(np, "ath,disable-5ghz")) {
++		ath9k_pdata.disable_5ghz = true;
++		dev_info(&pdev->dev, "disabled 5 GHz band\n");
++	}
++
++	dev_info(&pdev->dev, "loaded ath9k eeprom\n");
++
++	return 0;
++}
++
++static struct of_device_id ath9k_eeprom_ids[] = {
++	{ .compatible = "ath9k,eeprom" },
++	{ }
++};
++
++static struct platform_driver ath9k_eeprom_driver = {
++	.driver		= {
++		.name		= "ath9k,eeprom",
++		.owner	= THIS_MODULE,
++		.of_match_table	= of_match_ptr(ath9k_eeprom_ids),
++	},
++};
++
++static int __init of_ath9k_eeprom_init(void)
++{
++	int ret = platform_driver_probe(&ath9k_eeprom_driver, of_ath9k_eeprom_probe);
++
++	if (ret)
++		ath9k_eep_load = 1;
++
++	return ret;
++}
++
++static int __init of_ath9k_eeprom_init_late(void)
++{
++	if (!ath9k_eep_load)
++		return 0;
++	return platform_driver_probe(&ath9k_eeprom_driver, of_ath9k_eeprom_probe);
++}
++late_initcall(of_ath9k_eeprom_init_late);
++subsys_initcall(of_ath9k_eeprom_init);
++
++
++static int ath5k_pci_plat_dev_init(struct pci_dev *dev)
++{
++	dev->dev.platform_data = &ath5k_pdata;
++	return 0;
++}
++
++int __init of_ath5k_eeprom_probe(struct platform_device *pdev)
++{
++	struct device_node *np = pdev->dev.of_node, *mtd_np;
++	struct resource *eep_res, *mac_res = NULL;
++	void __iomem *eep, *mac;
++	int mac_offset;
++	u32 mac_inc = 0;
++	int i;
++	struct mtd_info *the_mtd;
++	size_t flash_readlen;
++	const __be32 *list;
++	const char *part;
++	phandle phandle;
++
++	if ((list = of_get_property(np, "ath,eep-flash", &i)) && i == 2 *
++			sizeof(*list) && (phandle = be32_to_cpup(list++)) &&
++			(mtd_np = of_find_node_by_phandle(phandle)) && ((part =
++			of_get_property(mtd_np, "label", NULL)) || (part =
++			mtd_np->name)) && (the_mtd = get_mtd_device_nm(part))
++			!= ERR_PTR(-ENODEV)) {
++		i = mtd_read(the_mtd, be32_to_cpup(list),
++				ATH5K_PLAT_EEP_MAX_WORDS << 1, &flash_readlen,
++				(void *) ath5k_pdata.eeprom_data);
++		put_mtd_device(the_mtd);
++		if (((ATH5K_PLAT_EEP_MAX_WORDS << 1) != flash_readlen)
++				|| i) {
++			dev_err(&pdev->dev, "failed to load eeprom from mtd\n");
++			return -ENODEV;
++		}
++	} else {
++		eep_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++		mac_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++
++		if (!eep_res) {
++			dev_err(&pdev->dev, "failed to load eeprom address\n");
++			return -ENODEV;
++		}
++		if (resource_size(eep_res) != ATH5K_PLAT_EEP_MAX_WORDS << 1) {
++			dev_err(&pdev->dev, "eeprom has an invalid size\n");
++			return -EINVAL;
++		}
++
++		eep = ioremap(eep_res->start, resource_size(eep_res));
++		ath5k_pdata.eeprom_data = kmalloc(ATH5K_PLAT_EEP_MAX_WORDS<<1,
++				GFP_KERNEL);
++		memcpy_fromio(ath5k_pdata.eeprom_data, eep,
++				ATH5K_PLAT_EEP_MAX_WORDS << 1);
++	}
++
++	if (of_find_property(np, "ath,eep-swap", NULL))
++		for (i = 0; i < ATH5K_PLAT_EEP_MAX_WORDS; i++)
++			ath5k_pdata.eeprom_data[i] = swab16(ath5k_pdata.eeprom_data[i]);
++
++	if (!of_property_read_u32(np, "ath,mac-offset", &mac_offset)) {
++		memcpy_fromio(athxk_eeprom_mac, (void*) ath5k_pdata.eeprom_data + mac_offset, 6);
++	} else if (mac_res) {
++		if (resource_size(mac_res) != 6) {
++			dev_err(&pdev->dev, "mac has an invalid size\n");
++			return -EINVAL;
++		}
++		mac = ioremap(mac_res->start, resource_size(mac_res));
++		memcpy_fromio(athxk_eeprom_mac, mac, 6);
++	} else if (ltq_get_eth_mac())
++		memcpy(athxk_eeprom_mac, ltq_get_eth_mac(), 6);
++	else {
++		dev_warn(&pdev->dev, "using random mac\n");
++		random_ether_addr(athxk_eeprom_mac);
++	}
++
++	if (!of_property_read_u32(np, "ath,mac-increment", &mac_inc))
++		athxk_eeprom_mac[5] += mac_inc;
++
++	ath5k_pdata.macaddr = athxk_eeprom_mac;
++	ltq_pci_plat_dev_init = ath5k_pci_plat_dev_init;
++
++	dev_info(&pdev->dev, "loaded ath5k eeprom\n");
++
++	return 0;
++}
++
++static struct of_device_id ath5k_eeprom_ids[] = {
++	{ .compatible = "ath5k,eeprom" },
++	{ }
++};
++
++static struct platform_driver ath5k_eeprom_driver = {
++	.driver		= {
++		.name		= "ath5k,eeprom",
++		.owner	= THIS_MODULE,
++		.of_match_table	= of_match_ptr(ath5k_eeprom_ids),
++	},
++};
++
++static int __init of_ath5k_eeprom_init(void)
++{
++	return platform_driver_probe(&ath5k_eeprom_driver, of_ath5k_eeprom_probe);
++}
++device_initcall(of_ath5k_eeprom_init);
+--- /dev/null
++++ b/arch/mips/lantiq/xway/eth_mac.c
+@@ -0,0 +1,76 @@
++/*
++ *  Copyright (C) 2012 John Crispin <blogic at openwrt.org>
++ *
++ *  This program is free software; you can redistribute it and/or modify it
++ *  under the terms of the GNU General Public License version 2 as published
++ *  by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include <linux/if_ether.h>
++
++static u8 eth_mac[6];
++static int eth_mac_set;
++
++const u8* ltq_get_eth_mac(void)
++{
++	return eth_mac;
++}
++
++static int __init setup_ethaddr(char *str)
++{
++	eth_mac_set = mac_pton(str, eth_mac);
++	return !eth_mac_set;
++}
++__setup("ethaddr=", setup_ethaddr);
++
++int __init of_eth_mac_probe(struct platform_device *pdev)
++{
++	struct device_node *np = pdev->dev.of_node;
++	struct resource *mac_res;
++	void __iomem *mac;
++	u32 mac_inc = 0;
++
++	if (eth_mac_set) {
++		dev_err(&pdev->dev, "mac was already set by bootloader\n");
++		return -EINVAL;
++	}
++	mac_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++	if (!mac_res) {
++		dev_err(&pdev->dev, "failed to load mac\n");
++		return -EINVAL;
++	}
++	if (resource_size(mac_res) != 6) {
++		dev_err(&pdev->dev, "mac has an invalid size\n");
++		return -EINVAL;
++	}
++	mac = ioremap(mac_res->start, resource_size(mac_res));
++	memcpy_fromio(eth_mac, mac, 6);
++
++	if (!of_property_read_u32(np, "mac-increment", &mac_inc))
++		eth_mac[5] += mac_inc;
++
++	return 0;
++}
++
++static struct of_device_id eth_mac_ids[] = {
++	{ .compatible = "lantiq,eth-mac" },
++	{ /* sentinel */ }
++};
++
++static struct platform_driver eth_mac_driver = {
++	.driver		= {
++		.name		= "lantiq,eth-mac",
++		.owner	= THIS_MODULE,
++		.of_match_table	= of_match_ptr(eth_mac_ids),
++	},
++};
++
++static int __init of_eth_mac_init(void)
++{
++	return platform_driver_probe(&eth_mac_driver, of_eth_mac_probe);
++}
++device_initcall(of_eth_mac_init);
+--- /dev/null
++++ b/arch/mips/lantiq/xway/pci-ath-fixup.c
+@@ -0,0 +1,118 @@
++/*
++ *  Atheros AP94 reference board PCI initialization
++ *
++ *  Copyright (C) 2009-2010 Gabor Juhos <juhosg at openwrt.org>
++ *
++ *  This program is free software; you can redistribute it and/or modify it
++ *  under the terms of the GNU General Public License version 2 as published
++ *  by the Free Software Foundation.
++ */
++
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <lantiq_soc.h>
++
++struct ath_fixup {
++	u16		*cal_data;
++	unsigned	slot;
++};
++
++static int ath_num_fixups;
++static struct ath_fixup ath_fixups[2];
++
++static void ath_pci_fixup(struct pci_dev *dev)
++{
++	void __iomem *mem;
++	struct pci_dev *bridge = pci_upstream_bridge(dev); 
++	u16 *cal_data = NULL;
++	u16 cmd;
++	u32 bar0;
++	u32 val;
++	u32 base;
++	unsigned i;
++
++	for (i = 0; i < ath_num_fixups; i++) {
++		if (ath_fixups[i].cal_data == NULL)
++			continue;
++
++		if (ath_fixups[i].slot != PCI_SLOT(dev->devfn))
++			continue;
++
++		cal_data = ath_fixups[i].cal_data;
++		break;
++	}
++
++	if (cal_data == NULL)
++		return;
++
++	if (*cal_data != 0xa55a) {
++		pr_err("pci %s: invalid calibration data\n", pci_name(dev));
++		return;
++	}
++
++	pr_info("pci %s: fixup device configuration\n", pci_name(dev));
++
++	base = dev->resource[0].start;
++	mem = ioremap(base, 0x10000);
++	if (!mem) {
++		pr_err("pci %s: ioremap error\n", pci_name(dev));
++		return;
++	}
++
++	if (bridge) {
++		pci_enable_device(dev);
++	}
++
++	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar0);
++	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, base);
++	pci_read_config_word(dev, PCI_COMMAND, &cmd);
++	cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
++	pci_write_config_word(dev, PCI_COMMAND, cmd);
++
++	/* set pointer to first reg address */
++	cal_data += 3;
++	while (*cal_data != 0xffff) {
++		u32 reg;
++		reg = *cal_data++;
++		val = *cal_data++;
++		val |= (*cal_data++) << 16;
++
++		ltq_w32(swab32(val), mem + reg);
++		udelay(100);
++	}
++
++	pci_read_config_dword(dev, PCI_VENDOR_ID, &val);
++	dev->vendor = val & 0xffff;
++	dev->device = (val >> 16) & 0xffff;
++
++	pci_read_config_dword(dev, PCI_CLASS_REVISION, &val);
++	dev->revision = val & 0xff;
++	dev->class = val >> 8; /* upper 3 bytes */
++
++	pr_info("pci %s: fixup info: [%04x:%04x] revision %02x class %#08x\n", 
++		pci_name(dev), dev->vendor, dev->device, dev->revision, dev->class);
++
++	pci_read_config_word(dev, PCI_COMMAND, &cmd);
++	cmd &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
++	pci_write_config_word(dev, PCI_COMMAND, cmd);
++
++	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
++
++	if (bridge) {
++		pci_disable_device(dev);
++	}
++
++	iounmap(mem);
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATHEROS, PCI_ANY_ID, ath_pci_fixup);
++
++void __init ltq_pci_ath_fixup(unsigned slot, u16 *cal_data)
++{
++	if (ath_num_fixups >= ARRAY_SIZE(ath_fixups))
++		return;
++
++	ath_fixups[ath_num_fixups].slot = slot;
++	ath_fixups[ath_num_fixups].cal_data = cal_data;
++	ath_num_fixups++;
++}
+--- /dev/null
++++ b/arch/mips/lantiq/xway/rt_eep.c
+@@ -0,0 +1,60 @@
++/*
++ *  Copyright (C) 2011 John Crispin <blogic at openwrt.org>
++ *
++ *  This program is free software; you can redistribute it and/or modify it
++ *  under the terms of the GNU General Public License version 2 as published
++ *  by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/platform_device.h>
++#include <linux/rt2x00_platform.h>
++
++extern int (*ltq_pci_plat_dev_init)(struct pci_dev *dev);
++static struct rt2x00_platform_data rt2x00_pdata;
++
++static int rt2x00_pci_plat_dev_init(struct pci_dev *dev)
++{
++	dev->dev.platform_data = &rt2x00_pdata;
++	return 0;
++}
++
++int __init of_ralink_eeprom_probe(struct platform_device *pdev)
++{
++	struct device_node *np = pdev->dev.of_node;
++	const char *eeprom;
++
++	if (of_property_read_string(np, "ralink,eeprom", &eeprom)) {
++		dev_err(&pdev->dev, "failed to load eeprom filename\n");
++		return 0;
++	}
++
++	rt2x00_pdata.eeprom_file_name = kstrdup(eeprom, GFP_KERNEL);
++//	rt2x00_pdata.mac_address = mac;
++	ltq_pci_plat_dev_init = rt2x00_pci_plat_dev_init;
++
++	dev_info(&pdev->dev, "using %s as eeprom\n", eeprom);
++
++	return 0;
++}
++
++static struct of_device_id ralink_eeprom_ids[] = {
++	{ .compatible = "ralink,eeprom" },
++	{ }
++};
++
++static struct platform_driver ralink_eeprom_driver = {
++	.driver		= {
++		.name		= "ralink,eeprom",
++		.owner	= THIS_MODULE,
++		.of_match_table	= of_match_ptr(ralink_eeprom_ids),
++	},
++};
++
++static int __init of_ralink_eeprom_init(void)
++{
++	return platform_driver_probe(&ralink_eeprom_driver, of_ralink_eeprom_probe);
++}
++device_initcall(of_ralink_eeprom_init);
+--- a/drivers/net/ethernet/lantiq_etop.c
++++ b/drivers/net/ethernet/lantiq_etop.c
+@@ -840,7 +840,11 @@ ltq_etop_init(struct net_device *dev)
+ 	if (err)
+ 		goto err_hw;
+ 
+-	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
++	if (priv->mac)
++		memcpy(&mac.sa_data, priv->mac, ETH_ALEN);
++	else
++		memcpy(&mac.sa_data, ltq_get_eth_mac(), ETH_ALEN);
++
+ 	if (!is_valid_ether_addr(mac.sa_data)) {
+ 		pr_warn("etop: invalid MAC, using random\n");
+ 		eth_random_addr(mac.sa_data);
diff --git a/target/linux/lantiq/patches-4.4/0036-owrt-generic-dtb-image-hack.patch b/target/linux/lantiq/patches-4.4/0036-owrt-generic-dtb-image-hack.patch
new file mode 100644
index 0000000..7a306b3
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0036-owrt-generic-dtb-image-hack.patch
@@ -0,0 +1,32 @@
+From dba8578e06aedf1e67312ebfc6162e2fadc9448d Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic at openwrt.org>
+Date: Thu, 7 Aug 2014 18:32:12 +0200
+Subject: [PATCH 36/36] owrt: generic dtb image hack
+
+Signed-off-by: John Crispin <blogic at openwrt.org>
+---
+ arch/mips/kernel/head.S |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/mips/kernel/head.S
++++ b/arch/mips/kernel/head.S
+@@ -86,6 +86,9 @@ EXPORT(__image_cmdline)
+ 	.fill	0x400
+ #endif /* CONFIG_IMAGE_CMDLINE_HACK */
+ 
++	.ascii  "OWRTDTB:"
++	EXPORT(__image_dtb)
++	.fill   0x4000
+ 	__REF
+ 
+ NESTED(kernel_entry, 16, sp)			# kernel entry point
+--- a/arch/mips/lantiq/Kconfig
++++ b/arch/mips/lantiq/Kconfig
+@@ -32,7 +32,6 @@ choice
+ config DT_EASY50712
+ 	bool "Easy50712"
+ 	depends on SOC_XWAY
+-	select BUILTIN_DTB
+ endchoice
+ 
+ config PCI_LANTIQ
diff --git a/target/linux/lantiq/patches-4.4/0040-USB-DWC2-enable-usb-power-gpio.patch b/target/linux/lantiq/patches-4.4/0040-USB-DWC2-enable-usb-power-gpio.patch
new file mode 100644
index 0000000..5f26784
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0040-USB-DWC2-enable-usb-power-gpio.patch
@@ -0,0 +1,35 @@
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -42,6 +42,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/of_device.h>
+ #include <linux/mutex.h>
++#include <linux/of_gpio.h>
+ #include <linux/platform_device.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_data/s3c-hsotg.h>
+@@ -336,6 +337,7 @@ static int dwc2_driver_probe(struct plat
+ 	struct resource *res;
+ 	int retval;
+ 	int irq;
++	int gpio_count;
+ 
+ 	match = of_match_device(dwc2_of_match_table, &dev->dev);
+ 	if (match && match->data) {
+@@ -352,6 +354,16 @@ static int dwc2_driver_probe(struct plat
+ 		defparams.dma_desc_enable = 0;
+ 	}
+ 
++	gpio_count = of_gpio_count(dev->dev.of_node);
++	while (gpio_count > 0) {
++		enum of_gpio_flags flags;
++		int gpio = of_get_gpio_flags(dev->dev.of_node, --gpio_count, &flags);
++		if (gpio_request(gpio, "usb"))
++			continue;
++		dev_info(&dev->dev, "requested GPIO %d\n", gpio);
++		gpio_direction_output(gpio, (flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
++	}
++
+ 	hsotg = devm_kzalloc(&dev->dev, sizeof(*hsotg), GFP_KERNEL);
+ 	if (!hsotg)
+ 		return -ENOMEM;
diff --git a/target/linux/lantiq/patches-4.4/0041-USB-DWC2-add-ltq-params.patch b/target/linux/lantiq/patches-4.4/0041-USB-DWC2-add-ltq-params.patch
new file mode 100644
index 0000000..ea3e48c
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0041-USB-DWC2-add-ltq-params.patch
@@ -0,0 +1,46 @@
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -145,6 +145,34 @@ static int __dwc2_lowlevel_hw_enable(str
+ 	return ret;
+ }
+ 
++static const struct dwc2_core_params params_ltq = {
++	.otg_cap			= 2,	/* non-HNP/non-SRP */
++	.otg_ver			= -1,
++	.dma_enable			= -1,
++	.dma_desc_enable		= -1,
++	.speed				= -1,
++	.enable_dynamic_fifo		= -1,
++	.en_multiple_tx_fifo		= -1,
++	.host_rx_fifo_size		= 288,	/* 288 DWORDs */
++	.host_nperio_tx_fifo_size	= 128,	/* 128 DWORDs */
++	.host_perio_tx_fifo_size	= 96,	/* 96 DWORDs */
++	.max_transfer_size		= -1,
++	.max_packet_count		= 511,
++	.host_channels			= -1,
++	.phy_type			= -1,
++	.phy_utmi_width			= -1,
++	.phy_ulpi_ddr			= -1,
++	.phy_ulpi_ext_vbus		= -1,
++	.i2c_enable			= -1,
++	.ulpi_fs_ls			= -1,
++	.host_support_fs_ls_low_power	= -1,
++	.host_ls_low_power_phy_clk	= -1,
++	.ts_dline			= -1,
++	.reload_ctl			= -1,
++	.ahbcfg				= GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT,
++	.uframe_sched			= -1,
++};
++
+ /**
+  * dwc2_lowlevel_hw_enable - enable platform lowlevel hw resources
+  * @hsotg: The driver state
+@@ -310,6 +338,8 @@ static int dwc2_driver_remove(struct pla
+ static const struct of_device_id dwc2_of_match_table[] = {
+ 	{ .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 },
+ 	{ .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 },
++	{ .compatible = "lantiq,ifxhcd-arx100-dwc2", .data = &params_ltq },
++	{ .compatible = "lantiq,ifxhcd-xrx200-dwc2", .data = &params_ltq },
+ 	{ .compatible = "snps,dwc2", .data = NULL },
+ 	{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
+ 	{},
diff --git a/target/linux/lantiq/patches-4.4/0101-mtd-split.patch b/target/linux/lantiq/patches-4.4/0101-mtd-split.patch
new file mode 100644
index 0000000..495991f
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0101-mtd-split.patch
@@ -0,0 +1,173 @@
+--- a/arch/mips/lantiq/xway/Makefile
++++ b/arch/mips/lantiq/xway/Makefile
+@@ -1,6 +1,6 @@
+ obj-y := prom.o sysctrl.o clk.o reset.o dma.o timer.o dcdc.o
+ 
+-obj-y += vmmc.o tffs.o
++obj-y += vmmc.o tffs.o mtd_split.o
+ 
+ obj-y += eth_mac.o
+ obj-$(CONFIG_PCI) += ath_eep.o rt_eep.o pci-ath-fixup.o
+--- /dev/null
++++ b/arch/mips/lantiq/xway/mtd_split.c
+@@ -0,0 +1,129 @@
++#include <linux/magic.h>
++#include <linux/root_dev.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/partitions.h>
++
++#define ROOTFS_SPLIT_NAME "rootfs_data"
++
++struct squashfs_super_block {
++	__le32 s_magic;
++	__le32 pad0[9];
++	__le64 bytes_used;
++};
++
++static void split_brnimage_kernel(struct mtd_info *master, const char *name,
++                                       int offset, int size)
++{
++	unsigned long buf[4];
++	// Assume at most 2MB of kernel image
++	unsigned long end = offset + (2 << 20);
++	unsigned long part_size = offset + 0x400 - 12;
++	size_t len;
++	int ret;
++
++	if (strcmp(name, "firmware") != 0)
++		return;
++	while (part_size < end) {
++		long size_min = part_size - 0x400 - 12 - offset;
++		long size_max = part_size + 12 - offset;
++		ret = mtd_read(master, part_size, 16, &len, (void *)buf);
++		if (ret || len != 16)
++			return;
++
++		if (le32_to_cpu(buf[0]) < size_min ||
++				le32_to_cpu(buf[0]) > size_max) {
++			part_size += 0x400;
++			continue;
++		}
++
++		if (le32_to_cpu(buf[3]) == SQUASHFS_MAGIC) {
++			part_size += 12 - offset;
++			mtd_add_partition(master, "rootfs", offset + part_size,
++			                            size - part_size);
++			return;
++		}
++		part_size += 0x400;
++	}
++}
++
++static void split_eva_kernel(struct mtd_info *master, const char *name,
++				int offset, int size)
++{
++#define EVA_MAGIC   0xfeed1281
++	unsigned long magic = 0;
++	unsigned long part_size = 0, p;
++	size_t len;
++	int ret;
++
++	if (strcmp(name, CONFIG_MTD_SPLIT_FIRMWARE_NAME) != 0)
++		return;
++
++	ret = mtd_read(master, offset, 4, &len, (void *)&magic);
++	if (ret || len != sizeof(magic))
++		return;
++
++	if (le32_to_cpu(magic) != EVA_MAGIC)
++		return;
++
++	ret = mtd_read(master, offset + 4, 4, &len, (void *)&part_size);
++	if (ret || len != sizeof(part_size))
++		return;
++
++	p = part_size = le32_to_cpu(part_size) + 0x18;
++	p &= ~0xffff;
++	p += 0x10000;
++
++	ret = mtd_read(master, offset + p, 4, &len, (void *)&magic);
++	if (ret || len != sizeof(magic))
++		return;
++
++	if (magic == SQUASHFS_MAGIC)
++		part_size = p + 0x100;
++	else
++		part_size = mtd_pad_erasesize(master, offset, len);
++
++	if (part_size + master->erasesize > size)
++		return;
++
++	mtd_add_partition(master, "rootfs", offset + part_size,
++			    size - part_size);
++}
++
++static void split_tplink_kernel(struct mtd_info *master, const char *name,
++				int offset, int size)
++{
++#define TPLINK_MAGIC   0x00000002
++	unsigned long magic = 0;
++	unsigned long part_size = 0;
++	size_t len;
++	int ret;
++
++	if (strcmp(name, CONFIG_MTD_SPLIT_FIRMWARE_NAME) != 0)
++		return;
++
++	ret = mtd_read(master, offset, 4, &len, (void *)&magic);
++	if (ret || len != sizeof(magic))
++		return;
++
++	if (le32_to_cpu(magic) != TPLINK_MAGIC)
++		return;
++
++	ret = mtd_read(master, offset + 0x78, 4, &len, (void *)&part_size);
++	if (ret || len != sizeof(part_size))
++		return;
++
++	part_size = be32_to_cpu(part_size) + 0x200;
++	if (part_size + master->erasesize > size)
++		return;
++
++	mtd_add_partition(master, "rootfs", offset + part_size,
++			    size - part_size);
++}
++
++void arch_split_mtd_part(struct mtd_info *master, const char *name,
++				int offset, int size)
++{
++	split_tplink_kernel(master, name, offset, size);
++	split_eva_kernel(master, name, offset, size);
++	split_brnimage_kernel(master, name, offset, size);
++}
+--- a/include/linux/mtd/partitions.h
++++ b/include/linux/mtd/partitions.h
+@@ -89,12 +89,15 @@ extern void deregister_mtd_parser(struct
+ int mtd_is_partition(const struct mtd_info *mtd);
+ int mtd_add_partition(struct mtd_info *master, const char *name,
+ 		      long long offset, long long length);
++
+ int mtd_del_partition(struct mtd_info *master, int partno);
+ struct mtd_info *mtdpart_get_master(const struct mtd_info *mtd);
+ uint64_t mtdpart_get_offset(const struct mtd_info *mtd);
+ uint64_t mtd_get_device_size(const struct mtd_info *mtd);
+-extern void __weak arch_split_mtd_part(struct mtd_info *master,
+-				       const char *name, int offset, int size);
++void __weak arch_split_mtd_part(struct mtd_info *master,
++ 				       const char *name, int offset, int size);
++unsigned long
++mtd_pad_erasesize(struct mtd_info *mtd, int offset, int len);
+ 
+ int parse_mtd_partitions_by_type(struct mtd_info *master,
+ 				 enum mtd_parser_type type,
+--- a/drivers/mtd/mtdpart.c
++++ b/drivers/mtd/mtdpart.c
+@@ -755,7 +755,7 @@ run_parsers_by_type(struct mtd_part *sla
+ 	return nr_parts;
+ }
+ 
+-static inline unsigned long
++unsigned long
+ mtd_pad_erasesize(struct mtd_info *mtd, int offset, int len)
+ {
+ 	unsigned long mask = mtd->erasesize - 1;
diff --git a/target/linux/lantiq/patches-4.4/0150-lantiq-pinctrl-xway.patch b/target/linux/lantiq/patches-4.4/0150-lantiq-pinctrl-xway.patch
new file mode 100644
index 0000000..84adbe6
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0150-lantiq-pinctrl-xway.patch
@@ -0,0 +1,15 @@
+--- a/drivers/pinctrl/pinctrl-xway.c
++++ b/drivers/pinctrl/pinctrl-xway.c
+@@ -152,10 +152,10 @@ static const struct ltq_mfp_pin xway_mfp
+ 	MFP_XWAY(GPIO41, GPIO,	NONE,	NONE,	NONE),
+ 	MFP_XWAY(GPIO42, GPIO,	MDIO,	NONE,	NONE),
+ 	MFP_XWAY(GPIO43, GPIO,	MDIO,	NONE,	NONE),
+-	MFP_XWAY(GPIO44, GPIO,	NONE,	GPHY,	SIN),
++	MFP_XWAY(GPIO44, GPIO,	MII,	SIN,	GPHY),
+ 	MFP_XWAY(GPIO45, GPIO,	NONE,	GPHY,	SIN),
+ 	MFP_XWAY(GPIO46, GPIO,	NONE,	NONE,	EXIN),
+-	MFP_XWAY(GPIO47, GPIO,	NONE,	GPHY,	SIN),
++	MFP_XWAY(GPIO47, GPIO,	MII,	GPHY,	SIN),
+ 	MFP_XWAY(GPIO48, GPIO,	EBU,	NONE,	NONE),
+ 	MFP_XWAY(GPIO49, GPIO,	EBU,	NONE,	NONE),
+ 	MFP_XWAY(GPIO50, GPIO,	NONE,	NONE,	NONE),
diff --git a/target/linux/lantiq/patches-4.4/0151-lantiq-ifxmips_pcie-use-of.patch b/target/linux/lantiq/patches-4.4/0151-lantiq-ifxmips_pcie-use-of.patch
new file mode 100644
index 0000000..26a3a65
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0151-lantiq-ifxmips_pcie-use-of.patch
@@ -0,0 +1,51 @@
+--- a/arch/mips/pci/ifxmips_pcie.c
++++ b/arch/mips/pci/ifxmips_pcie.c
+@@ -18,6 +18,8 @@
+ #include <linux/pci_regs.h>
+ #include <linux/module.h>
+ 
++#include <linux/of_platform.h>
++
+ #include "ifxmips_pcie.h"
+ #include "ifxmips_pcie_reg.h"
+ 
+@@ -1045,7 +1047,7 @@ pcie_rc_initialize(int pcie_port)
+ 	return 0;
+ }
+ 
+-static int __init ifx_pcie_bios_init(void)
++static int __init ifx_pcie_bios_probe(struct platform_device *pdev)
+ {
+     void __iomem *io_map_base;
+     int pcie_port;
+@@ -1083,6 +1085,30 @@ static int __init ifx_pcie_bios_init(voi
+ 
+     return 0;
+ }
++
++static const struct of_device_id ifxmips_pcie_match[] = {
++	{ .compatible = "lantiq,pcie-xrx200" },
++	{},
++};
++MODULE_DEVICE_TABLE(of, ifxmips_pcie_match);
++
++static struct platform_driver ltq_pci_driver = {
++	.probe = ifx_pcie_bios_probe,
++	.driver = {
++		.name = "pcie-xrx200",
++		.owner = THIS_MODULE,
++		.of_match_table = ifxmips_pcie_match,
++	},
++};
++
++int __init ifx_pcie_bios_init(void)
++{
++	int ret = platform_driver_register(&ltq_pci_driver);
++	if (ret)
++		pr_err("pcie-xrx200: Error registering platform driver!\n");
++	return ret;
++}
++
+ arch_initcall(ifx_pcie_bios_init);
+ 
+ MODULE_LICENSE("GPL");
diff --git a/target/linux/lantiq/patches-4.4/0160-owrt-lantiq-multiple-flash.patch b/target/linux/lantiq/patches-4.4/0160-owrt-lantiq-multiple-flash.patch
new file mode 100644
index 0000000..9c49d98
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0160-owrt-lantiq-multiple-flash.patch
@@ -0,0 +1,217 @@
+--- a/drivers/mtd/maps/lantiq-flash.c
++++ b/drivers/mtd/maps/lantiq-flash.c
+@@ -19,6 +19,7 @@
+ #include <linux/mtd/cfi.h>
+ #include <linux/platform_device.h>
+ #include <linux/mtd/physmap.h>
++#include <linux/mtd/concat.h>
+ #include <linux/of.h>
+ 
+ #include <lantiq_soc.h>
+@@ -38,10 +39,12 @@ enum {
+ 	LTQ_NOR_NORMAL
+ };
+ 
++#define MAX_RESOURCES		4
++
+ struct ltq_mtd {
+-	struct resource *res;
+-	struct mtd_info *mtd;
+-	struct map_info *map;
++	struct mtd_info *mtd[MAX_RESOURCES];
++	struct mtd_info	*cmtd;
++	struct map_info map[MAX_RESOURCES];
+ };
+ 
+ static const char ltq_map_name[] = "ltq_nor";
+@@ -108,12 +111,44 @@ ltq_copy_to(struct map_info *map, unsign
+ }
+ 
+ static int
++ltq_mtd_remove(struct platform_device *pdev)
++{
++	struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
++	int i;
++
++	if (ltq_mtd == NULL)
++		return 0;
++
++	if (ltq_mtd->cmtd) {
++		mtd_device_unregister(ltq_mtd->cmtd);
++		if (ltq_mtd->cmtd != ltq_mtd->mtd[0])
++			mtd_concat_destroy(ltq_mtd->cmtd);
++	}
++
++	for (i = 0; i < MAX_RESOURCES; i++) {
++		if (ltq_mtd->mtd[i] != NULL)
++			map_destroy(ltq_mtd->mtd[i]);
++	}
++
++	kfree(ltq_mtd);
++
++	return 0;
++}
++
++static int
+ ltq_mtd_probe(struct platform_device *pdev)
+ {
+ 	struct mtd_part_parser_data ppdata;
+ 	struct ltq_mtd *ltq_mtd;
+ 	struct cfi_private *cfi;
+-	int err;
++	int err = 0;
++	int i;
++	int devices_found = 0;
++
++	static const char *rom_probe_types[] = {
++		"cfi_probe", "jedec_probe", NULL
++	};
++	const char **type;
+ 
+ 	if (of_machine_is_compatible("lantiq,falcon") &&
+ 			(ltq_boot_select() != BS_FLASH)) {
+@@ -127,75 +162,89 @@ ltq_mtd_probe(struct platform_device *pd
+ 
+ 	platform_set_drvdata(pdev, ltq_mtd);
+ 
+-	ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-	if (!ltq_mtd->res) {
+-		dev_err(&pdev->dev, "failed to get memory resource\n");
+-		return -ENOENT;
++	for (i = 0; i < pdev->num_resources; i++) {
++		printk(KERN_NOTICE "lantiq nor flash device: %.8llx at %.8llx\n",
++		       (unsigned long long)resource_size(&pdev->resource[i]),
++		       (unsigned long long)pdev->resource[i].start);
++	
++		if (!devm_request_mem_region(&pdev->dev,
++			pdev->resource[i].start,
++			resource_size(&pdev->resource[i]),
++			dev_name(&pdev->dev))) {
++			dev_err(&pdev->dev, "Could not reserve memory region\n");
++			return -ENOMEM;
++		}
++
++		ltq_mtd->map[i].name = ltq_map_name;
++		ltq_mtd->map[i].bankwidth = 2;
++		ltq_mtd->map[i].read = ltq_read16;
++		ltq_mtd->map[i].write = ltq_write16;
++		ltq_mtd->map[i].copy_from = ltq_copy_from;
++		ltq_mtd->map[i].copy_to = ltq_copy_to;
++
++		if (of_find_property(pdev->dev.of_node, "lantiq,noxip", NULL))
++			ltq_mtd->map[i].phys = NO_XIP;
++		else
++			ltq_mtd->map[i].phys = pdev->resource[i].start;
++		ltq_mtd->map[i].size = resource_size(&pdev->resource[i]);
++		ltq_mtd->map[i].virt = devm_ioremap(&pdev->dev, pdev->resource[i].start,
++						 ltq_mtd->map[i].size);
++		if (IS_ERR(ltq_mtd->map[i].virt))
++			return PTR_ERR(ltq_mtd->map[i].virt);
++
++		if (ltq_mtd->map[i].virt == NULL) {
++			dev_err(&pdev->dev, "Failed to ioremap flash region\n");
++			err = PTR_ERR(ltq_mtd->map[i].virt);
++			goto err_out;
++		}
++
++		ltq_mtd->map[i].map_priv_1 = LTQ_NOR_PROBING;
++		for (type = rom_probe_types; !ltq_mtd->mtd[i] && *type; type++)
++			ltq_mtd->mtd[i] = do_map_probe(*type, &ltq_mtd->map[i]);
++		ltq_mtd->map[i].map_priv_1 = LTQ_NOR_NORMAL;
++
++		if (!ltq_mtd->mtd[i]) {
++			dev_err(&pdev->dev, "probing failed\n");
++			return -ENXIO;
++		} else {
++			devices_found++;
++		}
++
++		ltq_mtd->mtd[i]->owner = THIS_MODULE;
++		ltq_mtd->mtd[i]->dev.parent = &pdev->dev;
++
++		cfi = ltq_mtd->map[i].fldrv_priv;
++		cfi->addr_unlock1 ^= 1;
++		cfi->addr_unlock2 ^= 1;
+ 	}
+ 
+-	ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
+-				    GFP_KERNEL);
+-	if (!ltq_mtd->map)
+-		return -ENOMEM;
++	if (devices_found == 1) {
++		ltq_mtd->cmtd = ltq_mtd->mtd[0];
++	} else if (devices_found > 1) {
++		/*
++		 * We detected multiple devices. Concatenate them together.
++		 */
++		ltq_mtd->cmtd = mtd_concat_create(ltq_mtd->mtd, devices_found, dev_name(&pdev->dev));
++		if (ltq_mtd->cmtd == NULL)
++			err = -ENXIO;
++	}
+ 
+-	if (of_find_property(pdev->dev.of_node, "lantiq,noxip", NULL))
+-		ltq_mtd->map->phys = NO_XIP;
+-	else
+-		ltq_mtd->map->phys = ltq_mtd->res->start;
+-	ltq_mtd->res->start;
+-	ltq_mtd->map->size = resource_size(ltq_mtd->res);
+-	ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
+-	if (IS_ERR(ltq_mtd->map->virt))
+-		return PTR_ERR(ltq_mtd->map->virt);
+-
+-	ltq_mtd->map->name = ltq_map_name;
+-	ltq_mtd->map->bankwidth = 2;
+-	ltq_mtd->map->read = ltq_read16;
+-	ltq_mtd->map->write = ltq_write16;
+-	ltq_mtd->map->copy_from = ltq_copy_from;
+-	ltq_mtd->map->copy_to = ltq_copy_to;
+-
+-	ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING;
+-	ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map);
+-	ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL;
+-
+-	if (!ltq_mtd->mtd) {
+-		dev_err(&pdev->dev, "probing failed\n");
+-		return -ENXIO;
+-	}
+-
+-	ltq_mtd->mtd->dev.parent = &pdev->dev;
+-
+-	cfi = ltq_mtd->map->fldrv_priv;
+-	cfi->addr_unlock1 ^= 1;
+-	cfi->addr_unlock2 ^= 1;
++	ltq_mtd->cmtd->dev.parent = &pdev->dev;
+ 
+ 	ppdata.of_node = pdev->dev.of_node;
+-	err = mtd_device_parse_register(ltq_mtd->mtd, NULL, &ppdata, NULL, 0);
++	err = mtd_device_parse_register(ltq_mtd->cmtd, NULL, &ppdata, NULL, 0);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "failed to add partitions\n");
+-		goto err_destroy;
++		goto err_out;
+ 	}
+ 
+ 	return 0;
+ 
+-err_destroy:
+-	map_destroy(ltq_mtd->mtd);
++err_out:
++	ltq_mtd_remove(pdev);
+ 	return err;
+ }
+ 
+-static int
+-ltq_mtd_remove(struct platform_device *pdev)
+-{
+-	struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
+-
+-	if (ltq_mtd && ltq_mtd->mtd) {
+-		mtd_device_unregister(ltq_mtd->mtd);
+-		map_destroy(ltq_mtd->mtd);
+-	}
+-	return 0;
+-}
+-
+ static const struct of_device_id ltq_mtd_match[] = {
+ 	{ .compatible = "lantiq,nor" },
+ 	{},
diff --git a/target/linux/lantiq/patches-4.4/0300-MTD-cfi-cmdset-0001-disable-buffered-writes.patch b/target/linux/lantiq/patches-4.4/0300-MTD-cfi-cmdset-0001-disable-buffered-writes.patch
new file mode 100644
index 0000000..d153c52
--- /dev/null
+++ b/target/linux/lantiq/patches-4.4/0300-MTD-cfi-cmdset-0001-disable-buffered-writes.patch
@@ -0,0 +1,11 @@
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -39,7 +39,7 @@
+ /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
+ 
+ // debugging, turns off buffer write mode if set to 1
+-#define FORCE_WORD_WRITE 0
++#define FORCE_WORD_WRITE 1
+ 
+ /* Intel chips */
+ #define I82802AB	0x00ad
-- 
2.6.4
