diff --git a/target/linux/brcm47xx/patches-3.10/712-bgmac_implement_unaligned_addressing.patch b/target/linux/brcm47xx/patches-3.10/712-bgmac_implement_unaligned_addressing.patch
new file mode 100644
index 0000000000000000000000000000000000000000..939ae114bb67993d3a1e49d095b0589244d2c1e5
--- /dev/null
+++ b/target/linux/brcm47xx/patches-3.10/712-bgmac_implement_unaligned_addressing.patch
@@ -0,0 +1,140 @@
+bgmac: implement unaligned addressing for DMA rings that support it
+
+This is an important patch for new devices that support unaligned
+addressing. Those devices suffer from a backward-compatibility bug in
+the DMA engine. In theory we should be able to use the old mechanism,
+but in practice the DMA address seems to be randomly copied into the
+status register when the hardware reaches the end of a ring. This breaks
+reading the slot number from the status register, so we can't use DMA anymore.
+
+Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
+
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -157,6 +157,7 @@ static netdev_tx_t bgmac_dma_tx_add(stru
+ 	if (++ring->end >= BGMAC_TX_RING_SLOTS)
+ 		ring->end = 0;
+ 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
++		    ring->index_base +
+ 		    ring->end * sizeof(struct bgmac_dma_desc));
+ 
+ 	/* Always keep one slot free to allow detecting bugged calls. */
+@@ -181,6 +182,8 @@ static void bgmac_dma_tx_free(struct bgm
+ 	/* The last slot that hardware didn't consume yet */
+ 	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+ 	empty_slot &= BGMAC_DMA_TX_STATDPTR;
++	empty_slot -= ring->index_base;
++	empty_slot &= BGMAC_DMA_TX_STATDPTR;
+ 	empty_slot /= sizeof(struct bgmac_dma_desc);
+ 
+ 	while (ring->start != empty_slot) {
+@@ -274,6 +277,8 @@ static int bgmac_dma_rx_read(struct bgma
+ 
+ 	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
+ 	end_slot &= BGMAC_DMA_RX_STATDPTR;
++	end_slot -= ring->index_base;
++	end_slot &= BGMAC_DMA_RX_STATDPTR;
+ 	end_slot /= sizeof(struct bgmac_dma_desc);
+ 
+ 	ring->end = end_slot;
+@@ -418,9 +423,6 @@ static int bgmac_dma_alloc(struct bgmac
+ 		ring = &bgmac->tx_ring[i];
+ 		ring->num_slots = BGMAC_TX_RING_SLOTS;
+ 		ring->mmio_base = ring_base[i];
+-		if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
+-			bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
+-				   ring->mmio_base);
+ 
+ 		/* Alloc ring of descriptors */
+ 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+@@ -435,6 +437,13 @@ static int bgmac_dma_alloc(struct bgmac
+ 		if (ring->dma_base & 0xC0000000)
+ 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+ 
++		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
++						      BGMAC_DMA_RING_TX);
++		if (ring->unaligned)
++			ring->index_base = lower_32_bits(ring->dma_base);
++		else
++			ring->index_base = 0;
++
+ 		/* No need to alloc TX slots yet */
+ 	}
+ 
+@@ -444,9 +453,6 @@ static int bgmac_dma_alloc(struct bgmac
+ 		ring = &bgmac->rx_ring[i];
+ 		ring->num_slots = BGMAC_RX_RING_SLOTS;
+ 		ring->mmio_base = ring_base[i];
+-		if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
+-			bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
+-				   ring->mmio_base);
+ 
+ 		/* Alloc ring of descriptors */
+ 		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+@@ -462,6 +468,13 @@ static int bgmac_dma_alloc(struct bgmac
+ 		if (ring->dma_base & 0xC0000000)
+ 			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+ 
++		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
++						      BGMAC_DMA_RING_RX);
++		if (ring->unaligned)
++			ring->index_base = lower_32_bits(ring->dma_base);
++		else
++			ring->index_base = 0;
++
+ 		/* Alloc RX slots */
+ 		for (j = 0; j < ring->num_slots; j++) {
+ 			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
+@@ -489,12 +502,14 @@ static void bgmac_dma_init(struct bgmac
+ 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+ 		ring = &bgmac->tx_ring[i];
+ 
+-		/* We don't implement unaligned addressing, so enable first */
+-		bgmac_dma_tx_enable(bgmac, ring);
++		if (!ring->unaligned)
++			bgmac_dma_tx_enable(bgmac, ring);
+ 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
+ 			    lower_32_bits(ring->dma_base));
+ 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
+ 			    upper_32_bits(ring->dma_base));
++		if (ring->unaligned)
++			bgmac_dma_tx_enable(bgmac, ring);
+ 
+ 		ring->start = 0;
+ 		ring->end = 0;	/* Points the slot that should *not* be read */
+@@ -505,12 +520,14 @@ static void bgmac_dma_init(struct bgmac
+ 
+ 		ring = &bgmac->rx_ring[i];
+ 
+-		/* We don't implement unaligned addressing, so enable first */
+-		bgmac_dma_rx_enable(bgmac, ring);
++		if (!ring->unaligned)
++			bgmac_dma_rx_enable(bgmac, ring);
+ 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
+ 			    lower_32_bits(ring->dma_base));
+ 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
+ 			    upper_32_bits(ring->dma_base));
++		if (ring->unaligned)
++			bgmac_dma_rx_enable(bgmac, ring);
+ 
+ 		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
+ 		     j++, dma_desc++) {
+@@ -531,6 +548,7 @@ static void bgmac_dma_init(struct bgmac
+ 		}
+ 
+ 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
++			    ring->index_base +
+ 			    ring->num_slots * sizeof(struct bgmac_dma_desc));
+ 
+ 		ring->start = 0;
+--- a/drivers/net/ethernet/broadcom/bgmac.h
++++ b/drivers/net/ethernet/broadcom/bgmac.h
+@@ -384,6 +384,8 @@ struct bgmac_dma_ring {
+ 	u16 mmio_base;
+ 	struct bgmac_dma_desc *cpu_base;
+ 	dma_addr_t dma_base;
++	u32 index_base; /* Used for unaligned rings only, otherwise 0 */
++	bool unaligned;
+ 
+ 	struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
+ };
diff --git a/target/linux/brcm47xx/patches-3.10/770-bgmac-phylib.patch b/target/linux/brcm47xx/patches-3.10/770-bgmac-phylib.patch
index 6041d2a77aceebea7bbf5901a28a3e7050085f6d..33308319db00ef75f410006dcbd6a6657ea1706e 100644
--- a/target/linux/brcm47xx/patches-3.10/770-bgmac-phylib.patch
+++ b/target/linux/brcm47xx/patches-3.10/770-bgmac-phylib.patch
@@ -10,7 +10,7 @@
  	  They can be found on BCM47xx SoCs and provide gigabit ethernet.
 --- a/drivers/net/ethernet/broadcom/bgmac.c
 +++ b/drivers/net/ethernet/broadcom/bgmac.c
-@@ -1201,27 +1201,14 @@ static int bgmac_set_mac_address(struct
+@@ -1219,27 +1219,14 @@ static int bgmac_set_mac_address(struct
  static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
  {
  	struct bgmac *bgmac = netdev_priv(net_dev);
@@ -45,7 +45,7 @@
  }
  
  static const struct net_device_ops bgmac_netdev_ops = {
-@@ -1243,61 +1230,18 @@ static int bgmac_get_settings(struct net
+@@ -1261,61 +1248,18 @@ static int bgmac_get_settings(struct net
  {
  	struct bgmac *bgmac = netdev_priv(net_dev);
  
@@ -111,7 +111,7 @@
  
  static void bgmac_get_drvinfo(struct net_device *net_dev,
  			      struct ethtool_drvinfo *info)
-@@ -1308,6 +1252,7 @@ static void bgmac_get_drvinfo(struct net
+@@ -1326,6 +1270,7 @@ static void bgmac_get_drvinfo(struct net
  
  static const struct ethtool_ops bgmac_ethtool_ops = {
  	.get_settings		= bgmac_get_settings,
@@ -119,7 +119,7 @@
  	.get_drvinfo		= bgmac_get_drvinfo,
  };
  
-@@ -1326,10 +1271,42 @@ static int bgmac_mii_write(struct mii_bu
+@@ -1344,10 +1289,42 @@ static int bgmac_mii_write(struct mii_bu
  	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
  }
  
@@ -162,7 +162,7 @@
  
  	mii_bus = mdiobus_alloc();
  	if (!mii_bus)
-@@ -1360,7 +1337,29 @@ static int bgmac_mii_register(struct bgm
+@@ -1378,7 +1355,29 @@ static int bgmac_mii_register(struct bgm
  
  	bgmac->mii_bus = mii_bus;
  
@@ -195,7 +195,7 @@
  	kfree(mii_bus->irq);
 --- a/drivers/net/ethernet/broadcom/bgmac.h
 +++ b/drivers/net/ethernet/broadcom/bgmac.h
-@@ -399,7 +399,10 @@ struct bgmac {
+@@ -401,7 +401,10 @@ struct bgmac {
  	struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
  	struct net_device *net_dev;
  	struct napi_struct napi;