/*
 * Copyright (c) 2006-2020, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2020-10-30     bigmagic     first version
 */
#include <stdint.h>
#include <string.h>

#include <rthw.h>
#include <rtthread.h>

#include "board.h"

#include <lwip/sys.h>
#include <netif/ethernetif.h>
#include "mbox.h"
#include "raspi4.h"
#include "drv_eth.h"
#define DBG_LEVEL DBG_LOG
#include <rtdbg.h>
#define LOG_TAG "drv.eth"

/* Negotiated link speed in Mbit/s (10/100/1000); set by link_task_entry. */
static int link_speed = 0;
/* Set to 1 once the PHY link is up and the MAC has been started. */
static int link_flag = 0;

/* Per-descriptor staging slot sizes, in bytes. */
#define RECV_CACHE_BUF (1024)
#define SEND_CACHE_BUF (1024)
/* Fixed physical addresses used as uncached DMA staging areas.
 * NOTE(review): assumes these regions are reserved and mapped uncached by
 * the BSP memory map - confirm against the board configuration. */
#define SEND_DATA_NO_CACHE (0x08200000)
#define RECV_DATA_NO_CACHE (0x08400000)
#define DMA_DISC_ADDR_SIZE (4 * 1024 *1024)
/* Rx/Tx DMA descriptor rings live inside GENET register space. */
#define RX_DESC_BASE (MAC_REG + GENET_RX_OFF)
#define TX_DESC_BASE (MAC_REG + GENET_TX_OFF)

/* Ethernet MAC address length. */
#define MAX_ADDR_LEN (6)
/* Split a (possibly 64-bit) address into the two 32-bit descriptor words. */
#define upper_32_bits(n) ((rt_uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((rt_uint32_t)(n))
#define BIT(nr) (1UL << (nr))
/* Worker thread that waits for PHY link-up and then starts the MAC. */
static rt_thread_t link_thread_tid = RT_NULL;
#define LINK_THREAD_STACK_SIZE (1024)
#define LINK_THREAD_PRIORITY (20)
#define LINK_THREAD_TIMESLICE (10)
static rt_uint32_t tx_index = 0;
static rt_uint32_t rx_index = 0;
static rt_uint32_t index_flag = 0;
2021-08-03 11:26:46 +08:00
static rt_uint32_t send_cache_pbuf[RECV_CACHE_BUF];
2020-10-30 18:12:30 +08:00
/* Driver instance state wrapping the RT-Thread eth_device. */
struct rt_eth_dev
{
    struct eth_device parent;          /* device registered with the netif layer */
    rt_uint8_t dev_addr[MAX_ADDR_LEN]; /* MAC address from the firmware mailbox  */
    char *name;                        /* device name ("e0")                      */
    void *iobase;                      /* MAC register base (MAC_REG)             */
    int state;
    int index;
    struct rt_timer link_timer;        /* periodic PHY link-up polling timer      */
    struct rt_timer rx_poll_timer;     /* NOTE(review): not used anywhere in this file */
    void *priv;
};
/* Singleton device instance registered with the RT-Thread netif layer. */
static struct rt_eth_dev eth_dev;
/* Counting semaphore tracking free Tx descriptors; taken by the send path
 * and released by the TX-done interrupt. */
static struct rt_semaphore sem_lock;
/* Signaled by link_irq once the PHY reports link-up. */
static struct rt_semaphore link_ack;
/* Read a 32-bit memory-mapped register. */
static inline rt_uint32_t read32(void *addr)
{
    volatile unsigned int *reg = (volatile unsigned int *)addr;

    return *reg;
}
/* Write a 32-bit memory-mapped register. */
static inline void write32(void *addr, rt_uint32_t value)
{
    volatile unsigned int *reg = (volatile unsigned int *)addr;

    *reg = value;
}
2020-11-26 11:20:13 +08:00
static void eth_rx_irq(int irq, void *param)
2020-10-30 18:12:30 +08:00
{
2020-11-26 11:20:13 +08:00
rt_uint32_t val = 0;
2021-08-03 11:26:46 +08:00
val = read32(MAC_REG + GENET_INTRL2_CPU_STAT);
val &= ~read32(MAC_REG + GENET_INTRL2_CPU_STAT_MASK);
write32(MAC_REG + GENET_INTRL2_CPU_CLEAR, val);
2020-11-26 11:20:13 +08:00
if (val & GENET_IRQ_RXDMA_DONE)
{
eth_device_ready(&eth_dev.parent);
}
if (val & GENET_IRQ_TXDMA_DONE)
{
2021-08-03 11:26:46 +08:00
rt_sem_release(&sem_lock);
2020-11-26 11:20:13 +08:00
}
2020-10-30 18:12:30 +08:00
}
/* We only support RGMII (as used on the RPi4). */
static int bcmgenet_interface_set(void)
{
int phy_mode = PHY_INTERFACE_MODE_RGMII;
2020-11-26 11:20:13 +08:00
switch (phy_mode)
{
2020-10-30 18:12:30 +08:00
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_RXID:
2021-08-03 11:26:46 +08:00
write32(MAC_REG + SYS_PORT_CTRL, PORT_MODE_EXT_GPHY);
2020-10-30 18:12:30 +08:00
break;
default:
2021-08-03 11:26:46 +08:00
rt_kprintf("unknown phy mode: %d\n", MAC_REG);
2020-10-30 18:12:30 +08:00
return -1;
}
return 0;
}
/* Reset the UniMAC core: pulse the rbuf flush control, soft-reset the MAC
 * with local loopback enabled, clear the MIB counters, set the maximum
 * frame length, and enable 2-byte Rx alignment.  Statement order follows
 * the hardware bring-up sequence and must not be changed. */
static void bcmgenet_umac_reset(void)
{
    rt_uint32_t reg;

    /* Pulse bit 1 of the flush control, then clear the whole register. */
    reg = read32(MAC_REG + SYS_RBUF_FLUSH_CTRL);
    reg |= BIT(1);
    write32((MAC_REG + SYS_RBUF_FLUSH_CTRL), reg);
    reg &= ~BIT(1);
    write32((MAC_REG + SYS_RBUF_FLUSH_CTRL), reg);
    DELAY_MICROS(10);
    write32((MAC_REG + SYS_RBUF_FLUSH_CTRL), 0);
    DELAY_MICROS(10);

    /* Soft-reset the MAC with (rg)mii local loopback for a stable rxclk. */
    write32(MAC_REG + UMAC_CMD, 0);
    write32(MAC_REG + UMAC_CMD, (CMD_SW_RESET | CMD_LCL_LOOP_EN));
    DELAY_MICROS(2);
    write32(MAC_REG + UMAC_CMD, 0);

    /* clear tx/rx counter */
    write32(MAC_REG + UMAC_MIB_CTRL, MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT);
    write32(MAC_REG + UMAC_MIB_CTRL, 0);
    write32(MAC_REG + UMAC_MAX_FRAME_LEN, ENET_MAX_MTU_SIZE);

    /* init rx registers, enable ip header optimization */
    reg = read32(MAC_REG + RBUF_CTRL);
    reg |= RBUF_ALIGN_2B;
    write32(MAC_REG + RBUF_CTRL, reg);
    write32(MAC_REG + RBUF_TBUF_SIZE_CTRL, 1);
}
/* Stop both DMA engines and flush the UMAC Tx path.  Must be called
 * before the descriptor rings are reprogrammed. */
static void bcmgenet_disable_dma(void)
{
    rt_uint32_t tdma_reg = 0, rdma_reg = 0;

    /* Clear the master enable bit of the Tx DMA engine. */
    tdma_reg = read32(MAC_REG + TDMA_REG_BASE + DMA_CTRL);
    tdma_reg &= ~(1UL << DMA_EN);
    write32(MAC_REG + TDMA_REG_BASE + DMA_CTRL, tdma_reg);

    /* Clear the master enable bit of the Rx DMA engine. */
    rdma_reg = read32(MAC_REG + RDMA_REG_BASE + DMA_CTRL);
    rdma_reg &= ~(1UL << DMA_EN);
    write32(MAC_REG + RDMA_REG_BASE + DMA_CTRL, rdma_reg);

    /* Pulse the Tx flush bit to drop anything still queued. */
    write32(MAC_REG + UMAC_TX_FLUSH, 1);
    DELAY_MICROS(100);
    write32(MAC_REG + UMAC_TX_FLUSH, 0);
}
/* Enable the default-queue ring buffer and the master DMA enable bit on
 * both engines.  The Tx control register is written outright; the Rx one
 * is read-modify-written to preserve any bits already set. */
static void bcmgenet_enable_dma(void)
{
    rt_uint32_t reg = 0;
    rt_uint32_t dma_ctrl = 0;

    dma_ctrl = (1 << (DEFAULT_Q + DMA_RING_BUF_EN_SHIFT)) | DMA_EN;
    write32(MAC_REG + TDMA_REG_BASE + DMA_CTRL, dma_ctrl);

    reg = read32(MAC_REG + RDMA_REG_BASE + DMA_CTRL);
    write32(MAC_REG + RDMA_REG_BASE + DMA_CTRL, dma_ctrl | reg);
}
/* Write a PHY register over the UniMAC MDIO bus.
 * addr: PHY address; reg: register number; value: 16-bit value to write.
 * Returns the low 16 bits of the MDIO command register after the
 * transaction.  NOTE(review): a busy-poll timeout (~10 ms) is not
 * reported to the caller. */
static int bcmgenet_mdio_write(rt_uint32_t addr, rt_uint32_t reg, rt_uint32_t value)
{
    int count = 10000;
    rt_uint32_t val;

    /* Compose the write command. */
    val = MDIO_WR | (addr << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT) | (0xffff & value);
    write32(MAC_REG + MDIO_CMD, val);

    /* Set the start/busy bit to trigger the transaction. */
    rt_uint32_t reg_val = read32(MAC_REG + MDIO_CMD);
    reg_val = reg_val | MDIO_START_BUSY;
    write32(MAC_REG + MDIO_CMD, reg_val);

    /* Poll until the controller clears the busy bit or the count expires. */
    while ((read32(MAC_REG + MDIO_CMD) & MDIO_START_BUSY) && (--count))
        DELAY_MICROS(1);

    reg_val = read32(MAC_REG + MDIO_CMD);
    return reg_val & 0xffff;
}
/* Read a PHY register over the UniMAC MDIO bus.
 * addr: PHY address; reg: register number.
 * Returns the 16-bit register value (low half of the command register).
 * NOTE(review): a busy-poll timeout is not reported - stale command
 * register contents would be returned. */
static int bcmgenet_mdio_read(rt_uint32_t addr, rt_uint32_t reg)
{
    int count = 10000;
    rt_uint32_t val = 0;
    rt_uint32_t reg_val = 0;

    /* Compose the read command. */
    val = MDIO_RD | (addr << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
    write32(MAC_REG + MDIO_CMD, val);

    /* Set the start/busy bit to trigger the transaction. */
    reg_val = read32(MAC_REG + MDIO_CMD);
    reg_val = reg_val | MDIO_START_BUSY;
    write32(MAC_REG + MDIO_CMD, reg_val);

    /* Poll until the controller clears the busy bit or the count expires. */
    while ((read32(MAC_REG + MDIO_CMD) & MDIO_START_BUSY) && (--count))
        DELAY_MICROS(1);

    /* The low 16 bits of the command register now hold the read data. */
    reg_val = read32(MAC_REG + MDIO_CMD);

    return reg_val & 0xffff;
}
static int bcmgenet_gmac_write_hwaddr(void)
{
2021-08-03 11:26:46 +08:00
//{0xdc,0xa6,0x32,0x28,0x22,0x50};
2020-10-30 18:12:30 +08:00
rt_uint8_t addr[6];
rt_uint32_t reg;
bcm271x_mbox_hardware_get_mac_address(&addr[0]);
reg = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2021-08-03 11:26:46 +08:00
write32(MAC_REG + UMAC_MAC0, reg);
2020-10-30 18:12:30 +08:00
reg = addr[4] << 8 | addr[5];
2021-08-03 11:26:46 +08:00
write32(MAC_REG + UMAC_MAC1, reg);
2020-10-30 18:12:30 +08:00
return 0;
}
2020-11-03 08:58:05 +08:00
static int get_ethernet_uid(void)
2020-10-30 18:12:30 +08:00
{
rt_uint32_t uid_high = 0;
rt_uint32_t uid_low = 0;
rt_uint32_t uid = 0;
uid_high = bcmgenet_mdio_read(1, BCM54213PE_PHY_IDENTIFIER_HIGH);
uid_low = bcmgenet_mdio_read(1, BCM54213PE_PHY_IDENTIFIER_LOW);
uid = (uid_high << 16 | uid_low);
2020-11-26 11:20:13 +08:00
if (BCM54213PE_VERSION_B1 == uid)
2020-10-30 18:12:30 +08:00
{
2020-11-26 11:20:13 +08:00
LOG_I("version is B1\n");
2020-10-30 18:12:30 +08:00
}
return uid;
}
/* Reset the BCM54213PE PHY and restart autonegotiation.  The bare reads
 * below reproduce the original bring-up sequence; NOTE(review): they are
 * presumably needed because some MII status bits are latched and a read
 * clears them - confirm against the PHY datasheet. */
static void bcmgenet_mdio_init(void)
{
    rt_uint32_t ret = 0;

    /*get ethernet uid*/
    ret = get_ethernet_uid();
    if (ret == 0)
    {
        /* PHY did not answer on the MDIO bus - abort bring-up. */
        return;
    }

    /* reset phy */
    bcmgenet_mdio_write(1, BCM54213PE_MII_CONTROL, MII_CONTROL_PHY_RESET);
    /* read control reg */
    bcmgenet_mdio_read(1, BCM54213PE_MII_CONTROL);
    /* reset phy again */
    bcmgenet_mdio_write(1, BCM54213PE_MII_CONTROL, MII_CONTROL_PHY_RESET);
    /* read control reg */
    bcmgenet_mdio_read(1, BCM54213PE_MII_CONTROL);
    /* read status reg */
    bcmgenet_mdio_read(1, BCM54213PE_MII_STATUS);
    /* read extended status and autonegotiation advertisement registers */
    bcmgenet_mdio_read(1, BCM54213PE_IEEE_EXTENDED_STATUS);
    bcmgenet_mdio_read(1, BCM54213PE_AUTO_NEGOTIATION_ADV);

    bcmgenet_mdio_read(1, BCM54213PE_MII_STATUS);
    bcmgenet_mdio_read(1, BCM54213PE_CONTROL);

    /* half full duplex capability */
    bcmgenet_mdio_write(1, BCM54213PE_CONTROL, (CONTROL_HALF_DUPLEX_CAPABILITY | CONTROL_FULL_DUPLEX_CAPABILITY));
    bcmgenet_mdio_read(1, BCM54213PE_MII_CONTROL);

    /* set mii control: enable and restart autonegotiation, full duplex,
     * speed selection */
    bcmgenet_mdio_write(1, BCM54213PE_MII_CONTROL, (MII_CONTROL_AUTO_NEGOTIATION_ENABLED | MII_CONTROL_AUTO_NEGOTIATION_RESTART | MII_CONTROL_PHY_FULL_DUPLEX | MII_CONTROL_SPEED_SELECTION));
}
/* Program the Rx DMA ring for the default queue: burst size, ring start
 * and end addresses, producer/consumer pointers, buffer geometry and the
 * XON/XOFF flow-control threshold.  Rx DMA must be disabled while this
 * runs (see bcmgenet_disable_dma). */
static void rx_ring_init(void)
{
    write32(MAC_REG + RDMA_REG_BASE + DMA_SCB_BURST_SIZE, DMA_MAX_BURST_LENGTH);
    write32(MAC_REG + RDMA_RING_REG_BASE + DMA_START_ADDR, 0x0);
    write32(MAC_REG + RDMA_READ_PTR, 0x0);
    write32(MAC_REG + RDMA_WRITE_PTR, 0x0);
    /* End address in 32-bit words: one DMA_DESC_SIZE descriptor per buffer. */
    write32(MAC_REG + RDMA_RING_REG_BASE + DMA_END_ADDR, RX_DESCS * DMA_DESC_SIZE / 4 - 1);
    write32(MAC_REG + RDMA_PROD_INDEX, 0x0);
    write32(MAC_REG + RDMA_CONS_INDEX, 0x0);
    write32(MAC_REG + RDMA_RING_REG_BASE + DMA_RING_BUF_SIZE, (RX_DESCS << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH);
    write32(MAC_REG + RDMA_XON_XOFF_THRESH, DMA_FC_THRESH_VALUE);
    /* Enable this ring in the ring-configuration bitmap. */
    write32(MAC_REG + RDMA_REG_BASE + DMA_RING_CFG, 1 << DEFAULT_Q);
}
static void tx_ring_init(void)
{
2021-08-03 11:26:46 +08:00
write32(MAC_REG + TDMA_REG_BASE + DMA_SCB_BURST_SIZE, DMA_MAX_BURST_LENGTH);
write32(MAC_REG + TDMA_RING_REG_BASE + DMA_START_ADDR, 0x0);
write32(MAC_REG + TDMA_READ_PTR, 0x0);
write32(MAC_REG + TDMA_READ_PTR, 0x0);
write32(MAC_REG + TDMA_READ_PTR, 0x0);
write32(MAC_REG + TDMA_WRITE_PTR, 0x0);
write32(MAC_REG + TDMA_RING_REG_BASE + DMA_END_ADDR, TX_DESCS * DMA_DESC_SIZE / 4 - 1);
write32(MAC_REG + TDMA_PROD_INDEX, 0x0);
write32(MAC_REG + TDMA_CONS_INDEX, 0x0);
write32(MAC_REG + TDMA_RING_REG_BASE + DMA_MBUF_DONE_THRESH, 0x1);
write32(MAC_REG + TDMA_FLOW_PERIOD, 0x0);
write32(MAC_REG + TDMA_RING_REG_BASE + DMA_RING_BUF_SIZE, (TX_DESCS << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH);
write32(MAC_REG + TDMA_REG_BASE + DMA_RING_CFG, 1 << DEFAULT_Q);
2020-10-30 18:12:30 +08:00
}
/* Point every Rx descriptor at its slot inside the uncached Rx staging
 * area and mark it hardware-owned with the full buffer length. */
static void rx_descs_init(void)
{
    char *rxbuffs = (char *)RECV_DATA_NO_CACHE;
    rt_uint32_t len_stat, i;
    void *desc_base = (void *)RX_DESC_BASE;

    /* Same initial length/status word for every descriptor. */
    len_stat = (RX_BUF_LENGTH << DMA_BUFLENGTH_SHIFT) | DMA_OWN;

    for (i = 0; i < RX_DESCS; i++)
    {
        write32((desc_base + i * DMA_DESC_SIZE + DMA_DESC_ADDRESS_LO), lower_32_bits((uintptr_t)&rxbuffs[i * RX_BUF_LENGTH]));
        write32((desc_base + i * DMA_DESC_SIZE + DMA_DESC_ADDRESS_HI), upper_32_bits((uintptr_t)&rxbuffs[i * RX_BUF_LENGTH]));
        write32((desc_base + i * DMA_DESC_SIZE + DMA_DESC_LENGTH_STATUS), len_stat);
    }
}
2020-11-03 08:58:05 +08:00
static int bcmgenet_adjust_link(void)
2020-10-30 18:12:30 +08:00
{
rt_uint32_t speed;
2020-11-26 11:20:13 +08:00
rt_uint32_t phy_dev_speed = link_speed;
switch (phy_dev_speed)
{
2020-10-30 18:12:30 +08:00
case SPEED_1000:
speed = UMAC_SPEED_1000;
break;
case SPEED_100:
speed = UMAC_SPEED_100;
break;
case SPEED_10:
speed = UMAC_SPEED_10;
break;
default:
rt_kprintf("bcmgenet: Unsupported PHY speed: %d\n", phy_dev_speed);
return -1;
}
2021-08-03 11:26:46 +08:00
rt_uint32_t reg1 = read32(MAC_REG + EXT_RGMII_OOB_CTRL);
2020-10-30 18:12:30 +08:00
//reg1 &= ~(1UL << OOB_DISABLE);
//rt_kprintf("OOB_DISABLE is %d\n", OOB_DISABLE);
reg1 |= (RGMII_LINK | RGMII_MODE_EN | ID_MODE_DIS);
2021-08-03 11:26:46 +08:00
write32(MAC_REG + EXT_RGMII_OOB_CTRL, reg1);
2020-10-30 18:12:30 +08:00
DELAY_MICROS(1000);
2021-08-03 11:26:46 +08:00
write32(MAC_REG + UMAC_CMD, speed << CMD_SPEED_SHIFT);
2020-10-30 18:12:30 +08:00
return 0;
}
2020-11-26 11:20:13 +08:00
void link_irq(void *param)
{
if ((bcmgenet_mdio_read(1, BCM54213PE_MII_STATUS) & MII_STATUS_LINK_UP) != 0)
{
rt_sem_release(&link_ack);
}
}
/* Bring the MAC out of reset and start both DMA engines.  Called from the
 * link thread once the PHY reports link-up and link_speed is known.
 * Returns 0 on success, non-zero if the link adjustment failed.
 * NOTE(review): ret is unsigned, so a -1 from bcmgenet_adjust_link is
 * returned as a large positive value. */
static int bcmgenet_gmac_eth_start(void)
{
    rt_uint32_t ret;
    rt_uint32_t count = 10000;

    bcmgenet_umac_reset();
    bcmgenet_gmac_write_hwaddr();

    /* Disable RX/TX DMA and flush TX queues */
    bcmgenet_disable_dma();
    rx_ring_init();
    rx_descs_init();
    tx_ring_init();

    /* Enable RX/TX DMA */
    bcmgenet_enable_dma();

    /* Update MAC registers based on PHY property */
    ret = bcmgenet_adjust_link();
    if (ret)
    {
        rt_kprintf("bcmgenet: adjust PHY link failed: %d\n", ret);
        return ret;
    }

    /* wait tx index clear */
    while ((read32(MAC_REG + TDMA_CONS_INDEX) != 0) && (--count))
        DELAY_MICROS(1);

    /* Start the Tx ring where the hardware consumer currently stands. */
    tx_index = read32(MAC_REG + TDMA_CONS_INDEX);
    write32(MAC_REG + TDMA_PROD_INDEX, tx_index);

    /* Remember the initial Rx producer index; bcmgenet_gmac_eth_recv uses
     * it to detect the first "nothing received yet" state. */
    index_flag = read32(MAC_REG + RDMA_PROD_INDEX);

    rx_index = index_flag % RX_DESCS;

    write32(MAC_REG + RDMA_CONS_INDEX, index_flag);
    write32(MAC_REG + RDMA_PROD_INDEX, index_flag);

    /* Enable Rx/Tx */
    rt_uint32_t rx_tx_en;
    rx_tx_en = read32(MAC_REG + UMAC_CMD);

    rx_tx_en |= (CMD_TX_EN | CMD_RX_EN);

    write32(MAC_REG + UMAC_CMD, rx_tx_en);

    /* Unmask the Rx/Tx DMA-done interrupts (handled by eth_rx_irq). */
    write32(MAC_REG + GENET_INTRL2_CPU_CLEAR_MASK, GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE);
    return 0;
}
/* Rolling Rx consumer-index bookkeeping; the hardware index field is
 * 16 bits wide, so the counter wraps at 0xffff. */
static rt_uint32_t prev_recv_cnt = 0;
static rt_uint32_t cur_recv_cnt = 0;

/* Fetch one received frame.
 * On success, stores a pointer to the frame payload (inside the uncached
 * Rx DMA area, offset past the hardware's 2-byte IP-header alignment) in
 * *packetp and returns the frame length in bytes; returns 0 when no new
 * frame is available. */
static rt_uint32_t bcmgenet_gmac_eth_recv(rt_uint8_t **packetp)
{
    void *desc_base;
    rt_uint32_t length = 0, addr = 0;
    rt_uint32_t prod_index = read32(MAC_REG + RDMA_PROD_INDEX);

    if (prod_index == index_flag)
    {
        /* Producer has not moved since start-up: nothing received yet.
         * Arm the counters and neutralize index_flag with a sentinel
         * value that can never match a real (16-bit) producer index. */
        cur_recv_cnt = index_flag;
        index_flag = 0x7fffffff;
        /* no buff */
        return 0;
    }
    else
    {
        if (prev_recv_cnt == (prod_index & 0xffff))
        {
            /* Our consumer count has caught up with the producer: empty. */
            return 0;
        }

        /* Read length and buffer address from the current descriptor. */
        desc_base = RX_DESC_BASE + rx_index * DMA_DESC_SIZE;
        length = read32(desc_base + DMA_DESC_LENGTH_STATUS);
        length = (length >> DMA_BUFLENGTH_SHIFT) & DMA_BUFLENGTH_MASK;
        addr = read32(desc_base + DMA_DESC_ADDRESS_LO);

        /* To cater for the IP header alignment the hardware does.
         * This would actually not be needed if we don't program
         * RBUF_ALIGN_2B
         */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, (void *) addr, length);
        *packetp = (rt_uint8_t *)(addr + RX_BUF_OFFSET);

        /* Advance the descriptor cursor; the ring wraps at RX_DESCS. */
        rx_index = rx_index + 1;
        if (rx_index >= RX_DESCS)
        {
            rx_index = 0;
        }

        /* Tell the hardware the frame was consumed, then advance the
         * 16-bit rolling consumer count. */
        write32(MAC_REG + RDMA_CONS_INDEX, cur_recv_cnt);
        cur_recv_cnt = cur_recv_cnt + 1;

        if (cur_recv_cnt > 0xffff)
        {
            cur_recv_cnt = 0;
        }
        prev_recv_cnt = cur_recv_cnt;

        return length;
    }
}
/* Queue one frame for transmission.
 * packet must point into the uncached Tx staging area and stay valid until
 * the hardware has consumed it; length is the frame size in bytes.
 * Takes one Tx-descriptor credit from sem_lock (returned by the TX-done
 * interrupt in eth_rx_irq).  Always returns 0. */
static int bcmgenet_gmac_eth_send(void *packet, int length)
{
    void *desc_base = (TX_DESC_BASE + tx_index * DMA_DESC_SIZE);
    rt_uint32_t len_stat = length << DMA_BUFLENGTH_SHIFT;

    rt_uint32_t prod_index;

    prod_index = read32(MAC_REG + TDMA_PROD_INDEX);

    /* Single-fragment frame: default QTAG, append CRC, SOP+EOP. */
    len_stat |= 0x3F << DMA_TX_QTAG_SHIFT;
    len_stat |= DMA_TX_APPEND_CRC | DMA_SOP | DMA_EOP;

    /* Make the frame visible to the DMA engine, then fill the descriptor. */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)packet, length);
    write32((desc_base + DMA_DESC_ADDRESS_LO), (rt_uint32_t)packet);
    write32((desc_base + DMA_DESC_ADDRESS_HI), 0);
    write32((desc_base + DMA_DESC_LENGTH_STATUS), len_stat);

    tx_index = tx_index + 1;
    prod_index = prod_index + 1;

    /* NOTE(review): the producer index is wrapped at 0xe000 here - this
     * mirrors the original driver; confirm against the GENET datasheet. */
    if (prod_index == 0xe000)
    {
        write32(MAC_REG + TDMA_PROD_INDEX, 0);
        prod_index = 0;
    }

    /* The software descriptor cursor wraps at the ring size. */
    if (tx_index >= TX_DESCS)
    {
        tx_index = 0;
    }

    /* Start transmission by publishing the new producer index. */
    write32(MAC_REG + TDMA_PROD_INDEX, prod_index);

    /* Wait for a free descriptor credit (released by the TX-done IRQ). */
    rt_sem_take(&sem_lock, RT_WAITING_FOREVER);
    return 0;
}
2020-11-26 11:20:13 +08:00
static void link_task_entry(void *param)
2020-10-30 18:12:30 +08:00
{
2020-11-26 11:20:13 +08:00
struct eth_device *eth_device = (struct eth_device *)param;
2020-10-30 18:12:30 +08:00
RT_ASSERT(eth_device != RT_NULL);
2020-11-26 11:20:13 +08:00
struct rt_eth_dev *dev = &eth_dev;
//start mdio
bcmgenet_mdio_init();
//start timer link
rt_timer_init(&dev->link_timer, "link_timer",
link_irq,
NULL,
100,
RT_TIMER_FLAG_PERIODIC);
rt_timer_start(&dev->link_timer);
//link wait forever
rt_sem_take(&link_ack, RT_WAITING_FOREVER);
eth_device_linkchange(&eth_dev.parent, RT_TRUE); //link up
rt_timer_stop(&dev->link_timer);
//set mac
2021-08-03 11:26:46 +08:00
bcmgenet_gmac_write_hwaddr();
2020-11-26 11:20:13 +08:00
bcmgenet_gmac_write_hwaddr();
//check link speed
if ((bcmgenet_mdio_read(1, BCM54213PE_STATUS) & (1 << 10)) || (bcmgenet_mdio_read(1, BCM54213PE_STATUS) & (1 << 11)))
{
link_speed = 1000;
rt_kprintf("Support link mode Speed 1000M\n");
}
else if ((bcmgenet_mdio_read(1, 0x05) & (1 << 7)) || (bcmgenet_mdio_read(1, 0x05) & (1 << 8)) || (bcmgenet_mdio_read(1, 0x05) & (1 << 9)))
{
link_speed = 100;
rt_kprintf("Support link mode Speed 100M\n");
}
else
{
link_speed = 10;
rt_kprintf("Support link mode Speed 10M\n");
}
bcmgenet_gmac_eth_start();
rt_hw_interrupt_install(ETH_IRQ, eth_rx_irq, NULL, "eth_irq");
rt_hw_interrupt_umask(ETH_IRQ);
link_flag = 1;
}
/* rt_device init hook: sanity-check the GENET hardware version, set the
 * PHY interface mode, reset the MAC, and spawn the link-monitor thread
 * that completes bring-up asynchronously.  Returns RT_EOK on success. */
static rt_err_t bcmgenet_eth_init(rt_device_t device)
{
    /* Signed so negative error codes survive the return (the original
     * used rt_uint32_t, turning -1 into a large positive value). */
    int ret = 0;
    rt_uint32_t hw_reg = 0;

    /* Read GENET HW version */
    rt_uint8_t major = 0;
    hw_reg = read32(MAC_REG + SYS_REV_CTRL);
    major = (hw_reg >> 24) & 0x0f;
    if (major != 6)
    {
        /* Map the raw field to the marketing version for the message. */
        if (major == 5)
            major = 4;
        else if (major == 0)
            major = 1;
        /* Typo "Uns upported" fixed. */
        rt_kprintf("Unsupported GENETv%d.%d\n", major, (hw_reg >> 16) & 0x0f);
        return -RT_ERROR;
    }

    /* set interface */
    ret = bcmgenet_interface_set();
    if (ret)
    {
        return ret;
    }

    /* rbuf clear */
    write32(MAC_REG + SYS_RBUF_FLUSH_CTRL, 0);

    /* disable MAC while updating its registers */
    write32(MAC_REG + UMAC_CMD, 0);

    /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
    write32(MAC_REG + UMAC_CMD, CMD_SW_RESET | CMD_LCL_LOOP_EN);

    /* Finish bring-up (PHY reset, link wait, DMA start) off this thread. */
    link_thread_tid = rt_thread_create("link", link_task_entry, (void *)device,
                                       LINK_THREAD_STACK_SIZE,
                                       LINK_THREAD_PRIORITY, LINK_THREAD_TIMESLICE);
    if (link_thread_tid != RT_NULL)
        rt_thread_startup(link_thread_tid);

    return RT_EOK;
}
/* Device control hook.  Only NIOCTL_GADDR (query the MAC address) is
 * handled; every other command is accepted and ignored. */
static rt_err_t bcmgenet_eth_control(rt_device_t dev, int cmd, void *args)
{
    if (cmd == NIOCTL_GADDR)
    {
        if (args == RT_NULL)
        {
            return -RT_ERROR;
        }
        rt_memcpy(args, eth_dev.dev_addr, 6);
    }
    return RT_EOK;
}
/* lwIP netif Tx hook: linearize the pbuf chain into the uncached Tx
 * staging area and hand it to the DMA engine.  Frames are silently
 * dropped until the link is up (link_flag is set by link_task_entry). */
rt_err_t rt_eth_tx(rt_device_t device, struct pbuf *p)
{
    /* Staging slot in the uncached Tx area, selected by the current Tx
     * descriptor index. */
    rt_uint32_t sendbuf = (rt_uint32_t)SEND_DATA_NO_CACHE + (rt_uint32_t)(tx_index * SEND_CACHE_BUF);
    /* lock eth device */
    if (link_flag == 1)
    {
        /* pbuf chain -> contiguous buffer -> uncached DMA slot.
         * NOTE(review): assumes p->tot_len fits the staging slot
         * (SEND_CACHE_BUF bytes) - confirm the lwIP MTU configuration. */
        pbuf_copy_partial(p, (void *)&send_cache_pbuf[0], p->tot_len, 0);
        rt_memcpy((void *)sendbuf, send_cache_pbuf, p->tot_len);
        bcmgenet_gmac_eth_send((void *)sendbuf, p->tot_len);
    }
    return RT_EOK;
}
/* lwIP netif Rx hook: fetch one frame from the DMA ring and copy it into
 * a freshly allocated pbuf.  Returns RT_NULL when the link is down, no
 * frame is pending, or allocation fails. */
struct pbuf *rt_eth_rx(rt_device_t device)
{
    struct pbuf *p = RT_NULL;
    rt_uint8_t *frame = RT_NULL;
    int frame_len;

    if (link_flag == 1)
    {
        frame_len = bcmgenet_gmac_eth_recv((rt_uint8_t **)&frame);
        if (frame_len > 0)
        {
            p = pbuf_alloc(PBUF_LINK, frame_len, PBUF_RAM);
            if (p != RT_NULL)
            {
                rt_memcpy(p->payload, frame, frame_len);
            }
        }
    }
    return p;
}
/* Board-level Ethernet initialization, run automatically as an RT-Thread
 * component.  Registers the "e0" network interface; the heavy bring-up
 * (PHY, DMA) happens later in bcmgenet_eth_init / link_task_entry. */
int rt_hw_eth_init(void)
{
    rt_uint8_t mac_addr[6];

    /* One credit per Tx descriptor; bcmgenet_gmac_eth_send takes one per
     * frame and the TX-done interrupt gives it back. */
    rt_sem_init(&sem_lock, "eth_send_lock", TX_DESCS, RT_IPC_FLAG_FIFO);
    rt_sem_init(&link_ack, "link_ack", 0, RT_IPC_FLAG_FIFO);

    memset(&eth_dev, 0, sizeof(eth_dev));
    /* Clear the uncached DMA staging areas. */
    memset((void *)SEND_DATA_NO_CACHE, 0, DMA_DISC_ADDR_SIZE);
    memset((void *)RECV_DATA_NO_CACHE, 0, DMA_DISC_ADDR_SIZE);

    /* Query the board MAC address from the VideoCore mailbox. */
    bcm271x_mbox_hardware_get_mac_address(&mac_addr[0]);

    eth_dev.iobase = MAC_REG;
    eth_dev.name = "e0";
    eth_dev.dev_addr[0] = mac_addr[0];
    eth_dev.dev_addr[1] = mac_addr[1];
    eth_dev.dev_addr[2] = mac_addr[2];
    eth_dev.dev_addr[3] = mac_addr[3];
    eth_dev.dev_addr[4] = mac_addr[4];
    eth_dev.dev_addr[5] = mac_addr[5];

    /* Wire the RT-Thread device hooks; read/write go through eth_tx/eth_rx. */
    eth_dev.parent.parent.type = RT_Device_Class_NetIf;
    eth_dev.parent.parent.init = bcmgenet_eth_init;
    eth_dev.parent.parent.open = RT_NULL;
    eth_dev.parent.parent.close = RT_NULL;
    eth_dev.parent.parent.read = RT_NULL;
    eth_dev.parent.parent.write = RT_NULL;
    eth_dev.parent.parent.control = bcmgenet_eth_control;
    eth_dev.parent.parent.user_data = RT_NULL;
    eth_dev.parent.eth_tx = rt_eth_tx;
    eth_dev.parent.eth_rx = rt_eth_rx;

    eth_device_init(&(eth_dev.parent), "e0");

    /* Report link-down until link_task_entry sees the PHY come up. */
    eth_device_linkchange(&eth_dev.parent, RT_FALSE); /* link down */
    return 0;
}
INIT_COMPONENT_EXPORT(rt_hw_eth_init);