1、引言
- 前段时间在mpsoc平台上验证了一下OpenAMP(R5裸跑,A53 Linux)。但是一直没有大块儿的时间整理成文档,又怕放的时间太久什么都记不得。只能在这里简单的做个笔记吧,如果你也恰好在研究这块知识希望可以给你一些帮助。
2、概述
-
OpenAMP实际上使用了IPI中断和共享内存的方式实现了异构系统之间的数据交互
-
如下图所示OpenAMP通过Libmetal来访问底层设备,中断,共享内存等。
-
Master Processor 和Remote Processor数据交互如下图所示
3、名词解释
- virtIO
- 用于多核共享内存数据交互接口
- remoteproc:
- 对远程处理器进行全生命周期管理(例如,可对r5程序进行加载或者停止)
echo test.elf > /sys/class/remoteproc/remoteproc0/firmware
echo start > /sys/class/remoteproc/remoteproc0/state
echo stop > /sys/class/remoteproc/remoteproc0/state
- remoteproc通过远程处理器发布的资源表信息分配系统资源(内存)并创建virtIO设备
- remoteproc可加载任意固件(*.elf),不仅限于OpenAMP固件
- RPMsg:
- 该API允许AMP系统中独立内核上运行的软件之间进行进程间通信(IPC)
4、remote_resource_table
4.1 该表存放位置
- 宏定义
#define __section_t(S) __attribute__((__section__(#S)))
#define __resource __section_t(.resource_table)
- ld文件中的描述
.resource_table 0x3ed20000 : {
. = ALIGN(4);
*(.resource_table)
} > psu_ddr_S_AXI_BASEADDR
- __section_t详细解释
- https://blog.csdn.net/seven_feifei/article/details/95947358
4.2 内容
/*
 * Resource table published by the R5 firmware and parsed by the Linux
 * remoteproc driver on the A53.  Placed in the dedicated
 * ".resource_table" section (see the linker script above) so the host
 * can find it at a fixed address (0x3ed20000).
 */
struct remote_resource_table {
unsigned int version;                      /* resource table format version */
unsigned int num;                          /* number of entries in offset[] (NUM_TABLE_ENTRIES = 2) */
unsigned int reserved[2];                  /* reserved, kept zero */
unsigned int offset[NO_RESOURCE_ENTRIES];  /* byte offset of each entry from the table start:
                                            * offset[0] -> rpmsg_vdev, offset[1] -> rsc_trace */
/* rpmsg vdev entry */
struct fw_rsc_vdev rpmsg_vdev;             /* virtio device header (type RSC_VDEV) */
struct fw_rsc_vdev_vring rpmsg_vring0;     /* vring descriptors; they sit right after the vdev
                                            * header, in its "config space" */
struct fw_rsc_vdev_vring rpmsg_vring1;
struct fw_rsc_trace rsc_trace;             /* trace-buffer declaration (type RSC_TRACE) */
}__attribute__((packed, aligned(0x100)));
- version
- 版本号
- num
- NUM_TABLE_ENTRIES = 2
- offset[0]
- rpmsg_vdev在remote_resource_table中的偏移地址
- offset[1]
- rsc_trace在remote_resource_table中的偏移地址
- rpmsg_vdev(virtio device header)
.rpmsg_vdev = {
.type = RSC_VDEV,
.id = VIRTIO_ID_RPMSG_,
.notifyid = 0,
.dfeatures = RPMSG_IPU_C0_FEATURES,
.gfeatures = 0,
.config_len = 0,
.status = 0,
.num_of_vrings = NUM_VRINGS,
.reserved = {0, 0},
.vring
}
- 该资源是一个virtio设备头:它提供有关vdev的信息,随后被主机及其对等的远程处理器用于协商和共享某些virtio属性。
- 通过提供此资源条目,固件本质上要求remoteproc在注册rproc时静态分配vdev(动态vdev分配尚不受支持)。
- type = RSC_VDEV
- 表明该设备支持virtio device,且将其作为virtio header
- id = 7
- virtio device id
- notifyid
- 对于remoteproc 每个virtio设备 对应唯一的notifyid(用于在远程启动时候通知remoteproc此virtio 设备状态已经改变)
- dfeatures = 0
- 指定固件支持的virtio设备功能
- gfeatures
- 是主机用来写回双方支持的协商功能的占位符
- config_len
- 此virtio设备配置空间长度
- 配置空间位于紧随此vdev头之后的资源表中也就是rpmsg_vring0和rpmsg_vring1
- status
- 主机表示virtio progress状态的一个占位符
- num_of_vrings
- 该vdev所包含的vring个数(见NUM_VRINGS)
- vring
- is an array of @num_of_vrings entries of ‘struct fw_rsc_vdev_vring’
- fw_rsc_vdev_vring(vring descriptor entry)
/* Vring descriptor entry embedded after the vdev header in the resource table. */
struct fw_rsc_vdev_vring {
uint32_t da;        /* device address of the vring */
uint32_t align;     /* alignment between the consumer and producer parts of the vring */
uint32_t num;       /* number of buffers in this vring (must be a power of two) */
uint32_t notifyid;  /* unique rproc-wide notify index, used when kicking the remote
                     * to signal that this vring was triggered */
uint32_t reserved;  /* reserved */
} METAL_PACKED_END;
- @da: device address
- @align: the alignment between the consumer and producer parts of the vring
- @num: num of buffers supported by this vring (must be power of two)
- @notifyid is a unique rproc-wide notify index for this vring. This notify index is used when kicking a remote remoteproc, to let it know that this vring is triggered.
- rsc_trace(trace buffer declaration)
struct fw_rsc_trace {
.type = RSC_TRACE;
.da = (unsigned int)rsc_trace_buf;
.len = sizeof(rsc_trace_buf);
.name[32] = "r5_trace";
} METAL_PACKED_END;
- .type = RSC_TRACE;
- 告知linux 远程 remoteproc准备把logs信息写入trace buffer
- 此资源条目向主机提供trace buffer的信息,远程remoteproc会把日志消息写入该缓冲区。
- 在启动远程remoteproc后,trace buffer通过debugfs条目(称为trace0、trace1等)向用户公开。
5、R5裸跑源码讲解
5.1 init_system(void)
5.2 struct metal_init_params metal_param = METAL_INIT_DEFAULTS;
- 等同于
metal_param = {
.log_handler = metal_default_log_handler, \
.log_level = METAL_LOG_INFO, \
}
static struct {
char * c_buf;
unsigned int c_len;
unsigned int c_pos;
unsigned int c_cnt;
} circ;
5.3 shbuf_io = remoteproc_get_io_with_pa(rproc, SHARED_MEM_PA);
- 根据SHARED_MEM_PA,遍历rproc的mem链表,从中找到匹配的mem并返回metal_io_region *shbuf_io。
5.4 shbuf = metal_io_phys_to_virt(shbuf_io, SHARED_MEM_PA + SHARED_BUF_OFFSET);
- 获取物理地址0x3ed4_8000对应的share buff虚拟地址(由5.11.1可知此处物理地址与虚拟地址相同)
5.5 vdev = remoteproc_create_virtio(rproc, vdev_index, role, rst_cb);
- vdev = remoteproc_create_virtio(rproc, 0, 1, null);
5.6 vdev = rproc_virtio_create_vdev(role, notifyid,vdev_rsc, vdev_rsc_io, rproc,remoteproc_virtio_notify,rst_cb);
- rproc_virtio_create_vdev(1,0,vdev_rsc,tmpio,&rproc_inst,remoteproc_virtio_notify,null)
5.7 vq = virtqueue_allocate(num_extra_desc);
- 申请vq并初始化为0
5.8 rproc_virtio_wait_remote_ready(vdev);
5.8.1 status = rproc_virtio_get_status(vdev);
- 循环读取资源表中rpmsg_vdev->status状态,直到状态变为VIRTIO_CONFIG_STATUS_DRIVER_OK
#define VIRTIO_CONFIG_STATUS_ACK 0x01
#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
#define VIRTIO_CONFIG_STATUS_NEEDS_RESET 0x40
#define VIRTIO_CONFIG_STATUS_FAILED 0x80
- 推测资源表应该用于主机(A53)和远程(R5)状态交互,也就是说A53和R5都可读写该片内存(是否通过IPI不确定)
5.9 zynqmp_r5_a53_proc_init(&rproc_inst,&zynqmp_r5_a53_proc_ops, &rproc_priv)
5.9.1 metal_device_open(“generic”,“ipi_dev”,&ipi_dev)
- a). metal_bus_find(“generic”, &bus)
- 遍历_metal.bus_list,寻找generic总线
- 若找到则*bus 指向该bus
- b) bus->ops.dev_open
- metal_generic_dev_open(&metal_generic_bus,“ipi_dev”,**device)
- 遍历_metal.dev_list,寻找ipi_dev设备
- 找到后打开该设备
- metal_generic_dev_sys_open(dev)
- metal_sys_io_mem_map(&ipi_regions)
- metal_machine_io_mem_map(&0xff31_0000,0xff31_0000,0x1000,DEVICE_NONSHARED | PRIV_RW_USER_RW)
- metal_generic_dev_open(&metal_generic_bus,“ipi_dev”,**device)
- c) 最终将ipi_dev指向ipi_device
5.9.2 metal_device_io_region(&ipi_device,0)
- rproc_priv->ipi_io point to (ipi_regions)
5.9.3 metal_irq_register(65, zynqmp_r5_a53_proc_irq_handler, &rproc_inst);
- a.metal_irq_get_controller(65)
- b.irqs[65]->arg = rproc_inst
- c.irqs[65]->hd = zynqmp_r5_a53_proc_irq_handler
5.9.4 metal_irq_enable(65)
- _metal_irq_set_enable(65,METAL_IRQ_ENABLE)
- metal_xlnx_irq_set_enable(&xlnx_irq_cntr,65,METAL_IRQ_ENABLE)
- 实际上什么也没有操作
5.9.5 metal_io_write32(&ipi_regions,IPI_IER_OFFSET,0x0100_0000)
- metal_io_write(&ipi_regions,IPI_IER_OFFSET,0x0100_0000,__ATOMIC_SEQ_CST,4)
- metal_io_virt(&ipi_regions, IPI_IER_OFFSET)
- ptr point to 0xFF31_0018
- atomic_store_explicit();
- write 0x0100_0000 to 0xFF31_0018(RPU_0_IER)
- 也就是使能掩码0x0100_0000(bit 24,即APU通道)的接收(receive)中断——IER寄存器写1使能对应通道,写0的bit对其他通道无影响
- metal_io_virt(&ipi_regions, IPI_IER_OFFSET)
5.10 remoteproc_mmap(&rproc_inst, &pa, NULL, rsc_size,NORM_NSHARED_NCACHE|PRIV_RW_USER_RW, &rproc_inst.rsc_io);
5.10.1 remoteproc_get_mem(rproc, NULL, lpa, lda, NULL, rsc_size)
5.10.1.1 zynqmp_r5_a53_proc_mmap(&rproc_inst,&pa,METAL_BAD_PHYS,rsc_size,NORM_NSHARED_NCACHE|PRIV_RW_USER_RW,&rproc_inst.rsc_io)
- a.remoteproc_init_mem(mem,NULL,lpa,lda,size,tmpio)
- b.metal_io_init(&share_tmpio, (void *)lpa, &mem->pa, size,
sizeof(metal_phys_addr_t)<<3, attribute, NULL);
- metal_machine_io_mem_map(&0x3e40_0000,share_mem.da,0x10_0000,NORM_SHARED_NCACHE | PRIV_RW_USER_RW)
5.11 platform_create_rpmsg_vdev(&rproc_inst,0,VIRTIO_DEV_SLAVE,NULL,NULL)
5.11.1 metal_io_phys_to_virt(&share_tmpio,0x3ED40000+0x8000)
- a.metal_io_virt(&share_tmpio,metal_io_phys_to_offset(&share_tmpio, 0x3ED40000+0x8000))
- metal_io_phys_to_offset(&share_tmpio, 0x3ED40000+0x8000)
- 返回值为0x8000
- metal_io_virt(&share_tmpio,0x8000)
- 返回值为0x3ed4_8000
- metal_io_phys_to_offset(&share_tmpio, 0x3ED40000+0x8000)
5.12 remoteproc_create_virtio(&rproc_inst, 0, VIRTIO_DEV_SLAVE, NULL)
- a. rproc_virtio_create_vdev(VIRTIO_DEV_SLAVE, 0,
&rpmsg_vdev_entry, &src_tmpio, &rproc_inst,
remoteproc_virtio_notify,NULL);
- 创建了vdev、vq info等并建立了他们间的关系
5.12.1 rproc_virtio_wait_remote_ready(&rpvdev_vdev)
- rproc_virtio_get_status(&rpvdev_vdev)
- 获取主机状态
6、关系图
-
上面啰嗦说了一堆但是也只是一些代码片段,下面这张图才是真正的干货
-
原图可到我的资源里获取
7、linux下需要更新设备树
/* Device-tree additions for OpenAMP: reserved memory for the vrings, the
 * rpmsg buffer pool and the R5 firmware, the zynqmp-rpu remoteproc node,
 * and the APU<->RPU0 IPI mailbox. */
/ {
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
/* vring 0: 16 KB @ 0x3ed40000 */
rpu0vdev0vring0: rpu0vdev0vring0@3ed40000 {
no-map;
reg = <0x0 0x3ed40000 0x0 0x4000>;
};
/* vring 1: 16 KB @ 0x3ed44000 */
rpu0vdev0vring1: rpu0vdev0vring1@3ed44000 {
no-map;
reg = <0x0 0x3ed44000 0x0 0x4000>;
};
/* rpmsg shared buffer pool: 1 MB @ 0x3ed48000 (SHARED_MEM_PA + SHARED_BUF_OFFSET, see 5.4/5.11) */
rpu0vdev0buffer: rpu0vdev0buffer@3ed48000 {
no-map;
reg = <0x0 0x3ed48000 0x0 0x100000>;
};
/* 256 KB R5 firmware region; the .resource_table at 0x3ed20000 lies inside it */
rproc_0_reserved: rproc@3ed00000 {
no-map;
reg = <0x0 0x3ed00000 0x0 0x40000>;
};
};
zynqmp-rpu {
compatible = "xlnx,zynqmp-r5-remoteproc-1.0";
#address-cells = <2>;
#size-cells = <2>;
ranges;
core_conf = "split";	/* RPU in split mode */
reg = <0x0 0xFF9A0000 0x0 0x10000>;
r5_0: r5@0 {
#address-cells = <2>;
#size-cells = <2>;
ranges;
memory-region = <&rproc_0_reserved>, <&rpu0vdev0buffer>, <&rpu0vdev0vring0>, <&rpu0vdev0vring1>;
pnode-id = <0x7>;
/* TX/RX channels of the APU<->RPU0 IPI mailbox declared below */
mboxes = <&ipi_mailbox_rpu0 0>, <&ipi_mailbox_rpu0 1>;
mbox-names = "tx", "rx";
/* R5_0 tightly-coupled memories */
tcm_0_a: tcm_0@0 {
reg = <0x0 0xFFE00000 0x0 0x10000>;
pnode-id = <0xf>;
};
tcm_0_b: tcm_0@1 {
reg = <0x0 0xFFE20000 0x0 0x10000>;
pnode-id = <0x10>;
};
};
};
zynqmp_ipi1 {
compatible = "xlnx,zynqmp-ipi-mailbox";
interrupt-parent = <&gic>;
interrupts = <0 29 4>;
xlnx,ipi-id = <7>;	/* IPI 7 is used because APU IPI 0 is occupied by the PMU FW (see the note below) */
#address-cells = <1>;
#size-cells = <1>;
ranges;
/* APU<->RPU0 IPI mailbox controller */
ipi_mailbox_rpu0: mailbox@ff990600 {
reg = <0xff990600 0x20>,
<0xff990620 0x20>,
<0xff9900c0 0x20>,
<0xff9900e0 0x20>;
reg-names = "local_request_region",
"local_response_region",
"remote_request_region",
"remote_response_region";
#mbox-cells = <1>;
xlnx,ipi-id = <1>;
};
};
};
- 这里需要多说一句,这里的ipi_id(7)与我在“Zynq UltraScale+ MPSoC IPI 通信”一文中描述的有出入,因为 APU IPI(0)被PMU FW占用了。
- 我在“Zynq UltraScale+ MPSoC IPI 通信”里详细描述了IPI中断的底层实现原理有需要的出门右转。
8、补充
- ug1186里详细描述OpenAMP Demos。介绍了R5裸跑如何工程创建,Linux应用程序如何创建,设备树如何修改。所以在这里我就不多说了。