This design implements a TCP server on an STM32 that can accept up to five client connections at the same time.
Approach:
Define a structure that holds five netconn pointers. Each netconn has an index (0 to 4) and a state flag. The flag starts at 0, is set to 1 once a connection is established on that slot, and is reset to 0 when the connection is closed.
One thread (svr_task) starts the server listener and handles incoming connection requests. When a client connects, it first checks whether any slot has state 0; if so, that netconn slot is used for the connection, a new thread (tcp_server_thread) is created to handle data reception, and the state is set to 1. When tcp_server_thread detects that the connection has been closed, it cleans up, terminates itself, and resets the corresponding state to 0.
Part of the implementation code is shown below.
First, define the structure:
#define CLIENTMAX 5

typedef struct {
    struct netconn       *client[CLIENTMAX];          /* netconn handle for each slot            */
    uint8_t               state[CLIENTMAX];           /* 0 = slot free, 1 = connection in use    */
    osThreadId            client_taskid[CLIENTMAX];   /* receive-thread id created for each slot */
    Controller_StatusDef  realdatastatus[CLIENTMAX];  /* per-connection data-processing state    */
} client_ad;

client_ad clientad;
Where:
client_taskid records the id of the data-processing thread created for that connection;
realdatastatus (of type Controller_StatusDef) tracks the data-processing state of each connection, so that one client's state cannot interfere with communication on the other clients. It must be kept per slot, but is otherwise unrelated to this article.
Next, define the thread that starts the server and handles connection requests:
void svr_task(void const *arg)
{
    struct netconn *conn, *newconn;

    /* wait until the LWIP stack has been initialised */
    while (!isLWIPInitSuccess)
        osDelay(100);

    /* mark all client slots as free */
    for (int i = 0; i < CLIENTMAX; i++)
    {
        clientad.state[i] = 0;
        clientad.realdatastatus[i] = RealPass_Stop;
    }

    /* create, bind and listen on the server connection */
    conn = netconn_new(NETCONN_TCP);
    netconn_bind(conn, IP_ADDR_ANY, sysParameterStruct._Para_ControlIP_PORT);
    netconn_listen(conn);
    sprintf((char*)iptxt, "%d", sysParameterStruct._Para_ControlIP_PORT);
    UsrLog("tcp start listen on port: %s", iptxt);

    while (1)
    {
        uint8_t clientnumindex = 0;
        if (netconn_accept(conn, &newconn) == ERR_OK)
        {
            if (client_init((void *)newconn, &clientnumindex) != ERR_OK)
            {
                /* no free slot: refuse the connection */
                netconn_close(newconn);
                netconn_delete(newconn);
            }
            else
            {
                clientad.client[clientnumindex] = newconn;
                clientad.state[clientnumindex] = 1;
                /* pass the slot index (not a pointer) as the thread argument */
                clientad.client_taskid[clientnumindex] =
                    osThreadCreate(osThread(myTaskClient), (void *)(uintptr_t)clientnumindex);
                if (clientad.client_taskid[clientnumindex] == NULL)
                {
                    UsrLog("Failed to create the recv thread with id: %d", clientnumindex);
                    /* thread creation failed: release the slot and the connection again */
                    clientad.state[clientnumindex] = 0;
                    netconn_close(newconn);
                    netconn_delete(newconn);
                }
                else
                {
                    UsrLog("create the recv thread with id: %x", (int)(clientad.client_taskid[clientnumindex]));
                }
            }
        }
        osDelay(100);
    }
}
err_t client_init(void *arg, uint8_t *clientnumindex)
{
    uint8_t clientnum;
    uint8_t ifreceiveclient = 0;

    /* look for the first free slot (state == 0) */
    for (clientnum = 0; clientnum < CLIENTMAX; clientnum++)
    {
        if (clientad.state[clientnum] == 0)
        {
            ifreceiveclient = 1;
            break;
        }
    }
    if (ifreceiveclient == 0)
    {
        UsrLog("There is no space to accept the client");
        return ERR_MEM;    /* no free slot left */
    }
    *clientnumindex = clientnum;
    return ERR_OK;
}
Here, client_init checks whether an unused slot is still available to handle the connection request.
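Note that svr_task creates the receive thread with osThread(myTaskClient), but the corresponding thread definition is not shown in the article. Assuming CMSIS-RTOS v1 is used (as osThreadDef/osThreadCreate suggest), a definition along the following lines is needed; the stack size here is an assumption, and the instance count is set to CLIENTMAX so that, following the CMSIS-RTOS v1 convention, one definition can back all five receive threads:

/* Assumed thread definition for the per-client receive thread (not shown in the original).
   CLIENTMAX instances allow up to five threads to be created from this one definition;
   the stack size of 256 words is only an illustrative value. */
osThreadDef(myTaskClient, tcp_server_thread, osPriorityNormal, CLIENTMAX, 256);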
Next, define the data-processing thread:
void tcp_server_thread(void const *arg)
{
    err_t err;
    struct netbuf *clientrecvbuf;
    Controller_AckDataDef recvframe;
    Controller_AckDataDef ackframe;
    uint8_t recvdata[40];
    uint8_t ackdata[80];

    /* pre-fill the fixed fields of the acknowledge frame */
    ackframe.originaldata_head = 0x06;
    ackframe.originaldata_address = sysParameterStruct._Para_ControlEquiAddress;
    ackframe.originaldata_end = 0x03;
    ackframe.originaldata_data = ackdata;

    ip_addr_t addr;
    u16_t port;
    /* the slot index was passed as the thread argument */
    uint8_t clientindex = (uint8_t)(uintptr_t)arg;

    /* get and log the remote IP address and port */
    if ((err = netconn_getaddr(clientad.client[clientindex], &addr, &port, 0)) == ERR_OK)
    {
        sprintf((char*)iptxt, "%d.%d.%d.%d:%d",
                (uint8_t)(addr.addr), (uint8_t)(addr.addr >> 8),
                (uint8_t)(addr.addr >> 16), (uint8_t)(addr.addr >> 24), port);
        UsrLog("one remote is connected: %s", iptxt);
    }

    uint8_t *data;
    u16_t len;
    while (1)
    {
        if ((err = netconn_recv(clientad.client[clientindex], &clientrecvbuf)) == ERR_OK)
        {
            /* walk every fragment of the received netbuf */
            do {
                netbuf_data(clientrecvbuf, &data, &len);
                if (len >= 6)
                {
                    /* a valid frame starts with 0x02 and ends with 0x03 */
                    if (data[0] == 0x02 && data[len-1] == 0x03)
                    {
                        recvframe.originaldata_head = data[0];
                        recvframe.originaldata_address = data[1];
                        recvframe.originaldata_cmd = data[2];
                        recvframe.originaldata_length = data[3];
                        recvframe.originaldata_cs = data[len-2];
                        recvframe.originaldata_end = data[len-1];
                        /* the payload length must match the frame length and fit the buffer */
                        if (recvframe.originaldata_length + 6 == len &&
                            recvframe.originaldata_length <= sizeof(recvdata))
                        {
                            for (int i = 0; i < recvframe.originaldata_length; i++)
                            {
                                recvdata[i] = data[4 + i];
                            }
                            recvframe.originaldata_data = recvdata;
                            processCmd(recvframe, &ackframe, clientindex);
                            netconn_ack(&ackframe, clientad.client[clientindex]);
                            continue;    /* go on to the next fragment */
                        }
                    }
                }
                /* malformed frame: answer with an error command */
                ackframe.originaldata_cmd = 0xf3;
                ackframe.originaldata_length = 0;
                netconn_ack(&ackframe, clientad.client[clientindex]);
            } while (netbuf_next(clientrecvbuf) >= 0);
            netbuf_delete(clientrecvbuf);
        }
        else if (err == ERR_CLSD || err == ERR_RST)
        {
            /* the remote side closed or reset the connection */
            break;
        }
    }

    /* connection is gone: release the slot, the netconn and finally this thread */
    if (clientad.state[clientindex] == 1)
    {
        if ((err = netconn_getaddr(clientad.client[clientindex], &addr, &port, 0)) == ERR_OK)
        {
            sprintf((char*)iptxt, "%d.%d.%d.%d:%d",
                    (uint8_t)(addr.addr), (uint8_t)(addr.addr >> 8),
                    (uint8_t)(addr.addr >> 16), (uint8_t)(addr.addr >> 24), port);
            UsrLog("one remote is closed: %s", iptxt);
        }
        clientad.state[clientindex] = 0;
        netconn_close(clientad.client[clientindex]);
        netconn_delete(clientad.client[clientindex]);
        vTaskDelete(clientad.client_taskid[clientindex]);
    }
}
The data-processing details above do not matter here. What does matter is that once the connection is closed, the associated resources (the netbuf and the netconn) must be released, otherwise memory will leak; and at the end vTaskDelete must be called to delete the receive thread itself.
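The frame type Controller_AckDataDef, the command handler processCmd and the send helper netconn_ack belong to the author's project and are not shown. Purely as an illustration of what the send path might look like, here is a minimal sketch that assumes the frame layout implied by the parsing code above (head, address, command, length, payload, checksum, end); the field types and the simple sum checksum are assumptions, not the original implementation:

/* Assumed frame structure, inferred from the parsing code above;
   not the author's original definition. */
typedef struct {
    uint8_t  originaldata_head;      /* frame start byte        */
    uint8_t  originaldata_address;   /* device address          */
    uint8_t  originaldata_cmd;       /* command code            */
    uint8_t  originaldata_length;    /* payload length in bytes */
    uint8_t *originaldata_data;      /* pointer to the payload  */
    uint8_t  originaldata_cs;        /* checksum                */
    uint8_t  originaldata_end;       /* frame end byte          */
} Controller_AckDataDef;

/* Hypothetical sketch of the netconn_ack() helper referenced above:
   serialise the frame and send it on the client's netconn.
   The sum checksum is an assumption. */
static err_t netconn_ack(Controller_AckDataDef *frame, struct netconn *conn)
{
    uint8_t buf[6 + 80];             /* header/trailer plus payload buffer */
    uint16_t idx = 0;

    buf[idx++] = frame->originaldata_head;
    buf[idx++] = frame->originaldata_address;
    buf[idx++] = frame->originaldata_cmd;
    buf[idx++] = frame->originaldata_length;
    for (int i = 0; i < frame->originaldata_length; i++)
        buf[idx++] = frame->originaldata_data[i];

    uint8_t cs = 0;
    for (int i = 0; i < idx; i++)
        cs += buf[i];
    buf[idx++] = cs;
    buf[idx++] = frame->originaldata_end;

    /* NETCONN_COPY: lwIP copies the data, so the stack buffer may go out of scope */
    return netconn_write(conn, buf, idx, NETCONN_COPY);
}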
Finally, start the tasks:
uint8_t isLWIPInitSuccess = 0;

void MX_FREERTOS_Init(void)
{
    /* default task: brings up the LWIP stack, then deletes itself */
    osThreadDef(defaultTask, StartDefaultTask, osPriorityNormal, 0, 128);
    defaultTaskHandle = osThreadCreate(osThread(defaultTask), NULL);

    /* TCP server task: listens and dispatches client connections */
    osThreadDef(tcpmultiTask, svr_task, osPriorityNormal, 0, 256);
    osThreadCreate(osThread(tcpmultiTask), NULL);
}
void StartDefaultTask(void const * argument)
{
    /* init code for LWIP */
    MX_LWIP_Init();
    isLWIPInitSuccess = 1;
    /* initialisation is done: this task is no longer needed */
    vTaskDelete(defaultTaskHandle);
}
Here, the StartDefaultTask task initializes the Ethernet (LWIP) stack and, once initialization is complete, sets isLWIPInitSuccess to 1.
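For reference, the snippets above also use a few project-level symbols that the article does not show (iptxt, sysParameterStruct, UsrLog, defaultTaskHandle). The declarations below are only a guess at what they might look like, so that the fragments have something to resolve against; they are not taken from the original project:

/* Assumed project globals referenced by the snippets above; purely illustrative. */
extern uint8_t iptxt[32];                      /* scratch buffer for log strings      */
extern osThreadId defaultTaskHandle;           /* handle of the default (init) task   */
void UsrLog(const char *fmt, ...);             /* project-specific logging helper     */

/* sysParameterStruct holds at least the listen port and the device address.
   The struct name and field widths are assumptions. */
typedef struct {
    uint16_t _Para_ControlIP_PORT;             /* TCP listen port                     */
    uint8_t  _Para_ControlEquiAddress;         /* device address placed in ack frames */
} SysParameter_t;
extern SysParameter_t sysParameterStruct;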