Most multimedia servers ship with little or no signaling, or only a weak one, mainly because signaling is tightly coupled to the business logic. The IoT space, however, really needs a good signaling component that enables interconnection and is easy to customize, so I chose MQTT, which is widely used in IoT, as the signaling carrier. The server can be deployed independently and scales easily; the logic stays simple while the functionality remains strong, following the principle of not adding entities beyond necessity.
The message structure is carried as JSON, so fields can be added or removed flexibly according to business needs and read on demand:
type Message struct {
SeqID string `json:"seqid"`
Mode string `json:"mode"`
Pull_Stream_From_Device []Pull_Stream_From_Device `json:"pull_stream_from_device"` // list of device streams the client asks the server to pull in one batch
Video bool `json:"video"`
Serial bool `json:"serial"`
SSH bool `json:"ssh"`
Audio bool `json:"audio"`
ICEServers []webrtc.ICEServer `json:"iceserver"`
RtcSession webrtc.SessionDescription `json:"offer" mapstructure:"offer"`
Describestreamname string `json:"streamname"`
Suuid string `json:"suuid"` // stream id: the browser can fetch it in advance and send it back when needed, mainly so the user can pick a resolution/address; a KVM exposes 4 built-in resolutions, while Onvif IPC-type devices are probed locally via the Onvif protocol and the result is passed to the browser over MQTT (a probing tool could also register the streams to the Nightingale (夜莺) platform so the frontend fetches them from there when needed)
Topicprefix string `json:"topicprefix"`
}
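For illustration, this is roughly how such a message would be built and marshaled for a WebRTC offer. It is a minimal sketch that assumes it lives in the same package as Message (so the encoding/json and pion/webrtc imports are already in scope); the SeqID and Suuid values are made up:

func exampleOfferMessage(offer webrtc.SessionDescription) ([]byte, error) {
	m := Message{
		SeqID:      "web-0001",  // correlation id chosen by the caller (hypothetical value)
		Mode:       MODE_WEBRTC, // dispatched by Notice() to createPeerConnection
		Video:      true,
		Audio:      true,
		Suuid:      "kvm-1080p", // pre-selected stream/resolution id (hypothetical value)
		RtcSession: offer,       // the browser's SDP offer
	}
	return json.Marshal(m) // fields that are not set simply travel as empty values
}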
A signaling message is just a custom command word that is transported and then dispatched to the matching handler; goroutines are used so commands are processed in parallel.
func Notice(msg Message) {
switch msg.Mode {
case MODE_WEBRTC:
go createPeerConnection(msg)
case MODE_RTMP:
go createPeerConnection(msg)
case MODE_REQPULL:
go DevicePullStream(msg)
case MODE_DEVPUSH:
go DevicePublishStream(msg)
case MODE_ANSWER:
fmt.Println("get answer", msg)
SDPCh <- &msg
default:
answermsg := PublishMsg{
WEB_SEQID: msg.SeqID,
Topic: TOPIC_REFUSE,
Msg: "not supported mode" + msg.Mode,
}
log.Debugf("answer %s", msg.SeqID)
SendMsg(answermsg)
}
}
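PublishMsg and SendMsg are not part of this excerpt. Judging from how msgChans is created and drained in StartMqtt below, they presumably amount to a small envelope plus a channel send, roughly like the following sketch (field names are taken from the accesses in this file, everything else is an assumption):

// Assumed shape of PublishMsg, reconstructed from the usages in this file
// (WEB_SEQID, Topic, Msg, BTodevice); Msg is an interface{} because it holds
// either a *Session or a plain string.
type PublishMsg struct {
	WEB_SEQID string
	Topic     string
	BTodevice bool        // true: publish to the device control topic instead of the web reply topic
	Msg       interface{} // payload, marshaled to JSON by the publisher goroutine
}

// SendMsg simply hands the message to the publisher goroutine started in StartMqtt.
func SendMsg(m PublishMsg) {
	msgChans <- m
}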
// handle is called when a message is received
func (o *handler) handle(client mqtt.Client, msg mqtt.Message) {
// Decode the outer Session envelope, then dispatch on its command type
var m Message
var resp Session
if err := json.Unmarshal(msg.Payload(), &resp); err != nil {
fmt.Printf("Message could not be parsed (%s): %s", msg.Payload(), err)
return
}
log.Debug(resp)
switch resp.Type {
case CMDMSG_OFFER:
enc.Decode(resp.Data, &m)
Notice(m)
case CMDMSG_SERVER_PULLSTREAMFROM_DEVICE: // a client asks the server to pull a stream from a device
enc.Decode(resp.Data, &m)
Notice(m)
case CMDMSG_ANSWER:
enc.Decode(resp.Data, &m.RtcSession)
m.Mode = MODE_ANSWER
Notice(m)
case CMDMSG_ERROR:
var errstr string
enc.DecodeBase64(resp.Data, &errstr)
log.Debug("error", errstr)
default:
}
}
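Every MQTT payload is wrapped in a Session envelope. Its definition is not shown here, but from the accesses above (resp.Type, resp.Data) and the heartbeat below (req.DeviceId, req.Data = enc.Encode(...)) it presumably looks roughly like this; the JSON tags and the exact type of Data are guesses:

// Assumed shape of the Session envelope; only the fields used in this file are listed.
type Session struct {
	Type     string `json:"type"`     // command word, e.g. CMDMSG_OFFER / CMDMSG_ANSWER / "heart"
	DeviceId string `json:"deviceid"` // which device the message concerns
	Data     string `json:"data"`     // encoded inner payload (likely base64), written by enc.Encode and read back with enc.Decode
}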
The design resembles a message-queue mechanism: the concrete signaling commands are decoupled from the receive/send plumbing, which keeps the code easy to maintain.
func StartMqtt(ctx context.Context) {
log.Debug("StartMqtt ...")
mqtt_ctx = ctx
// Create a handler that will deal with incoming messages
h := NewHandler()
defer h.Close()
msgChans = make(chan PublishMsg, 10)
// Now we establish the connection to the mqtt broker
sn, err := identity.GetSN()
if err != nil {
log.Debug("GetSN error", err.Error())
} else {
config.Config.Mqtt.CLIENTID = sn
}
// subscribe only to topics related to this device
config.Config.Mqtt.SUBTOPIC = config.Config.Mqtt.SUBTOPIC + "/" + config.Config.Mqtt.CLIENTID + "/#"
config.Config.Mqtt.PUBTOPIC = config.Config.Mqtt.PUBTOPIC + "/" + config.Config.Mqtt.CLIENTID
log.Debug("subtopic", config.Config.Mqtt.SUBTOPIC, "pubtopic", config.Config.Mqtt.PUBTOPIC)
opts := mqtt.NewClientOptions()
opts.AddBroker(config.Config.Mqtt.SERVERADDRESS)
opts.SetClientID(config.Config.Mqtt.CLIENTID)
opts.ConnectTimeout = time.Second // Minimal delays on connect
opts.WriteTimeout = time.Second // Minimal delays on writes
opts.KeepAlive = 30 // Keepalive every 30 seconds so we quickly detect network outages
opts.PingTimeout = time.Second // local broker so response should be quick
// Automate connection management (will keep trying to connect and will reconnect if network drops)
opts.ConnectRetry = true
opts.AutoReconnect = true
// If using QOS2 and CleanSession = FALSE then it is possible that we will receive messages on topics that we
// have not subscribed to here (if they were previously subscribed to they are part of the session and survive
// disconnect/reconnect). Adding a DefaultPublishHandler lets us detect this.
opts.DefaultPublishHandler = func(_ mqtt.Client, msg mqtt.Message) {
fmt.Printf("UNEXPECTED MESSAGE: %s\n", msg)
}
// Log events
opts.OnConnectionLost = func(cl mqtt.Client, err error) {
log.Debug("connection lost")
}
opts.OnConnect = func(c mqtt.Client) {
log.Debug("connection established")
// Establish the subscription - doing this here means that it will happen every time a connection is established
// (useful if opts.CleanSession is TRUE or the broker does not reliably store session data)
t := c.Subscribe(config.Config.Mqtt.SUBTOPIC, config.Config.Mqtt.QOS, h.handle)
// the connection handler is called in a goroutine so blocking here would not cause an issue. However, as blocking
// in other handlers does cause problems, it's best to just assume we should not block
go func() {
_ = t.Wait() // Can also use '<-t.Done()' in releases > 1.2.0
if t.Error() != nil {
fmt.Printf("ERROR SUBSCRIBING: %s\n", t.Error())
} else {
log.Debug("\r\nsubscribed to: ", config.Config.Mqtt.SUBTOPIC)
}
}()
}
opts.OnReconnecting = func(mqtt.Client, *mqtt.ClientOptions) {
log.Debug("attempting to reconnect")
}
//
// Connect to the broker
//
client := mqtt.NewClient(opts)
// If using QOS2 and CleanSession = FALSE then messages may be transmitted to us before the subscribe completes.
// Adding routes prior to connecting is a way of ensuring that these messages are processed
client.AddRoute(config.Config.Mqtt.SUBTOPIC, h.handle)
if token := client.Connect(); token.Wait() && token.Error() != nil {
panic(token.Error())
}
log.Debug("Connection is up")
done := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
go func() {
var count uint64
for {
select {
case data := <-msgChans:
// data.Msg is usually a *Session, but the refuse path in Notice puts a plain
// string in Msg, so guard the type assertion instead of letting it panic.
deviceid := ""
if s, ok := data.Msg.(*Session); ok {
deviceid = s.DeviceId
}
msg, err := json.Marshal(data.Msg)
if err != nil {
panic(err)
}
if data.Topic == TOPIC_REQPULL {
log.Debug("mqtt:", DEVICE_CONTRL_TOPIC_PRE+deviceid+"/"+data.Topic)
t1 := client.Publish(DEVICE_CONTRL_TOPIC_PRE+deviceid+"/"+data.Topic, config.Config.Mqtt.QOS, false, msg)
go func() {
_ = t1.Wait() // Can also use '<-t.Done()' in releases > 1.2.0
if t1.Error() != nil {
fmt.Printf("msg PUBLISHING: %s\n", t1.Error().Error())
} else {
//log.Debug("msg PUBLISHING:", msg)
}
}()
} else {
if data.BTodevice {
log.Debug("mqtt:", DEVICE_CONTRL_TOPIC_PRE+deviceid+"/"+data.Topic)
t1 := client.Publish(DEVICE_CONTRL_TOPIC_PRE+deviceid+"/"+data.Topic, config.Config.Mqtt.QOS, false, msg)
go func() {
_ = t1.Wait() // Can also use '<-t.Done()' in releases > 1.2.0
if t1.Error() != nil {
fmt.Printf("msg PUBLISHING: %s\n", t1.Error().Error())
} else {
//log.Debug("msg PUBLISHING:", msg)
}
}()
} else {
log.Debug("mqtt:", config.Config.Mqtt.PUBTOPIC+"/"+data.WEB_SEQID+"/"+data.Topic)
t := client.Publish(config.Config.Mqtt.PUBTOPIC+"/"+data.WEB_SEQID+"/"+data.Topic, config.Config.Mqtt.QOS, false, msg)
go func() {
_ = t.Wait() // Can also use '<-t.Done()' in releases > 1.2.0
if t.Error() != nil {
fmt.Printf("msg PUBLISHING: %s\n", t.Error().Error())
} else {
//log.Debug("msg PUBLISHING:", msg)
}
}()
}
}
case <-time.After(time.Second * time.Duration(config.Config.Mqtt.HEARTTIME)):
req := &Session{}
req.Type = "heart"
req.DeviceId = config.Config.Mqtt.CLIENTID // e.g. "kvm1"
count += 1
msg, err := json.Marshal(heartmsg{Count: count})
if err != nil {
panic(err)
}
req.Data = enc.Encode(msg)
answermsg := PublishMsg{
Topic: "heart",
Msg: req,
}
msg, err = json.Marshal(answermsg.Msg)
if err != nil {
panic(err)
}
t := client.Publish(config.Config.Mqtt.PUBTOPIC+"/"+answermsg.Topic, config.Config.Mqtt.QOS, false, msg)
// Handle the token in a go routine so this loop keeps sending messages regardless of delivery status
go func() {
_ = t.Wait() // Can also use '<-t.Done()' in releases > 1.2.0
if t.Error() != nil {
fmt.Printf("ERROR PUBLISHING: %s\n", t.Error().Error())
} else {
//log.Debug("HEART PUBLISHING: ", msg)
}
}()
case <-done:
log.Debug("publisher done")
wg.Done()
return
}
}
}()
// Messages will be delivered asynchronously so we just need to wait for a signal to shutdown
<-ctx.Done()
log.Debug("signal caught - exiting")
close(done) // stop the publisher goroutine
wg.Wait()   // and wait for it to finish before disconnecting
client.Disconnect(1000)
log.Debug("mqtt shutdown complete")
}
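A possible way to wire this up (a sketch, not the project's actual entry point): signal.NotifyContext from the standard library produces the ctx whose cancellation triggers the clean shutdown above. The snippet assumes the usual context, os, os/signal and syscall imports.

func main() {
	// Cancel the context on Ctrl-C or SIGTERM; StartMqtt blocks on <-ctx.Done(),
	// stops the publisher goroutine and then disconnects from the broker.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()
	StartMqtt(ctx)
}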