01 Storm source reading: the Netty server side of Storm's inter-process messaging

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package backtype.storm.messaging.netty;

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

import org.jboss.netty.bootstrap.ServerBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelFactory;
import org.jboss.netty.channel.group.ChannelGroup;
import org.jboss.netty.channel.group.DefaultChannelGroup;
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import backtype.storm.Config;
import backtype.storm.messaging.ConnectionWithStatus;
import backtype.storm.messaging.IConnection;
import backtype.storm.messaging.TaskMessage;
import backtype.storm.metric.api.IStatefulObject;
import backtype.storm.utils.Utils;

/**
 * Netty server used for inter-process (worker-to-worker) communication; it
 * carries tuples. The server mainly keeps counters, stores received messages
 * in per-receiver queues, and hands them off for consumption.
 * 
 * @author mosi.li
 * 
 */
class Server extends ConnectionWithStatus implements IStatefulObject {

	private static final Logger LOG = LoggerFactory.getLogger(Server.class);
	@SuppressWarnings("rawtypes")
	Map storm_conf;
	int port;
	// messagesEnqueued is for metrics only: it counts, per client, how many
	// messages have been enqueued.
	private final ConcurrentHashMap<String, AtomicInteger> messagesEnqueued = new ConcurrentHashMap<String, AtomicInteger>();
	// messagesDequeued is for metrics only: it counts how many messages have
	// been dequeued.
	private final AtomicInteger messagesDequeued = new AtomicInteger(0);
	// pendingMessages is for metrics only: it tracks, per queue, how many
	// messages are still waiting to be consumed.
	private final AtomicInteger[] pendingMessages;

	// Create multiple queues for incoming messages. The size equals the number
	// of receiver threads.
	// Messages sent to the same task are stored in the same queue to preserve
	// per-task message order.
	private LinkedBlockingQueue<ArrayList<TaskMessage>>[] message_queue;

	// The three fields below are the Netty building blocks that carry the
	// actual inter-process traffic.
	volatile ChannelGroup allChannels = new DefaultChannelGroup("storm-server");
	final ChannelFactory factory; // determines the Netty channel type (NIO here)
	final ServerBootstrap bootstrap; // the core Netty server-side bootstrap class

	// Length of the message_queue array.
	private int queueCount;
	// Mapping from task id to the queue that carries that task's messages.
	private volatile HashMap<Integer, Integer> taskToQueueId = null;
	int roundRobinQueueId;

	private volatile boolean closing = false;
	List<TaskMessage> closeMessage = Arrays.asList(new TaskMessage(-1, null));

	@SuppressWarnings("rawtypes")
	Server(Map storm_conf, int port) {
		this.storm_conf = storm_conf;
		this.port = port;

		queueCount = Utils.getInt(
				storm_conf.get(Config.WORKER_RECEIVER_THREAD_COUNT), 1);
		roundRobinQueueId = 0;
		taskToQueueId = new HashMap<Integer, Integer>();

		message_queue = new LinkedBlockingQueue[queueCount];
		pendingMessages = new AtomicInteger[queueCount];
		for (int i = 0; i < queueCount; i++) {
			message_queue[i] = new LinkedBlockingQueue<ArrayList<TaskMessage>>();
			pendingMessages[i] = new AtomicInteger(0);
		}

		// Configure the server.
		int buffer_size = Utils.getInt(storm_conf
				.get(Config.STORM_MESSAGING_NETTY_BUFFER_SIZE));
		int maxWorkers = Utils.getInt(storm_conf
				.get(Config.STORM_MESSAGING_NETTY_SERVER_WORKER_THREADS));

		ThreadFactory bossFactory = new NettyRenameThreadFactory(name()
				+ "-boss");
		ThreadFactory workerFactory = new NettyRenameThreadFactory(name()
				+ "-worker");

		if (maxWorkers > 0) {
			factory = new NioServerSocketChannelFactory(
					Executors.newCachedThreadPool(bossFactory),
					Executors.newCachedThreadPool(workerFactory), maxWorkers);
		} else {
			factory = new NioServerSocketChannelFactory(
					Executors.newCachedThreadPool(bossFactory),
					Executors.newCachedThreadPool(workerFactory));
		}

		LOG.info("Create Netty Server " + name() + ", buffer_size: "
				+ buffer_size + ", maxWorkers: " + maxWorkers);

		bootstrap = new ServerBootstrap(factory); // the factory determines the channel type
		bootstrap.setOption("child.tcpNoDelay", true);
		bootstrap.setOption("child.receiveBufferSize", buffer_size);
		bootstrap.setOption("child.keepAlive", true);

		// Set up the pipeline factory.
		bootstrap.setPipelineFactory(new StormServerPipelineFactory(this));

		// Bind and start to accept incoming connections.
		Channel channel = bootstrap.bind(new InetSocketAddress(port));
		allChannels.add(channel);
	}

	/**
	 * Group the incoming messages by the queue their destination task maps to.
	 * 
	 * @param msgs
	 * @return one ArrayList of messages per queue, or null if a close marker
	 *         (task == -1) was encountered
	 */
	private ArrayList<TaskMessage>[] groupMessages(List<TaskMessage> msgs) {
		ArrayList<TaskMessage> messageGroups[] = new ArrayList[queueCount];

		for (int i = 0; i < msgs.size(); i++) {
			TaskMessage message = msgs.get(i);
			int task = message.task();

			if (task == -1) {
				closing = true;
				return null;
			}

			Integer queueId = getMessageQueueId(task);

			if (null == messageGroups[queueId]) {
				messageGroups[queueId] = new ArrayList<TaskMessage>();
			}
			messageGroups[queueId].add(message);
		}
		return messageGroups;
	}

	/**
	 * Look up the message queue id assigned to a task, assigning one in
	 * round-robin order the first time the task is seen.
	 * 
	 * @param task
	 * @return
	 */
	private Integer getMessageQueueId(int task) {
		// try to construct the map from taskId -> queueId in round robin
		// manner.
		Integer queueId = taskToQueueId.get(task);
		if (null == queueId) {
			synchronized (this) {
				queueId = taskToQueueId.get(task);
				if (queueId == null) {
					queueId = roundRobinQueueId++;
					if (roundRobinQueueId == queueCount) {
						roundRobinQueueId = 0;
					}
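					// Copy-on-write: build a fresh map and swap the volatile
					// taskToQueueId reference, so readers never need a lock.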
					HashMap<Integer, Integer> newRef = new HashMap<Integer, Integer>(
							taskToQueueId);
					newRef.put(task, queueId);
					taskToQueueId = newRef;
				}
			}
		}
		return queueId;
	}

	/**
	 * Bump the enqueued-message counter for a client when messages arrive.
	 * 
	 * @param from
	 * @param amount
	 */
	private void addReceiveCount(String from, int amount) {
		// This is possibly lossy in the case where a value is deleted
		// because it has received no messages over the metrics collection
		// period and new messages are starting to come in. This is
		// because I don't want the overhead of a synchronize just to have
		// the metric be absolutely perfect.
		AtomicInteger i = messagesEnqueued.get(from);
		if (i == null) {
			i = new AtomicInteger(amount);
			AtomicInteger prev = messagesEnqueued.putIfAbsent(from, i);
			if (prev != null) {
				prev.addAndGet(amount);
			}
		} else {
			i.addAndGet(amount);
		}
	}

	/**
	 * Enqueue a batch of received messages into the per-receiver queues.
	 * 
	 * @throws InterruptedException
	 */
	protected void enqueue(List<TaskMessage> msgs) throws InterruptedException {
		if (null == msgs || msgs.size() == 0 || closing) {
			return;
		}

		ArrayList<TaskMessage> messageGroups[] = groupMessages(msgs);

		if (null == messageGroups || closing) {
			return;
		}

		for (int receiverId = 0; receiverId < messageGroups.length; receiverId++) {
			ArrayList<TaskMessage> msgGroup = messageGroups[receiverId];
			if (null != msgGroup) {
				// put the whole batch onto the queue for this receiver
				message_queue[receiverId].put(msgGroup);
				// record how many messages are now pending on this queue
				pendingMessages[receiverId].addAndGet(msgGroup.size());
			}
		}
	}

	/**
	 * Consume messages from the queue assigned to this receiver and return an
	 * iterator over them.
	 */
	public Iterator<TaskMessage> recv(int flags, int receiverId) {
		if (closing) {
			return closeMessage.iterator();
		}

		ArrayList<TaskMessage> ret = null;
		int queueId = receiverId % queueCount;
		if ((flags & 0x01) == 0x01) {
			// non-blocking
			ret = message_queue[queueId].poll();
		} else {
			try {
				ArrayList<TaskMessage> request = message_queue[queueId].take();
				LOG.debug("request to be processed: {}", request);
				ret = request;
			} catch (InterruptedException e) {
				LOG.info("exception within msg receiving", e);
				ret = null;
			}
		}

		if (null != ret) {
			messagesDequeued.addAndGet(ret.size());
			pendingMessages[queueId].addAndGet(0 - ret.size());
			return ret.iterator();
		}
		return null;
	}

	/**
	 * register a newly created channel
	 * 
	 * @param channel
	 */
	protected void addChannel(Channel channel) {
		allChannels.add(channel);
	}

	/**
	 * close a channel
	 * 
	 * @param channel
	 */
	protected void closeChannel(Channel channel) {
		channel.close().awaitUninterruptibly();
		allChannels.remove(channel);
	}

	/**
	 * close all channels, and release resources
	 */
	public synchronized void close() {
		if (allChannels != null) {
			allChannels.close().awaitUninterruptibly();
			factory.releaseExternalResources();
			allChannels = null;
		}
	}

	public void send(int task, byte[] message) {
		throw new UnsupportedOperationException(
				"Server connection should not send any messages");
	}

	public void send(Iterator<TaskMessage> msgs) {
		throw new UnsupportedOperationException(
				"Server connection should not send any messages");
	}

	public String name() {
		return "Netty-server-localhost-" + port;
	}

	/**
	 * Report the connection status of this Netty server.
	 */
	@Override
	public Status status() {
		if (closing) {
			return Status.Closed;
		} else if (!connectionEstablished(allChannels)) {
			return Status.Connecting;
		} else {
			return Status.Ready;
		}
	}

	private boolean connectionEstablished(Channel channel) {
		return channel != null && channel.isBound();
	}

	private boolean connectionEstablished(ChannelGroup allChannels) {
		boolean allEstablished = true;
		for (Channel channel : allChannels) {
			if (!(connectionEstablished(channel))) {
				allEstablished = false;
				break;
			}
		}
		return allEstablished;
	}

	/**
	 * Expose metrics: dequeuedMessages (messages taken out), pending (messages
	 * still sitting in the queues) and enqueued (messages received, per client).
	 * No locking is used, for efficiency, so the numbers are approximate.
	 */
	public Object getState() {
		LOG.info("Getting metrics for server on port {}", port);
		HashMap<String, Object> ret = new HashMap<String, Object>();
		ret.put("dequeuedMessages", messagesDequeued.getAndSet(0));
		ArrayList<Integer> pending = new ArrayList<Integer>(
				pendingMessages.length);
		for (AtomicInteger p : pendingMessages) {
			pending.add(p.get());
		}
		ret.put("pending", pending);
		HashMap<String, Integer> enqueued = new HashMap<String, Integer>();
		Iterator<Map.Entry<String, AtomicInteger>> it = messagesEnqueued
				.entrySet().iterator();
		while (it.hasNext()) {
			Map.Entry<String, AtomicInteger> ent = it.next();
			// Yes we can delete something that is not 0 because of races, but
			// that is OK for metrics
			AtomicInteger i = ent.getValue();
			if (i.get() == 0) {
				it.remove();
			} else {
				enqueued.put(ent.getKey(), i.getAndSet(0));
			}
		}
		ret.put("enqueued", enqueued);
		return ret;
	}

	@Override
	public String toString() {
		return String.format("Netty server listening on port %s", port);
	}

}
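To make the receive path concrete, below is a minimal sketch of how a receiver thread could drain one of the server's queues through recv(). This is not Storm's actual worker loop: the ReceiverLoopSketch class name and the handle(...) callback are hypothetical, and it is placed in the same package only because the Server class is package-private.

// A minimal sketch, NOT Storm's actual worker receive loop. ReceiverLoopSketch
// and handle(...) are hypothetical stand-ins for the real dispatch machinery.
package backtype.storm.messaging.netty;

import java.util.Iterator;

import backtype.storm.messaging.TaskMessage;

class ReceiverLoopSketch implements Runnable {

	private final Server server; // the Server shown above
	private final int receiverId; // which of the queueCount queues this thread drains

	ReceiverLoopSketch(Server server, int receiverId) {
		this.server = server;
		this.receiverId = receiverId;
	}

	public void run() {
		while (true) {
			// flags == 0 -> blocking take(); flags == 1 -> non-blocking poll()
			Iterator<TaskMessage> it = server.recv(0, receiverId);
			if (it == null) {
				continue; // nothing dequeued, e.g. the blocking take was interrupted
			}
			while (it.hasNext()) {
				TaskMessage msg = it.next();
				if (msg.task() == -1) {
					return; // the close marker the server hands out while shutting down
				}
				handle(msg);
			}
		}
	}

	// Hypothetical placeholder: deserialize msg.message() and dispatch it to
	// the executor responsible for task msg.task().
	private void handle(TaskMessage msg) {
	}
}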
