WebFlux Asynchronous Scheduling Algorithms in Depth
Overview
WebFlux's asynchronous scheduling machinery is at the heart of its high-concurrency performance: it manages event loops, thread-pool scheduling, task assignment, and asynchronous execution. Built on Reactor's scheduler abstraction, WebFlux applies scheduling strategies that keep the system efficient under varying load. This article examines the design principles, implementation mechanics, and optimization strategies behind WebFlux's asynchronous scheduling.
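Before diving into the internals, a minimal sketch of how these schedulers surface in application code: Reactor exposes them through the Schedulers factory, and subscribeOn/publishOn move work onto them. The values and the blocking helper below are illustrative only.

import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

import java.time.Duration;

public class SchedulerQuickTour {

    public static void main(String[] args) {
        // CPU-bound work: parallel() keeps one worker per CPU core.
        Mono<Integer> cpuBound = Mono.fromCallable(() -> 21 * 2)
                .subscribeOn(Schedulers.parallel());

        // Blocking I/O: boundedElastic() hands the call to a capped, reusable thread pool.
        Mono<String> blockingIo = Mono.fromCallable(SchedulerQuickTour::readConfigBlocking)
                .subscribeOn(Schedulers.boundedElastic());

        // publishOn switches the downstream operators to another scheduler mid-pipeline.
        cpuBound.publishOn(Schedulers.single())
                .zipWith(blockingIo)
                .doOnNext(tuple -> System.out.println(tuple + " on " + Thread.currentThread().getName()))
                .block(Duration.ofSeconds(5));
    }

    // Hypothetical blocking call, standing in for JDBC, file I/O, a legacy SDK, etc.
    private static String readConfigBlocking() throws InterruptedException {
        Thread.sleep(100);
        return "config";
    }
}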
Scheduling Algorithm Architecture
1. Overall Scheduling Architecture
2. Scheduling Flow Sequence
Scheduler Types and Implementations
1. Scheduler Type Comparison
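In broad strokes, the Reactor schedulers that WebFlux builds on compare as follows:
- Schedulers.immediate(): runs work on the calling thread with no thread switch at all; cheapest, but offers no isolation.
- Schedulers.single(): a single reusable worker thread; good for low-volume, strictly ordered work.
- Schedulers.parallel(): a fixed pool with one worker per CPU core; intended for short, non-blocking, CPU-bound work.
- Schedulers.boundedElastic(): a capped elastic pool with a bounded task queue; the place to offload blocking calls.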
2. ImmediateScheduler Implementation
// Immediate scheduler - runs tasks on the calling thread
final class ImmediateScheduler implements Scheduler {

    static final ImmediateScheduler INSTANCE = new ImmediateScheduler();

    @Override
    public Worker createWorker() {
        return new ImmediateWorker();
    }

    static final class ImmediateWorker implements Worker {

        @Override
        public Disposable schedule(Runnable task) {
            // Execute directly on the current thread
            task.run();
            return Disposables.disposed();
        }

        @Override
        public Disposable schedule(Runnable task, long delay, TimeUnit unit) {
            if (delay <= 0) {
                // No delay: run immediately
                task.run();
            } else {
                // With a delay: sleep the current thread, then run
                try {
                    unit.sleep(delay);
                    task.run();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw Exceptions.propagate(e);
                }
            }
            return Disposables.disposed();
        }

        // dispose() and the remaining Worker methods are omitted for brevity
    }
}
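As a usage sketch with the real Reactor API: Schedulers.immediate() is handy when an operator requires a Scheduler but you explicitly want no thread hop.

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

public class ImmediateDemo {
    public static void main(String[] args) {
        Flux.range(1, 3)
            // No thread switch: everything stays on the subscribing (main) thread
            .subscribeOn(Schedulers.immediate())
            .doOnNext(i -> System.out.println(i + " on " + Thread.currentThread().getName()))
            .blockLast();
    }
}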
3. ParallelScheduler Implementation
// Parallel scheduler - backed by a fixed number of event loops
final class ParallelScheduler implements Scheduler {

    private final int n;
    private final ThreadFactory factory;
    private final AtomicReferenceArray<EventLoopGroup> eventLoopGroups;

    ParallelScheduler(int n, ThreadFactory factory) {
        this.n = n;
        this.factory = factory;
        this.eventLoopGroups = new AtomicReferenceArray<>(n);
    }

    @Override
    public Worker createWorker() {
        return new ParallelWorker(pick());
    }

    // Pick an event loop based on the calling thread (lazily created)
    EventLoopGroup pick() {
        int index = (int) (Thread.currentThread().getId() % n);
        EventLoopGroup group = eventLoopGroups.get(index);
        if (group == null) {
            // EventLoopGroup is an interface; use Netty's DefaultEventLoopGroup here
            group = new DefaultEventLoopGroup(1, factory);
            if (!eventLoopGroups.compareAndSet(index, null, group)) {
                // Another thread won the race: discard ours and reuse theirs
                group.shutdownGracefully();
                group = eventLoopGroups.get(index);
            }
        }
        return group;
    }

    static final class ParallelWorker implements Worker {

        private final EventLoopGroup eventLoopGroup;

        ParallelWorker(EventLoopGroup eventLoopGroup) {
            this.eventLoopGroup = eventLoopGroup;
        }

        @Override
        public Disposable schedule(Runnable task) {
            // Submit to the event loop (EventLoopDisposable adapts the Netty Future to Disposable, not shown)
            return new EventLoopDisposable(eventLoopGroup.submit(task));
        }

        @Override
        public Disposable schedule(Runnable task, long delay, TimeUnit unit) {
            // Delayed execution on the same event loop
            return new EventLoopDisposable(
                    eventLoopGroup.schedule(task, delay, unit)
            );
        }
    }
}
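In practice you would not hand-roll this; Reactor already ships a parallel scheduler. A brief sketch using the real factory methods (the pool name and work are arbitrary):

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class ParallelSchedulerDemo {
    public static void main(String[] args) {
        // One worker per CPU core, named threads for easier debugging
        Scheduler parallel = Schedulers.newParallel("calc", Runtime.getRuntime().availableProcessors());
        try {
            Flux.range(1, 8)
                .publishOn(parallel)               // downstream operators run on the parallel workers
                .map(i -> i * i)                   // CPU-bound, non-blocking work only
                .doOnNext(v -> System.out.println(v + " on " + Thread.currentThread().getName()))
                .blockLast();
        } finally {
            parallel.dispose();                    // release the worker threads
        }
    }
}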
4. BoundedElasticScheduler Implementation
// Bounded elastic scheduler - intended for blocking operations
final class BoundedElasticScheduler implements Scheduler {

    private final int maxThreads;
    private final int maxTaskQueueSize;
    private final ThreadFactory factory;
    private final ScheduledExecutorService[] executors;
    private final AtomicInteger roundRobin = new AtomicInteger();

    BoundedElasticScheduler(int maxThreads, int maxTaskQueueSize, ThreadFactory factory) {
        this.maxThreads = maxThreads;
        this.maxTaskQueueSize = maxTaskQueueSize;
        this.factory = factory;
        this.executors = new ScheduledExecutorService[maxThreads];
        // Create a fixed number of single-threaded executors.
        // Note: ScheduledThreadPoolExecutor manages its own internal delayed-work queue, so the
        // maxTaskQueueSize cap must be enforced by the scheduler itself before submitting
        // (which is what Reactor's real boundedElastic scheduler does).
        for (int i = 0; i < maxThreads; i++) {
            executors[i] = new ScheduledThreadPoolExecutor(1, factory,
                    new ThreadPoolExecutor.AbortPolicy());
        }
    }

    @Override
    public Worker createWorker() {
        // Round-robin over the fixed executors; floorMod stays non-negative after overflow
        int index = Math.floorMod(roundRobin.getAndIncrement(), maxThreads);
        return new BoundedElasticWorker(executors[index]);
    }

    static final class BoundedElasticWorker implements Worker {

        private final ScheduledExecutorService executor;

        BoundedElasticWorker(ScheduledExecutorService executor) {
            this.executor = executor;
        }

        @Override
        public Disposable schedule(Runnable task) {
            try {
                // Submit the task to the backing executor
                // (FutureDisposable adapts the Future to Disposable, not shown)
                Future<?> future = executor.submit(task);
                return new FutureDisposable(future);
            } catch (RejectedExecutionException e) {
                // Queue full or executor shut down: propagate the rejection to the caller
                throw Exceptions.propagate(e);
            }
        }

        // Delayed scheduling would delegate to executor.schedule(...) (omitted for brevity)
    }
}
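The typical WebFlux usage is again through the built-in factory; a short sketch of offloading a blocking call (the repository call is a hypothetical placeholder):

import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

public class BlockingBridge {

    // Wraps a blocking call so it never runs on the event-loop / parallel threads
    public Mono<String> loadUserName(long userId) {
        return Mono.fromCallable(() -> blockingJdbcLookup(userId))   // deferred, lazy
                   .subscribeOn(Schedulers.boundedElastic());        // capped elastic pool for blocking work
    }

    // Hypothetical blocking repository call (JDBC, file system, legacy SDK, ...)
    private String blockingJdbcLookup(long userId) {
        return "user-" + userId;
    }
}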
Task Scheduling Algorithms
1. Round-Robin Scheduling
// Round-robin worker selection
public class RoundRobinScheduler {

    private final AtomicInteger counter = new AtomicInteger(0);
    private final List<Worker> workers;

    public RoundRobinScheduler(List<Worker> workers) {
        this.workers = workers;
    }

    public Worker selectWorker() {
        // floorMod keeps the index non-negative even after the counter overflows
        int index = Math.floorMod(counter.getAndIncrement(), workers.size());
        return workers.get(index);
    }
}

// An alternative createWorker() for the ParallelScheduler sketch above,
// using a round-robin counter instead of the thread-id hash
@Override
public Worker createWorker() {
    // Round-robin over the event loops
    int index = Math.floorMod(roundRobin.getAndIncrement(), n);
    return new ParallelWorker(eventLoopGroups.get(index));
}
2. Work-Stealing Algorithm
// Work-stealing scheduler
public class WorkStealingScheduler {

    private final Deque<Runnable>[] workQueues;
    private final WorkerThread[] threads;

    @SuppressWarnings("unchecked")
    public WorkStealingScheduler(int parallelism) {
        this.workQueues = new Deque[parallelism];
        this.threads = new WorkerThread[parallelism];
        for (int i = 0; i < parallelism; i++) {
            workQueues[i] = new ConcurrentLinkedDeque<>();
            threads[i] = new WorkerThread(i, this);
            threads[i].start();
        }
    }

    public void submit(Runnable task) {
        // Hash the submitting thread onto one of the local queues
        int queueId = Math.floorMod(Thread.currentThread().hashCode(), threads.length);
        workQueues[queueId].push(task);
    }

    // Worker thread: drains its own queue first, then steals from the others
    class WorkerThread extends Thread {

        private final int id;
        private final WorkStealingScheduler scheduler;

        WorkerThread(int id, WorkStealingScheduler scheduler) {
            this.id = id;
            this.scheduler = scheduler;
        }

        @Override
        public void run() {
            while (!isInterrupted()) {
                Runnable task = stealWork();
                if (task != null) {
                    task.run();
                } else {
                    // No work available: back off briefly
                    LockSupport.parkNanos(1000);
                }
            }
        }

        private Runnable stealWork() {
            // 1. Try the local queue first (head of the deque, better cache locality)
            Runnable task = workQueues[id].poll();
            if (task != null) {
                return task;
            }
            // 2. Otherwise steal from the tail of another thread's queue
            for (int i = 0; i < threads.length; i++) {
                if (i != id) {
                    Runnable stolenTask = workQueues[i].pollLast();
                    if (stolenTask != null) {
                        return stolenTask;
                    }
                }
            }
            return null;
        }
    }
}
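For completeness: the JDK already ships a production-grade work-stealing executor, ForkJoinPool, and Reactor can wrap any ExecutorService as a Scheduler. A minimal sketch (pool size and workload chosen arbitrarily):

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class WorkStealingDemo {
    public static void main(String[] args) {
        // newWorkStealingPool is backed by a ForkJoinPool with per-thread work deques
        ExecutorService pool = Executors.newWorkStealingPool(4);
        Scheduler stealing = Schedulers.fromExecutorService(pool);
        try {
            Flux.range(1, 16)
                .parallel()
                .runOn(stealing)                 // distribute the rails over the stealing pool
                .map(i -> i * i)
                .sequential()
                .blockLast();
        } finally {
            stealing.dispose();
            pool.shutdown();
        }
    }
}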
3. Priority Scheduling
// A task tagged with a priority
class PriorityTask implements Comparable<PriorityTask> {

    private final Runnable task;
    private final int priority;

    PriorityTask(Runnable task, int priority) {
        this.task = task;
        this.priority = priority;
    }

    @Override
    public int compareTo(PriorityTask other) {
        return Integer.compare(other.priority, this.priority); // higher priority first
    }

    Runnable getTask() {
        return task;
    }
}

// Priority-based scheduler
public class PriorityScheduler implements Scheduler {

    private final PriorityBlockingQueue<PriorityTask> taskQueue;
    private final ExecutorService executor;

    public PriorityScheduler(int parallelism) {
        this.taskQueue = new PriorityBlockingQueue<>();
        this.executor = Executors.newFixedThreadPool(parallelism);
        // Start the worker loops
        for (int i = 0; i < parallelism; i++) {
            executor.submit(this::processTasks);
        }
    }

    private void processTasks() {
        while (!Thread.currentThread().isInterrupted()) {
            try {
                PriorityTask priorityTask = taskQueue.take();
                priorityTask.getTask().run();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                break;
            }
        }
    }

    @Override
    public Worker createWorker() {
        return new PriorityWorker();
    }

    class PriorityWorker implements Worker {

        @Override
        public Disposable schedule(Runnable task) {
            return schedule(task, 0); // default priority
        }

        @Override
        public Disposable schedule(Runnable task, long delay, TimeUnit unit) {
            if (delay <= 0) {
                taskQueue.offer(new PriorityTask(task, 0));
            } else {
                // Delayed enqueue; a one-shot timer per task is wasteful but keeps the sketch simple
                ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();
                timer.schedule(() -> taskQueue.offer(new PriorityTask(task, 0)), delay, unit);
                timer.shutdown();
            }
            return Disposables.disposed();
        }

        // Extra overload: schedule with an explicit priority
        public Disposable schedule(Runnable task, int priority) {
            taskQueue.offer(new PriorityTask(task, priority));
            return Disposables.disposed();
        }

        // dispose() and the remaining Worker methods are omitted for brevity
    }
}
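A short usage sketch of the classes above (casting to PriorityWorker to reach the priority-aware overload; priorities and timings are arbitrary):

public class PrioritySchedulerDemo {
    public static void main(String[] args) throws InterruptedException {
        PriorityScheduler scheduler = new PriorityScheduler(2);
        PriorityScheduler.PriorityWorker worker =
                (PriorityScheduler.PriorityWorker) scheduler.createWorker();

        // Lower-priority background work...
        worker.schedule(() -> System.out.println("low priority report"), 1);
        // ...may be overtaken by a higher-priority task if both are still queued
        worker.schedule(() -> System.out.println("high priority request"), 100);

        Thread.sleep(500); // give the worker loops time to drain the queue
    }
}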
Asynchronous Execution Mechanisms
1. Task Wrapping and Execution
// Wraps a task so its outcome completes a future
class ScheduledRunnable implements Runnable {

    private final Runnable task;
    private final CompletableFuture<Void> future;

    ScheduledRunnable(Runnable task, CompletableFuture<Void> future) {
        this.task = task;
        this.future = future;
    }

    @Override
    public void run() {
        try {
            task.run();
            future.complete(null);
        } catch (Throwable t) {
            future.completeExceptionally(t);
        }
    }
}

// Helpers for wrapping work as asynchronous publishers
public class AsyncExecutionWrapper {

    private static final Logger log = LoggerFactory.getLogger(AsyncExecutionWrapper.class);

    public static <T> Mono<T> wrapAsync(Callable<T> callable, Scheduler scheduler) {
        return Mono.fromCallable(callable)
                .subscribeOn(scheduler)
                .timeout(Duration.ofSeconds(30))
                .onErrorMap(TimeoutException.class,
                        ex -> new AsyncExecutionTimeoutException("Async execution timeout", ex)); // custom exception type
    }

    public static <T> Flux<T> wrapAsyncStream(Iterable<T> data, Function<T, T> processor, Scheduler scheduler) {
        return Flux.fromIterable(data)
                .parallel()
                .runOn(scheduler)
                .map(processor)
                .sequential()
                .onErrorResume(error -> {
                    log.error("Error in async stream processing", error);
                    return Flux.empty();
                });
    }
}
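A brief usage sketch of wrapAsync (the price client is a hypothetical blocking dependency):

import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

import java.math.BigDecimal;

public class QuoteHandler {

    // Hypothetical blocking client
    private final LegacyPriceClient priceService = new LegacyPriceClient();

    public Mono<BigDecimal> quote(String symbol) {
        // The blocking lookup runs on boundedElastic, guarded by the 30s timeout inside wrapAsync
        return AsyncExecutionWrapper.wrapAsync(
                () -> priceService.fetchPrice(symbol),
                Schedulers.boundedElastic());
    }

    static class LegacyPriceClient {
        BigDecimal fetchPrice(String symbol) {
            return BigDecimal.TEN; // placeholder for a blocking remote call
        }
    }
}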
2. Callbacks and Notifications
// Manages asynchronous completion callbacks
@Slf4j
public class AsyncCallbackManager {

    private final List<Consumer<AsyncResult>> callbacks = new CopyOnWriteArrayList<>();

    public void addCallback(Consumer<AsyncResult> callback) {
        callbacks.add(callback);
    }

    public void notifyCallbacks(AsyncResult result) {
        for (Consumer<AsyncResult> callback : callbacks) {
            try {
                callback.accept(result);
            } catch (Exception e) {
                // One failing callback must not prevent the others from running
                log.error("Error executing callback", e);
            }
        }
    }

    @Data
    @AllArgsConstructor
    public static class AsyncResult {
        private final boolean success;
        private final Object result;
        private final Throwable error;
        private final long executionTime;
    }
}
Performance Optimization Algorithms
1. Adaptive Thread Pool
// Adaptive thread pool - resizes itself based on observed load
public class AdaptiveThreadPool {

    private static final Logger log = LoggerFactory.getLogger(AdaptiveThreadPool.class);

    private final int minThreads;
    private final int maxThreads;
    private final AtomicInteger currentThreads;
    private final ThreadPoolExecutor executor;
    private final ScheduledExecutorService monitor;

    public AdaptiveThreadPool(int minThreads, int maxThreads) {
        this.minThreads = minThreads;
        this.maxThreads = maxThreads;
        this.currentThreads = new AtomicInteger(minThreads);
        this.executor = new ThreadPoolExecutor(
                minThreads, minThreads,
                60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(),
                new ThreadFactory() {
                    private final AtomicInteger counter = new AtomicInteger();

                    @Override
                    public Thread newThread(Runnable r) {
                        Thread t = new Thread(r);
                        t.setName("adaptive-pool-" + counter.incrementAndGet());
                        return t;
                    }
                }
        );
        // Periodically re-evaluate the pool size
        this.monitor = Executors.newSingleThreadScheduledExecutor();
        this.monitor.scheduleAtFixedRate(this::adjustThreadPool, 30, 30, TimeUnit.SECONDS);
    }

    private void adjustThreadPool() {
        int activeCount = executor.getActiveCount();
        int poolSize = executor.getPoolSize();
        int queueSize = executor.getQueue().size();
        if (poolSize == 0) {
            return; // no threads started yet, nothing to measure
        }
        // Load factor: fraction of pool threads currently busy
        double loadFactor = (double) activeCount / poolSize;
        if (loadFactor > 0.8 && queueSize > poolSize * 2) {
            // High load: double the pool, up to maxThreads (raise max before core)
            int newSize = Math.min(currentThreads.get() * 2, maxThreads);
            if (newSize > currentThreads.get()) {
                executor.setMaximumPoolSize(newSize);
                executor.setCorePoolSize(newSize);
                currentThreads.set(newSize);
                log.info("Increased thread pool size to {}", newSize);
            }
        } else if (loadFactor < 0.3 && queueSize < poolSize / 2) {
            // Low load: halve the pool, down to minThreads (lower core before max)
            int newSize = Math.max(currentThreads.get() / 2, minThreads);
            if (newSize < currentThreads.get()) {
                executor.setCorePoolSize(newSize);
                executor.setMaximumPoolSize(newSize);
                currentThreads.set(newSize);
                log.info("Decreased thread pool size to {}", newSize);
            }
        }
    }
}
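To plug a custom executor like this into a WebFlux pipeline, Reactor can wrap any ExecutorService; a minimal sketch, assuming AdaptiveThreadPool exposes its executor via a hypothetical getExecutor() accessor:

import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class AdaptivePoolUsage {
    public static void main(String[] args) {
        AdaptiveThreadPool pool = new AdaptiveThreadPool(4, 32);
        // getExecutor() is a hypothetical accessor returning the underlying ThreadPoolExecutor
        Scheduler adaptive = Schedulers.fromExecutorService(pool.getExecutor());

        Mono.fromCallable(() -> "work")
            .subscribeOn(adaptive)   // runs on the adaptive pool
            .block();
    }
}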
2. Task Batching
// A simple batching layer in front of another scheduler
public class SmartBatchingScheduler {

    private final int batchSize;
    private final Duration batchTimeout;
    private final Scheduler delegateScheduler;
    private final ConcurrentHashMap<String, Batch<?, ?>> batches;

    public SmartBatchingScheduler(int batchSize, Duration batchTimeout, Scheduler delegateScheduler) {
        this.batchSize = batchSize;
        this.batchTimeout = batchTimeout;
        this.delegateScheduler = delegateScheduler;
        this.batches = new ConcurrentHashMap<>();
    }

    @SuppressWarnings("unchecked")
    public <T, R> Mono<R> scheduleBatched(String batchKey, T input, Function<List<T>, R> batchProcessor) {
        return Mono.defer(() -> {
            Batch<T, R> batch = (Batch<T, R>) batches.computeIfAbsent(batchKey, k -> new Batch<>(batchProcessor));
            return batch.add(input)
                    .doFinally(signal -> {
                        if (batch.isComplete()) {
                            batches.remove(batchKey);
                        }
                    });
        });
    }

    private class Batch<T, R> {

        private final List<T> items = new ArrayList<>();
        private final List<MonoSink<R>> sinks = new ArrayList<>();
        private final Function<List<T>, R> processor;

        Batch(Function<List<T>, R> processor) {
            this.processor = processor;
            startTimeout();
        }

        // Flush the batch after batchTimeout even if it never fills up
        private void startTimeout() {
            delegateScheduler.schedule(() -> {
                synchronized (items) {
                    if (!items.isEmpty()) {
                        processBatch();
                    }
                }
            }, batchTimeout.toMillis(), TimeUnit.MILLISECONDS);
        }

        Mono<R> add(T item) {
            return Mono.create(sink -> {
                synchronized (items) {
                    items.add(item);
                    sinks.add(sink);
                    if (items.size() >= batchSize) {
                        processBatch();
                    }
                }
            });
        }

        // Caller must hold the items lock
        private void processBatch() {
            if (items.isEmpty()) {
                return;
            }
            List<T> batchItems = new ArrayList<>(items);
            List<MonoSink<R>> batchSinks = new ArrayList<>(sinks);
            items.clear();
            sinks.clear();
            delegateScheduler.schedule(() -> {
                try {
                    R result = processor.apply(batchItems);
                    batchSinks.forEach(sink -> sink.success(result));
                } catch (Exception e) {
                    batchSinks.forEach(sink -> sink.error(e));
                }
            });
        }

        boolean isComplete() {
            synchronized (items) {
                return items.isEmpty();
            }
        }
    }
}
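Reactor also has a built-in size-or-time batching operator, bufferTimeout, which covers many of these cases without a custom scheduler; a quick sketch (sizes and timings arbitrary):

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

import java.time.Duration;
import java.util.List;

public class BufferTimeoutDemo {
    public static void main(String[] args) {
        Flux.range(1, 25)
            // Emit a batch when 10 items have accumulated OR 100 ms have passed, whichever comes first
            .bufferTimeout(10, Duration.ofMillis(100))
            .publishOn(Schedulers.boundedElastic())
            .doOnNext((List<Integer> batch) -> System.out.println("processing batch of " + batch.size()))
            .blockLast();
    }
}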
Load Balancing Algorithms
1. Weighted Round-Robin
// Weighted round-robin selection (classic interleaved WRR)
public class WeightedRoundRobinScheduler {

    private final List<WeightedWorker> workers;
    private final AtomicInteger currentWeight = new AtomicInteger(0);
    private final AtomicInteger currentIndex = new AtomicInteger(0);
    private final int maxWeight;
    private final int weightGcd;

    public WeightedRoundRobinScheduler(List<WeightedWorker> workers) {
        this.workers = workers;
        this.maxWeight = workers.stream()
                .mapToInt(WeightedWorker::getWeight)
                .max()
                .orElse(1);
        // GCD of all weights: the step by which the weight threshold drops each pass
        this.weightGcd = workers.stream()
                .mapToInt(WeightedWorker::getWeight)
                .reduce(this::gcd)
                .orElse(1);
    }

    // Note: the index/weight updates are not atomic as a pair, so this sketch is
    // best-effort under concurrency; a production version would guard them together.
    public Worker selectWorker() {
        while (true) {
            int currentIdx = currentIndex.get();
            int currentWgt = currentWeight.get();
            if (currentIdx >= workers.size()) {
                // Completed one pass: lower the weight threshold and start over
                currentIdx = 0;
                currentWgt = currentWgt - weightGcd;
                if (currentWgt <= 0) {
                    currentWgt = maxWeight;
                }
                currentWeight.set(currentWgt);
            }
            currentIndex.set(currentIdx + 1);
            WeightedWorker worker = workers.get(currentIdx);
            // Only workers whose weight reaches the current threshold are eligible this round
            if (worker.getWeight() >= currentWgt) {
                return worker.getWorker();
            }
        }
    }

    private int gcd(int a, int b) {
        return b == 0 ? a : gcd(b, a % b);
    }

    @Data
    @AllArgsConstructor
    public static class WeightedWorker {
        private final Worker worker;
        private final int weight;
    }
}
2. Least Connections
// Least-connections selection: route to the worker with the fewest in-flight tasks
public class LeastConnectionsScheduler {

    private final List<ConnectionTrackingWorker> workers = new ArrayList<>();
    private final AtomicInteger[] connectionCounts;

    public LeastConnectionsScheduler(List<Worker> delegateWorkers) {
        this.connectionCounts = new AtomicInteger[delegateWorkers.size()];
        for (int i = 0; i < delegateWorkers.size(); i++) {
            connectionCounts[i] = new AtomicInteger(0);
            // Pass the index explicitly; the wrapper cannot look itself up while the list is still being built
            workers.add(new ConnectionTrackingWorker(delegateWorkers.get(i), i));
        }
    }

    public Worker selectWorker() {
        int minConnections = Integer.MAX_VALUE;
        int selectedIndex = 0;
        for (int i = 0; i < connectionCounts.length; i++) {
            int connections = connectionCounts[i].get();
            if (connections < minConnections) {
                minConnections = connections;
                selectedIndex = i;
            }
        }
        return workers.get(selectedIndex);
    }

    private class ConnectionTrackingWorker implements Worker {

        private final Worker delegate;
        private final int index;

        ConnectionTrackingWorker(Worker delegate, int index) {
            this.delegate = delegate;
            this.index = index;
        }

        @Override
        public Disposable schedule(Runnable task) {
            connectionCounts[index].incrementAndGet();
            Runnable trackingTask = () -> {
                try {
                    task.run();
                } finally {
                    // Release the slot once the task finishes
                    connectionCounts[index].decrementAndGet();
                }
            };
            return delegate.schedule(trackingTask);
        }

        @Override
        public Disposable schedule(Runnable task, long delay, TimeUnit unit) {
            // Delayed tasks are passed through without counting in this sketch
            return delegate.schedule(task, delay, unit);
        }
    }
}
Monitoring and Diagnostics
1. Scheduler Performance Monitoring
@Component
public class SchedulerPerformanceMonitor {

    private final MeterRegistry meterRegistry;

    public SchedulerPerformanceMonitor(MeterRegistry meterRegistry) {
        this.meterRegistry = meterRegistry;
    }

    public Scheduler monitorScheduler(Scheduler scheduler, String schedulerName) {
        return new MonitoringScheduler(scheduler, schedulerName);
    }

    private class MonitoringScheduler implements Scheduler {

        private final Scheduler delegate;
        private final String name;

        MonitoringScheduler(Scheduler delegate, String name) {
            this.delegate = delegate;
            this.name = name;
        }

        @Override
        public Disposable schedule(Runnable task) {
            // Direct scheduling is routed through a monitoring worker as well
            return createWorker().schedule(task);
        }

        @Override
        public Worker createWorker() {
            return new MonitoringWorker(delegate.createWorker(), name);
        }

        // dispose() and the remaining Scheduler methods delegate in the same way (omitted for brevity)
    }

    private class MonitoringWorker implements Worker {

        private final Worker delegate;
        private final String schedulerName;

        MonitoringWorker(Worker delegate, String schedulerName) {
            this.delegate = delegate;
            this.schedulerName = schedulerName;
        }

        @Override
        public Disposable schedule(Runnable task) {
            long startTime = System.nanoTime();
            Runnable monitoredTask = () -> {
                try {
                    task.run();
                } finally {
                    // Time from scheduling to completion (queue wait + execution), tagged by scheduler
                    long duration = System.nanoTime() - startTime;
                    meterRegistry.timer("scheduler.task.duration",
                            "scheduler", schedulerName).record(duration, TimeUnit.NANOSECONDS);
                }
            };
            return delegate.schedule(monitoredTask);
        }

        @Override
        public Disposable schedule(Runnable task, long delay, TimeUnit unit) {
            meterRegistry.counter("scheduler.delayed.tasks",
                    "scheduler", schedulerName).increment();
            return delegate.schedule(task, delay, unit);
        }
    }
}
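If the scheduler is backed by a plain ExecutorService, Micrometer's ExecutorServiceMetrics binder can do much of this without a custom wrapper; a minimal sketch (pool size and metric name are arbitrary):

import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class MeteredSchedulerFactory {

    public static Scheduler metered(MeterRegistry registry) {
        ExecutorService raw = Executors.newFixedThreadPool(8);
        // Wraps the executor so submitted/active/completed task counts are published as meters
        ExecutorService instrumented = ExecutorServiceMetrics.monitor(registry, raw, "webflux.worker.pool");
        return Schedulers.fromExecutorService(instrumented);
    }

    public static void main(String[] args) {
        Scheduler scheduler = metered(new SimpleMeterRegistry());
        scheduler.schedule(() -> System.out.println("instrumented task"));
    }
}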
2. Scheduler Diagnostics
@Component
public class SchedulerDiagnostics {

    private static final Logger logger = LoggerFactory.getLogger(SchedulerDiagnostics.class);

    public void diagnoseSchedulerIssues(Scheduler scheduler, String name) {
        if (scheduler instanceof ParallelScheduler) {
            diagnoseParallelScheduler((ParallelScheduler) scheduler, name);
        } else if (scheduler instanceof BoundedElasticScheduler) {
            // Analogous diagnostics for the bounded elastic sketch (not shown)
            diagnoseBoundedElasticScheduler((BoundedElasticScheduler) scheduler, name);
        }
    }

    private void diagnoseParallelScheduler(ParallelScheduler scheduler, String name) {
        // Periodically sample worker usage
        ScheduledExecutorService monitor = Executors.newSingleThreadScheduledExecutor();
        monitor.scheduleAtFixedRate(() -> {
            try {
                // Read internal state of the ParallelScheduler sketch via reflection
                Field workersField = ParallelScheduler.class.getDeclaredField("eventLoopGroups");
                workersField.setAccessible(true);
                AtomicReferenceArray<?> workers = (AtomicReferenceArray<?>) workersField.get(scheduler);
                int activeWorkers = 0;
                for (int i = 0; i < workers.length(); i++) {
                    if (workers.get(i) != null) {
                        activeWorkers++;
                    }
                }
                logger.debug("Parallel scheduler {} - Active workers: {}/{}",
                        name, activeWorkers, workers.length());
            } catch (Exception e) {
                logger.error("Error diagnosing parallel scheduler", e);
            }
        }, 0, 30, TimeUnit.SECONDS);
    }

    // Returns the instrumented stream; callers must subscribe to the returned Flux,
    // otherwise the diagnostics never run.
    public <T> Flux<T> detectSchedulingBottlenecks(Flux<T> stream, String operation, Duration threshold) {
        AtomicLong startTime = new AtomicLong();
        AtomicLong itemCount = new AtomicLong();
        return stream
                .doOnSubscribe(subscription -> startTime.set(System.nanoTime()))
                .doOnNext(item -> {
                    long duration = System.nanoTime() - startTime.get();
                    itemCount.incrementAndGet();
                    if (duration > threshold.toNanos()) {
                        logger.warn("Scheduling bottleneck detected in {}: {} ms for {} items",
                                operation,
                                TimeUnit.NANOSECONDS.toMillis(duration),
                                itemCount.get());
                    }
                })
                .doFinally(signal -> {
                    long totalDuration = System.nanoTime() - startTime.get();
                    logger.info("Operation {} completed in {} ms, processed {} items",
                            operation,
                            TimeUnit.NANOSECONDS.toMillis(totalDuration),
                            itemCount.get());
                });
    }
}
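Another common diagnostic for WebFlux scheduling problems is BlockHound, which fails fast when a blocking call sneaks onto a non-blocking scheduler thread; a minimal sketch (typically installed once in a test or at application startup):

import reactor.blockhound.BlockHound;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

public class BlockingCallDetection {
    public static void main(String[] args) {
        // Instruments the JVM so blocking calls on parallel()/single() threads raise an error
        BlockHound.install();

        Mono.fromCallable(() -> {
                Thread.sleep(10);           // blocking call on a parallel worker
                return "done";
            })
            .subscribeOn(Schedulers.parallel())
            .doOnError(err -> System.err.println("BlockHound caught: " + err))
            .onErrorReturn("blocked call detected")
            .block();
    }
}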
Best Practices Summary
1. Scheduler Selection Guide
# WebFlux scheduler selection best practices
scheduler_selection:
  # CPU-bound tasks
  cpu_intensive:
    use_parallel_scheduler: true          # use the parallel scheduler
    thread_count: "number of CPU cores"   # one thread per core
    avoid_blocking: true                  # never block on these threads
  # I/O-bound tasks
  io_intensive:
    use_bounded_elastic: true             # use the bounded elastic scheduler
    max_threads: 200                      # thread cap
    queue_size: 100000                    # queued-task cap
  # Lightweight tasks
  lightweight:
    use_immediate_scheduler: true         # use the immediate scheduler
    avoid_context_switching: true         # no thread hop at all
  # Strictly sequential tasks
  sequential:
    use_single_scheduler: true            # use the single-threaded scheduler
    ensure_ordering: true                 # preserves execution order
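Translated into code, the guidance above roughly corresponds to the following factory calls (names and caps are illustrative; the bounded-elastic figures mirror the YAML, not Reactor's defaults):

import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public final class AppSchedulers {

    // CPU-bound work: one worker per core
    public static final Scheduler CPU =
            Schedulers.newParallel("cpu", Runtime.getRuntime().availableProcessors());

    // Blocking I/O: capped thread count and task queue, matching the YAML above
    public static final Scheduler IO =
            Schedulers.newBoundedElastic(200, 100_000, "io");

    // Strictly ordered work: a single reusable thread
    public static final Scheduler SEQUENTIAL = Schedulers.newSingle("sequential");

    private AppSchedulers() { }
}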
2. Performance Optimization Checklist
- Choose the right scheduler: match the scheduler to the task type (CPU-bound vs. blocking I/O).
- Avoid over-scheduling: cut unnecessary thread hops and context switches.
- Size thread pools deliberately: adjust pool sizes based on observed load.
- Batch small tasks: amortize scheduling overhead when handling large numbers of tiny tasks.
- Monitor scheduler performance: track task latency and queue depth in real time.
- Apply backpressure: keep task queues from overflowing (see the sketch after this list).
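A minimal backpressure sketch using standard Reactor operators (rates, buffer size, and limits are arbitrary):

import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

import java.time.Duration;

public class BackpressureDemo {
    public static void main(String[] args) {
        Flux.interval(Duration.ofMillis(1))               // fast producer
            .onBackpressureBuffer(1_000)                  // bounded buffer; overflowing it signals an error
            .publishOn(Schedulers.boundedElastic(), 256)  // prefetch limits how much is requested at once
            .limitRate(100)                               // request upstream in batches of 100
            .take(300)
            .doOnNext(BackpressureDemo::slowConsume)
            .blockLast();
    }

    private static void slowConsume(Long value) {
        try {
            Thread.sleep(2);                              // simulated slow consumer (fine on boundedElastic)
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}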
With a solid understanding of WebFlux's asynchronous scheduling algorithms, developers can build high-performance, highly concurrent reactive applications that take full advantage of modern multi-core processors and asynchronous programming.