用libuv库实现管道进程间通信
https://blog.csdn.net/hello_wcx/article/details/78924575
IPC之FIFO(有名管道)详解
https://blog.csdn.net/daiyudong2020/article/details/52264248
Linux C++ 有名管道
基于Linux下C/C++的网络编程之进程(管道和FIFO)
https://blog.csdn.net/cuixixi0525/article/details/52453405
匿名管道
https://www.cnblogs.com/wuyepeng/p/9747557.html
https://blog.csdn.net/bit_clearoff/article/details/55105816
http://blog.chinaunix.net/uid-23065002-id-4226032.html
2.5管道特点
1.管道只允许具有血缘关系的进程间通信,如父子进程间的通信。
2.管道只允许单向通信。
3.管道内部保证同步机制,从而保证访问数据的一致性。
4.面向字节流
5.管道随进程,进程在管道在,进程消失管道对应的端口也关闭,两个进程都消失管道也消失。
2.6管道容量大小
测试管道容量大小只需要将写端一直写,读端不读且不关闭fd[0],即可。
测试代码:
#include "uv.h"
#include "task.h"
# define TEST_PIPENAME "/tmp/uv-test-sock"
/* A write request paired with the buffer it owns; allocated in after_read()
 * and released (request and buffer together) in after_write(). */
typedef struct {
uv_write_t req;
uv_buf_t buf;
} write_req_t;
/* Listening handle, kept global so read callbacks can close it on 'Q'. */
static uv_handle_t* server;
/* Event loop shared by every handle in this program. */
static uv_loop_t* loop;
/* The named-pipe server handle itself (server points at it). */
static uv_pipe_t pipeServer;
/* Kind of server running (TCP or PIPE; stream_type comes from task.h). */
static stream_type serverType;
/* Set once the listening handle has been closed in response to 'Q'. */
static int server_closed;
/* Allocation callback for uv_read_start(): hands libuv a heap buffer.
 * Fix: the original left buf->len = suggested_size even when malloc
 * failed; setting len to 0 on failure makes libuv report UV_ENOBUFS to
 * the read callback instead of writing through a NULL base pointer. */
static void echo_alloc(uv_handle_t* handle,
                       size_t suggested_size,
                       uv_buf_t* buf) {
  buf->base = (char*) malloc(suggested_size);
  buf->len = (buf->base != NULL) ? suggested_size : 0;
}
/* Close callback: the peer handle was heap-allocated in on_connection(),
 * so free it here once libuv has finished with it. */
static void on_close(uv_handle_t* peer) {
free(peer);
}
/* Shutdown callback: close the stream (freed later in on_close) and
 * release the shutdown request.  NOTE(review): status is ignored here. */
static void after_shutdown(uv_shutdown_t* req, int status) {
uv_close((uv_handle_t*) req->handle, on_close);
free(req);
}
/* Close callback for the listening handle; sanity-check that the handle
 * being closed is indeed the global server. */
static void on_server_close(uv_handle_t* handle) {
ASSERT(handle == server);
}
/* Completion callback for uv_write(): releases the echo request together
 * with the buffer it carried, logging the libuv error when status != 0. */
static void after_write(uv_write_t* req, int status) {
  write_req_t* wr = (write_req_t*) req;

  if (status != 0) {
    fprintf(stderr,
            "uv_write error: %s - %s\n",
            uv_err_name(status),
            uv_strerror(status));
  }

  /* The uv_write_t is the first member of write_req_t, so freeing wr
   * releases both the request and its bookkeeping. */
  free(wr->buf.base);
  free(wr);
}
/* Read callback for echo connections.
 *   nread < 0  : error/EOF -> shut down, then close in after_shutdown().
 *   nread == 0 : nothing read -> just release the buffer.
 *   nread > 0  : scan for control bytes ('Q' closes the listening server,
 *                "QS" closes this stream), then echo the data back.
 * Fixes: printf used %d for the ssize_t nread (undefined behavior on
 * LP64 targets); the scan index was an int, which cannot cover the full
 * ssize_t range of nread. */
static void after_read(uv_stream_t* handle,
                       ssize_t nread,
                       const uv_buf_t* buf) {
  ssize_t i;
  write_req_t* wr;
  uv_shutdown_t* sreq;

  if (nread < 0) {
    /* Error or EOF.  %zd is the correct conversion for ssize_t. */
    printf("nread: %zd vs %d\n", nread, UV_EOF);
    free(buf->base);
    sreq = (uv_shutdown_t*) malloc(sizeof *sreq);
    ASSERT(0 == uv_shutdown(sreq, handle, after_shutdown));
    return;
  }

  if (nread == 0) {
    /* Everything OK, but nothing read. */
    free(buf->base);
    return;
  }

  /*
   * Scan for the letter Q which signals that we should quit the server.
   * If we get QS it means close the stream.
   */
  if (!server_closed) {
    for (i = 0; i < nread; i++) {
      if (buf->base[i] == 'Q') {
        if (i + 1 < nread && buf->base[i + 1] == 'S') {
          free(buf->base);
          uv_close((uv_handle_t*) handle, on_close);
          return;
        } else {
          uv_close(server, on_server_close);
          server_closed = 1;
        }
      }
    }
  }

  /* Echo the payload back; after_write() frees both wr and buf->base. */
  wr = (write_req_t*) malloc(sizeof *wr);
  ASSERT(wr != NULL);
  wr->buf = uv_buf_init(buf->base, nread);
  if (uv_write(&wr->req, handle, &wr->buf, 1, after_write)) {
    FATAL("uv_write failed");
  }
}
/* Connection callback for the listening handle: allocate a client stream
 * of the configured type, accept the connection and start echo reads.
 * The stream is freed in on_close() when the connection ends. */
static void on_connection(uv_stream_t* server, int status) {
uv_stream_t* stream;
int r;
if (status != 0) {
fprintf(stderr, "Connect error %s\n", uv_err_name(status));
}
ASSERT(status == 0);
/* serverType is set in pipe_echo_start(); the TCP arm is unused here. */
switch (serverType) {
case TCP:
stream = (uv_stream_t*)malloc(sizeof(uv_tcp_t));
ASSERT(stream != NULL);
r = uv_tcp_init(loop, (uv_tcp_t*)stream);
ASSERT(r == 0);
break;
case PIPE:
stream = (uv_stream_t*)malloc(sizeof(uv_pipe_t));
ASSERT(stream != NULL);
r = uv_pipe_init(loop, (uv_pipe_t*)stream, 0);
ASSERT(r == 0);
break;
default:
ASSERT(0 && "Bad serverType");
abort();
}
/* associate server with stream */
stream->data = server;
r = uv_accept(server, stream);
ASSERT(r == 0);
r = uv_read_start(stream, echo_alloc, after_read);
ASSERT(r == 0);
}
/* Create a named-pipe echo server bound to pipeName and start listening.
 * Returns 0 on success, 1 on any libuv failure (the cause is logged). */
static int pipe_echo_start(char* pipeName) {
  int err;

#ifndef _WIN32
  /* Remove a stale socket file left by a previous run; errors ignored. */
  {
    uv_fs_t unlink_req;
    uv_fs_unlink(NULL, &unlink_req, pipeName, NULL);
    uv_fs_req_cleanup(&unlink_req);
  }
#endif

  server = (uv_handle_t*) &pipeServer;
  serverType = PIPE;

  err = uv_pipe_init(loop, &pipeServer, 0);
  if (err != 0) {
    fprintf(stderr, "uv_pipe_init: %s\n", uv_strerror(err));
    return 1;
  }

  err = uv_pipe_bind(&pipeServer, pipeName);
  if (err != 0) {
    fprintf(stderr, "uv_pipe_bind: %s\n", uv_strerror(err));
    return 1;
  }

  err = uv_listen((uv_stream_t*) &pipeServer, SOMAXCONN, on_connection);
  if (err != 0) {
    fprintf(stderr, "uv_pipe_listen: %s\n", uv_strerror(err));
    return 1;
  }

  return 0;
}
/* Entry point: start the pipe echo server and drive the default loop. */
int main(void) {
  loop = uv_default_loop();

  if (pipe_echo_start(TEST_PIPENAME) != 0)
    return 1;

  /* notify_parent_process(); */
  uv_run(loop, UV_RUN_DEFAULT);
  return 0;
}
#include "uv.h"
#include "task.h"
/* One storage slot large enough to hold any handle type that can be
 * received over an IPC pipe; accessed via the member matching the
 * pending handle type. */
union handles {
uv_handle_t handle;
uv_stream_t stream;
uv_pipe_t pipe;
uv_tcp_t tcp;
uv_tty_t tty;
};
/* Client-side state for the IPC send/recv test (connect_cb / recv_cb). */
struct test_ctx {
uv_pipe_t channel;        /* IPC pipe to the server */
uv_connect_t connect_req; /* request for uv_pipe_connect() */
uv_write_t write_req;     /* first uv_write2 request */
uv_write_t write_req2;    /* second uv_write2 request (disabled path) */
uv_handle_type expected_type; /* handle type we expect to receive back */
union handles send;       /* first handle to send over the channel */
union handles send2;      /* second handle to send */
union handles recv;       /* slot for the first received handle */
union handles recv2;      /* slot for the second received handle */
};
/* Helper (server-side) state: the listening pipe, the accepted IPC
 * channel, and slots for handles received over that channel. */
struct echo_ctx {
uv_pipe_t listen;         /* bound/listening pipe (in-process mode) */
uv_pipe_t channel;        /* accepted IPC channel */
uv_write_t write_req;     /* ack write for the first received handle */
uv_write_t write_req2;    /* ack write for the second received handle */
uv_handle_type expected_type; /* handle type expected over the channel */
union handles recv;       /* first received handle */
union handles recv2;      /* second received handle */
};
/* Write request plus the buffer it owns; freed together in after_write().
 * NOTE(review): duplicates the typedef in the first listing -- the two
 * programs in this file must be compiled separately. */
typedef struct {
uv_write_t req;
uv_buf_t buf;
} write_req_t;
static struct test_ctx ctx;  /* client-side test state */
static struct echo_ctx ctx2; /* helper/echo-side state */
static int is_in_process;    /* helper runs on a thread in this process */
static int read_cb_count;    /* reads observed by the helper's read_cb() */
static int write2_cb_called; /* completed uv_write2 acks in the helper */
static int is_child_process; /* set when running as a spawned child */
/* NOTE(review): recv_cb_count is tested in recv_cb() and run_test() but is
 * never incremented anywhere in this listing. */
static int recv_cb_count;
/* Buffer allocation for uv_read_start().  The #if 0 branch preserves the
 * original fixed 8-byte slab variant; the live branch heap-allocates the
 * suggested size (freed by the read callbacks). */
static void alloc_cb(uv_handle_t* handle,
size_t suggested_size,
uv_buf_t* buf) {
/* we're not actually reading anything so a small buffer is okay */
#if 0
static char slab[8];
buf->base = slab;
buf->len = sizeof(slab);
#else
buf->base = (char*)malloc(suggested_size);
buf->len = suggested_size;
#endif
}
/* Completion callback for the helper's uv_write2() acks.
 * After the second successful ack, close every helper-side handle so the
 * helper's event loop can drain and exit. */
static void write2_cb(uv_write_t* req, int status) {
ASSERT(status == 0);
/* After two successful writes in the child process, allow the child
* process to be closed. */
if (++write2_cb_called == 2 && (is_child_process || is_in_process)) {
uv_close(&ctx2.recv.handle, NULL);
uv_close(&ctx2.recv2.handle, NULL);
uv_close((uv_handle_t*)&ctx2.channel, NULL);
uv_close((uv_handle_t*)&ctx2.listen, NULL);
}
}
/* Read callback on the helper's IPC channel.
 * Each wakeup may carry one or more pending handles sent over the pipe;
 * accept each into ctx2.recv/recv2 and write a one-byte ack (".") back
 * along with the accepted stream via uv_write2().
 * NOTE(review): rdbuf->base is heap-allocated by alloc_cb() but never
 * freed here -- looks like a leak; confirm against the harness. */
static void read_cb(uv_stream_t* handle,
ssize_t nread,
const uv_buf_t* rdbuf) {
uv_buf_t wrbuf;
uv_pipe_t* pipe;
uv_handle_type pending;
int r;
union handles* recv;
uv_write_t* write_req;
/* Peer went away; nothing to clean up for EOF/abort. */
if (nread == UV_EOF || nread == UV_ECONNABORTED) {
return;
}
pipe = (uv_pipe_t*) handle;
do {
/* The second callback uses the first slot pair; all other callbacks
 * use the second pair. */
if (++read_cb_count == 2) {
recv = &ctx2.recv;
write_req = &ctx2.write_req;
} else {
recv = &ctx2.recv2;
write_req = &ctx2.write_req2;
}
ASSERT(pipe == &ctx2.channel);
ASSERT(nread >= 0);
ASSERT(uv_pipe_pending_count(pipe) > 0);
/* Initialize a handle of the pending type, then accept it off the pipe. */
pending = uv_pipe_pending_type(pipe);
ASSERT(pending == UV_NAMED_PIPE || pending == UV_TCP);
if (pending == UV_NAMED_PIPE)
r = uv_pipe_init(ctx2.channel.loop, &recv->pipe, 0);
else if (pending == UV_TCP)
r = uv_tcp_init(ctx2.channel.loop, &recv->tcp);
else
abort();
ASSERT(r == 0);
r = uv_accept(handle, &recv->stream);
ASSERT(r == 0);
/* Send a 1-byte ack and pass the accepted stream back to the sender. */
wrbuf = uv_buf_init(".", 1);
r = uv_write2(write_req,
(uv_stream_t*)&ctx2.channel,
&wrbuf,
1,
&recv->stream,
write2_cb);
ASSERT(r == 0);
} while (uv_pipe_pending_count(pipe) > 0);
}
static void send_recv_start(void) {
int r;
ASSERT(1 == uv_is_readable((uv_stream_t*)&ctx2.channel));
ASSERT(1 == uv_is_writable((uv_stream_t*)&ctx2.channel));
ASSERT(0 == uv_is_closing((uv_handle_t*)&ctx2.channel));
r = uv_read_start((uv_stream_t*)&ctx2.channel, alloc_cb, read_cb);
ASSERT(r == 0);
}
/* New-connection callback for the in-process helper's listening pipe:
 * accept the connection into ctx2.channel and start reading on it. */
static void listen_cb(uv_stream_t* handle, int status) {
int r;
ASSERT(handle == (uv_stream_t*)&ctx2.listen);
ASSERT(status == 0);
r = uv_accept((uv_stream_t*)&ctx2.listen, (uv_stream_t*)&ctx2.channel);
ASSERT(r == 0);
send_recv_start();
}
/* Helper (server) side of the IPC send/recv test.
 * inprocess != 0: bind/listen on TEST_PIPENAME_3 and wait for a client.
 * inprocess == 0: running as a spawned child -- the IPC channel arrives
 * on stdin (fd 0), so open it directly and start reading. */
int run_ipc_send_recv_helper(uv_loop_t* loop, int inprocess) {
int r;
is_in_process = inprocess;
memset(&ctx2, 0, sizeof(ctx2));
r = uv_pipe_init(loop, &ctx2.listen, 0);
ASSERT(r == 0);
/* ipc=1: this pipe carries handles, not just bytes. */
r = uv_pipe_init(loop, &ctx2.channel, 1);
ASSERT(r == 0);
if (inprocess) {
r = uv_pipe_bind(&ctx2.listen, TEST_PIPENAME_3);
ASSERT(r == 0);
r = uv_listen((uv_stream_t*)&ctx2.listen, SOMAXCONN, listen_cb);
ASSERT(r == 0);
} else {
r = uv_pipe_open(&ctx2.channel, 0);
ASSERT(r == 0);
send_recv_start();
}
//notify_parent_process();
r = uv_run(loop, UV_RUN_DEFAULT);
ASSERT(r == 0);
return 0;
}
/* Thread entry point: run the echo helper on its own private loop
 * (in-process mode), then tear the loop down. */
void ipc_send_recv_helper_threadproc(void* arg) {
  uv_loop_t helper_loop;

  ASSERT(0 == uv_loop_init(&helper_loop));
  ASSERT(0 == run_ipc_send_recv_helper(&helper_loop, 1));
  ASSERT(0 == uv_loop_close(&helper_loop));
}
/* Close callback: free the heap-allocated peer handle.
 * NOTE(review): duplicates the earlier definition -- the two listings in
 * this file are separate programs and must be compiled separately. */
static void on_close(uv_handle_t* peer) {
free(peer);
}
/* Shutdown callback: close the stream and release the request.
 * NOTE(review): status is ignored here. */
static void after_shutdown(uv_shutdown_t* req, int status) {
uv_close((uv_handle_t*) req->handle, on_close);
free(req);
}
/* uv_write() completion: dispose of the request/buffer pair allocated in
 * recv_cb(), reporting any libuv failure to stderr first. */
static void after_write(uv_write_t* req, int status) {
  write_req_t* done = (write_req_t*) req;

  if (status != 0)
    fprintf(stderr,
            "uv_write error: %s - %s\n",
            uv_err_name(status),
            uv_strerror(status));

  free(done->buf.base);
  free(done);
}
/* Client-side read callback on the IPC channel.
 *   nread < 0  : error/EOF -> shut down, then close in after_shutdown().
 *   nread == 0 : nothing read -> release the buffer.
 *   nread > 0  : log and echo the payload, then close after two receipts.
 * Fixes versus the original:
 *   - recv_cb_count was never incremented, so the close-after-two check
 *     below (and run_test's ASSERT(recv_cb_count == 2)) could never fire;
 *   - printf used %d for the ssize_t nread (UB on LP64 targets);
 *   - %s on buf->base read past the data, which is not NUL-terminated --
 *     use a %.*s precision bounded by nread instead. */
static void recv_cb(uv_stream_t* handle,
                    ssize_t nread,
                    const uv_buf_t* buf) {
  uv_pipe_t* pipe;
  uv_shutdown_t* sreq;
  write_req_t* wr;

  pipe = (uv_pipe_t*) handle;
  ASSERT(pipe == &ctx.channel);

  if (nread < 0) {
    /* Error or EOF. */
    printf("nread: %zd vs %d\n", nread, UV_EOF);
    free(buf->base);
    sreq = (uv_shutdown_t*) malloc(sizeof *sreq);
    ASSERT(0 == uv_shutdown(sreq, handle, after_shutdown));
    return;
  }

  if (nread == 0) {
    /* Everything OK, but nothing read. */
    free(buf->base);
    return;
  }

  printf("buf[%zd]: %.*s\n", nread, (int) nread, buf->base);

  /* Echo the payload back; after_write() frees wr and buf->base. */
  wr = (write_req_t*) malloc(sizeof *wr);
  ASSERT(wr != NULL);
  wr->buf = uv_buf_init(buf->base, nread);
  if (uv_write(&wr->req, handle, &wr->buf, 1, after_write)) {
    FATAL("uv_write failed");
  }

  recv_cb_count++;

  /* Close after two writes received. */
  if (recv_cb_count == 2) {
    uv_close((uv_handle_t*)&ctx.channel, NULL);
  }
}
/* Connect callback for the client's IPC channel: send a one-byte message
 * over the channel and start reading replies.
 * NOTE(review): the uv_write2 send-handle argument is the channel itself
 * (the &ctx.send.stream line is commented out), i.e. the pipe is sent
 * over itself -- confirm this is intentional. */
static void connect_cb(uv_connect_t* req, int status) {
int r;
uv_buf_t buf;
ASSERT(req == &ctx.connect_req);
ASSERT(status == 0);
buf = uv_buf_init(".", 1);
#if 1
r = uv_write2(&ctx.write_req,
(uv_stream_t*)&ctx.channel,
&buf, 1,
//&ctx.send.stream,
(uv_stream_t*)&ctx.channel,
NULL);
#else
/* Disabled alternative (C++: uses cout and names not defined here). */
int ret=uv_write(wr,(uv_stream_t*)req->handle,&buf,1,write_server_cb);
if(ret<0){
cout<<"Write error..."<<endl;
}
#endif
ASSERT(r == 0);
/* Perform two writes to the same pipe to make sure that on Windows we are
* not running into issue 505:
* https://github.com/libuv/libuv/issues/505 */
#if 0
buf = uv_buf_init(".", 1);
r = uv_write2(&ctx.write_req2,
(uv_stream_t*)&ctx.channel,
&buf, 1,
&ctx.send2.stream,
NULL);
ASSERT(r == 0);
#endif
r = uv_read_start((uv_stream_t*)&ctx.channel, alloc_cb, recv_cb);
ASSERT(r == 0);
}
#if 0
/* Disabled: re-spawn this test binary as "helper", wiring the IPC channel
 * to the child's stdin and inheriting the parent's stdout/stderr.
 * NOTE(review): references exit_cb, which is not defined in this listing. */
void spawn_helper(uv_pipe_t* channel,
uv_process_t* process,
const char* helper) {
uv_process_options_t options;
size_t exepath_size;
char exepath[1024];
char* args[3];
int r;
uv_stdio_container_t stdio[3];
/* ipc=1 so the channel can carry handles to the child. */
r = uv_pipe_init(uv_default_loop(), channel, 1);
ASSERT_EQ(r, 0);
ASSERT_NE(channel->ipc, 0);
exepath_size = sizeof(exepath);
r = uv_exepath(exepath, &exepath_size);
ASSERT_EQ(r, 0);
exepath[exepath_size] = '\0';
args[0] = exepath;
args[1] = (char*)helper;
args[2] = NULL;
memset(&options, 0, sizeof(options));
options.file = exepath;
options.args = args;
options.exit_cb = exit_cb;
options.stdio = stdio;
options.stdio_count = ARRAY_SIZE(stdio);
/* stdio[0] becomes the IPC channel; fds 1 and 2 are inherited. */
stdio[0].flags = UV_CREATE_PIPE | UV_READABLE_PIPE | UV_WRITABLE_PIPE;
stdio[0].data.stream = (uv_stream_t*) channel;
stdio[1].flags = UV_INHERIT_FD;
stdio[1].data.fd = 1;
stdio[2].flags = UV_INHERIT_FD;
stdio[2].data.fd = 2;
r = uv_spawn(uv_default_loop(), process, &options);
ASSERT_EQ(r, 0);
}
#endif
/* Drive the client side of the IPC test on the default loop.
 * inprocess != 0: connect to TEST_PIPENAME (the helper thread creation is
 * currently commented out, so an external server is expected).
 * Fix: the original unconditionally called uv_thread_join(&tid) even
 * though uv_thread_create() is commented out, joining an uninitialized
 * thread handle (undefined behavior).  The join is now guarded by a flag
 * that is only set when the thread is actually created. */
static int run_test(int inprocess) {
  uv_process_t process;
  uv_thread_t tid;
  int thread_started = 0;  /* guards uv_thread_join() below */
  int r = 0;

  (void) process;  /* only used by the disabled spawn_helper path */

  if (inprocess) {
    /* r = uv_thread_create(&tid, ipc_send_recv_helper_threadproc, (void *) 42);
     * thread_started = (r == 0); */
    ASSERT(r == 0);
    uv_sleep(1000);  /* crude wait for the external server to be listening */
    r = uv_pipe_init(uv_default_loop(), &ctx.channel, 1);
    ASSERT(r == 0);
    uv_pipe_connect(&ctx.connect_req, &ctx.channel, TEST_PIPENAME, connect_cb);
  } else {
    /* spawn_helper(&ctx.channel, &process, "ipc_send_recv_helper"); */
    connect_cb(&ctx.connect_req, 0);
  }

  r = uv_run(uv_default_loop(), UV_RUN_DEFAULT);
  ASSERT(r == 0);
  ASSERT(recv_cb_count == 2);

  if (inprocess && thread_started) {
    r = uv_thread_join(&tid);
    ASSERT(r == 0);
  }
  return 0;
}
/* Set up the pipe handles that would be sent over IPC, then run the test.
 * NOTE(review): the uv_pipe_bind calls are commented out (the server side
 * has already bound these names), so the ASSERTs that follow them merely
 * re-check the previous r value and are effectively no-ops. */
static int run_ipc_send_recv_pipe(int inprocess) {
int r;
ctx.expected_type = UV_NAMED_PIPE;
r = uv_pipe_init(uv_default_loop(), &ctx.send.pipe, 1);
ASSERT(r == 0);
//r = uv_pipe_bind(&ctx.send.pipe, TEST_PIPENAME); /* server already bound */
ASSERT(r == 0);
r = uv_pipe_init(uv_default_loop(), &ctx.send2.pipe, 1);
ASSERT(r == 0);
//r = uv_pipe_bind(&ctx.send2.pipe, TEST_PIPENAME_2);
ASSERT(r == 0);
r = run_test(inprocess);
ASSERT(r == 0);
MAKE_VALGRIND_HAPPY();
return 0;
}
/* Entry point: run the pipe IPC send/recv test twice in in-process mode. */
int main(void) {
  int round;

  for (round = 0; round < 2; round++)
    run_ipc_send_recv_pipe(1);

  return 0;
}
nodejs
https://cloud.tencent.com/developer/ask/81308
var net = require('net');
// BUG FIX: the original called fs.unlink() without requiring 'fs', which
// throws "ReferenceError: fs is not defined" at runtime.
var fs = require('fs');
var L = console.log;

var PIPE_PATH = "/tmp/uv-test-sock";

// Remove a stale socket file from a previous run so listen() can bind.
// NOTE(review): this unlink is asynchronous while listen() below runs
// immediately, so there is a race if the stale file exists -- confirm
// whether listen should be moved into this callback.
fs.unlink(PIPE_PATH, function(error) {
    if (error) {
        console.log(error);
        return false;
    }
    console.log('删除文件成功');
});

// Greet each client on connect and answer every data chunk; shut the
// server down when a client ends its side of the stream.
var server = net.createServer(function(stream) {
    L('Server: on connection')
    stream.on('data', function(c) {
        L('Server: on data:', c.toString());
        stream.write('Take it easy!');
    });
    stream.on('end', function() {
        L('Server: on end')
        server.close();
    });
    stream.write('Take it easy!');
});

server.on('close', function() {
    L('Server: on close');
});

server.listen(PIPE_PATH, function() {
    L('Server: on listening');
});