FastDFS源码阅读笔记(二)

一、storage_service.c

//上传文件完成后的回调函数

static void storage_upload_file_done_callback(struct fast_task_info *pTask,const int err_no)

{

    StorageClientInfo *pClientInfo;

    StorageFileContext *pFileContext;

    TrackerHeader *pHeader;

    int result;

    pClientInfo = (StorageClientInfo *)pTask->arg;

    pFileContext =  &(pClientInfo->file_context);

    if (err_no == 0)

    {

        //检查文件是否写重复,然后在修改文件名

        result = storage_service_upload_file_done(pTask);

        if (result == 0)

        {

            if (pFileContext->create_flag & STORAGE_CREATE_FLAG_FILE)

            {

                result = storage_binlog_write(/

                    pFileContext->timestamp2log, /

                    STORAGE_OP_TYPE_SOURCE_CREATE_FILE, /

                    pFileContext->fname2log);

            }

        }

    }

    else

    {

        result = err_no;

    }

 

    if (result == 0)

    {

        int filename_len;

        char *p;

 

        if (pFileContext->create_flag & STORAGE_CREATE_FLAG_FILE)

        {

            //total_upload_count,success_upload_count自增加

            CHECK_AND_WRITE_TO_STAT_FILE3( /

                g_storage_stat.total_upload_count, /

                g_storage_stat.success_upload_count, /

                g_storage_stat.last_source_update)

        }

        //组返回包,返回组名,文件名

        filename_len = strlen(pFileContext->fname2log);

        //重新设置了返回包的总长度

        pClientInfo->total_length = sizeof(TrackerHeader) + FDFS_GROUP_NAME_MAX_LEN + filename_len;

        p = pTask->data + sizeof(TrackerHeader);

        memcpy(p, g_group_name, FDFS_GROUP_NAME_MAX_LEN);

        p += FDFS_GROUP_NAME_MAX_LEN;

        memcpy(p, pFileContext->fname2log, filename_len);

    }

    else

    {

        pthread_mutex_lock(&stat_count_thread_lock);

        if (pFileContext->create_flag & STORAGE_CREATE_FLAG_FILE)

        {

            g_storage_stat.total_upload_count++;  //否则如果上传失败,上传总数加

        }

        pthread_mutex_unlock(&stat_count_thread_lock);

        pClientInfo->total_length = sizeof(TrackerHeader);

    }

    pClientInfo->total_offset = 0;  //total_offset被重新设置为

    pTask->length = pClientInfo->total_length;

    pHeader = (TrackerHeader *)pTask->data;

    pHeader->status = result;

    pHeader->cmd = STORAGE_PROTO_CMD_RESP;

    long2buff(pClientInfo->total_length - sizeof(TrackerHeader),pHeader->pkg_len);

    //在次触发事件void storage_recv_notify_read()而在接收最后一个包时pClientInfo->stage = FDFS_STORAGE_STAGE_NIO_SEND

    //已经更改了这个状态值,因此在该函数里面,直接去调用storage_send_add_event(pTask)函数,发送响应包

    storage_nio_notify(pTask); 

}

 

//数据服务器服务初始化

int storage_service_init()

{

    int result;

    struct storage_nio_thread_data *pThreadData;

    struct storage_nio_thread_data *pDataEnd;

    pthread_t tid;

    pthread_attr_t thread_attr;

 

    if ((result=init_pthread_lock(&g_storage_thread_lock)) != 0)

    {

        return result;

    }

    if ((result=init_pthread_lock(&path_index_thread_lock)) != 0)

    {

        return result;

    }

    if ((result=init_pthread_lock(&stat_count_thread_lock)) != 0)

    {

        return result;

    }

    if ((result=init_pthread_attr(&thread_attr, g_thread_stack_size)) != 0)

    {

        logError("file: "__FILE__", line: %d, " /

            "init_pthread_attr fail, program exit!", __LINE__);

        return result;

    }

    //StorageClientInfo参数所以说在任务Task结构里面用void * 参数来代替

    //g_buff_size缺省是K

    if ((result=free_queue_init(g_max_connections, g_buff_size, /

        g_buff_size, sizeof(StorageClientInfo))) != 0) 

    {

        return result;

    }

    //分配工作线程空间

    //需要注意的是socket工作线程跟磁盘io线程的是分开的

    g_nio_thread_data = (struct storage_nio_thread_data *)malloc(sizeof( /

    struct storage_nio_thread_data) * g_work_threads);

    if (g_nio_thread_data == NULL)

    {

        logError("file: "__FILE__", line: %d, " /

            "malloc %d bytes fail, errno: %d, error info: %s", /

            __LINE__, (int)sizeof(struct storage_nio_thread_data) * /

            g_work_threads, errno, strerror(errno));

        return errno != 0 ? errno : ENOMEM;

    }

    g_storage_thread_count = 0;

    pDataEnd = g_nio_thread_data + g_work_threads;

    for (pThreadData=g_nio_thread_data; pThreadData<pDataEnd; pThreadData++)

    {

        pThreadData->ev_base = event_base_new();

        if (pThreadData->ev_base == NULL)

        {

            result = errno != 0 ? errno : ENOMEM;

            logError("file: "__FILE__", line: %d, " /

                "event_base_new fail.", __LINE__);

            return result;

        }

        if (pipe(pThreadData->pipe_fds) != 0)

        {

            result = errno != 0 ? errno : EPERM;

            logError("file: "__FILE__", line: %d, " /

                "call pipe fail, " /

                "errno: %d, error info: %s", /

                __LINE__, result, strerror(result));

            break;

        }

        if ((result=set_nonblock(pThreadData->pipe_fds[0])) != 0)

        {

            break;

        }

        //创建线程

        if ((result=pthread_create(&tid, &thread_attr,work_thread_entrance, pThreadData)) != 0)

        {

            logError("file: "__FILE__", line: %d, " /

                "create thread failed, startup threads: %d, " /

                "errno: %d, error info: %s", /

                __LINE__, g_storage_thread_count, /

                result, strerror(result));

            break;

        }

        else

        {

            if ((result=pthread_mutex_lock(&g_storage_thread_lock)) != 0)

            {

                logError("file: "__FILE__", line: %d, " /

                    "call pthread_mutex_lock fail, " /

                    "errno: %d, error info: %s", /

                    __LINE__, result, strerror(result));

            }

            g_storage_thread_count++;

            if ((result=pthread_mutex_unlock(&g_storage_thread_lock)) != 0)

            {

                logError("file: "__FILE__", line: %d, " /

                    "call pthread_mutex_lock fail, " /

                    "errno: %d, error info: %s", /

                    __LINE__, result, strerror(result));

            }

        }

    }

    pthread_attr_destroy(&thread_attr);

    last_stat_change_count = g_stat_change_count;

    return result;

}

 

//数据服务器作为客户端监听socket请求

//具体的实现流程是:

//1.创建工作线程,每个在工作线程里面,绑定socket读写事件

//2.数据服务器服务端监听客户端请求,接收到请求后,从任务队列表取一个任务(从头部取),然后

//  pClientInfo->stage = FDFS_STORAGE_STAGE_NIO_INIT;

//  pClientInfo->nio_thread_index = pClientInfo->sock % g_work_threads;//工作线程取余数

//  pThreadData = g_nio_thread_data + pClientInfo->nio_thread_index;   //确定是哪一个工作线程

//  write(pThreadData->pipe_fds[1], &task_addr, sizeof(task_addr))     //向那一个工作线程写入事件(其实是写的任务)

//3.然后触发socket读的事件,注意读的也是Task任务

void storage_accept_loop(int server_sock)

{

    int incomesock;

    struct sockaddr_in inaddr;

    unsigned int sockaddr_len;

    in_addr_t client_addr;

    char szClientIp[IP_ADDRESS_SIZE];

    long task_addr;

    struct fast_task_info *pTask;

    StorageClientInfo *pClientInfo;

    struct storage_nio_thread_data *pThreadData;

    while (g_continue_flag)

    {

        sockaddr_len = sizeof(inaddr);

        incomesock = accept(server_sock, (struct sockaddr*)&inaddr, /

            &sockaddr_len);

        if (incomesock < 0) //error

        {

            if (!(errno == EINTR || errno == EAGAIN))

            {

                logError("file: "__FILE__", line: %d, " /

                    "accept failed, " /

                    "errno: %d, error info: %s", /

                    __LINE__, errno, strerror(errno));

            }

 

            continue;

        }

        client_addr = getPeerIpaddr(incomesock, szClientIp, IP_ADDRESS_SIZE);

        //判断是否允许在可连接的地址列表里面

        if (g_allow_ip_count >= 0)

        {

            if (bsearch(&client_addr, g_allow_ip_addrs, /

                g_allow_ip_count, sizeof(in_addr_t), /

                cmp_by_ip_addr_t) == NULL)

            {

                logError("file: "__FILE__", line: %d, " /

                    "ip addr %s is not allowed to access", /

                    __LINE__, szClientIp);

 

                close(incomesock);

                continue;

            }

        }

 

        if (tcpsetnonblockopt(incomesock) != 0)

        {

            close(incomesock);

            continue;

        }

        pTask = free_queue_pop();

        if (pTask == NULL)

        {

            logError("file: "__FILE__", line: %d, " /

                "malloc task buff failed", /

                __LINE__);

            close(incomesock);

            continue;

        }

        //在这里对pTask->arg的参数进行了赋值

        pClientInfo = (StorageClientInfo *)pTask->arg;

        pClientInfo->sock = incomesock;

        pClientInfo->stage = FDFS_STORAGE_STAGE_NIO_INIT;

        pClientInfo->nio_thread_index = pClientInfo->sock % g_work_threads;//工作线程取余数:一个nio,一个dio

        pThreadData = g_nio_thread_data + pClientInfo->nio_thread_index;

        strcpy(pTask->client_ip, szClientIp);

        strcpy(pClientInfo->tracker_client_ip, szClientIp);

        //使用的这种方法

        task_addr = (long)pTask;

        if (write(pThreadData->pipe_fds[1], &task_addr, /

            sizeof(task_addr)) != sizeof(task_addr))

        {

            close(incomesock);

            free_queue_push(pTask);

            logError("file: "__FILE__", line: %d, " /

                "call write failed, " /

                "errno: %d, error info: %s", /

                __LINE__, errno, strerror(errno));

        }

    }

}

 

void storage_nio_notify(struct fast_task_info *pTask)

{

    StorageClientInfo *pClientInfo;

    struct storage_nio_thread_data *pThreadData;

    long task_addr;

    pClientInfo = (StorageClientInfo *)pTask->arg;

    //去取工作线程,在写入客户端socket

    pThreadData = g_nio_thread_data + pClientInfo->nio_thread_index;

    task_addr = (long)pTask;

    if (write(pThreadData->pipe_fds[1], &task_addr, /

        sizeof(task_addr)) != sizeof(task_addr))

    {

        logError("file: "__FILE__", line: %d, " /

            "call write failed, " /

            "errno: %d, error info: %s", /

            __LINE__, errno, strerror(errno));

        task_finish_clean_up(pTask);

  • 0
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
Windows下使用FastDFS需要进行以下步骤: 1. 安装FastDFS依赖的软件: - 安装C语言编译器,如MinGW、Cygwin或者Visual Studio等。 - 安装libevent库,可以从官网下载源码编译安装,也可以从第三方网站下载预编译的版本。 - 安装libfastcommon库,可以从GitHub上下载源码编译安装。 2. 下载FastDFS源码: - 从FastDFS官方网站下载最新版本的源码压缩包,解压到本地。 3. 编译FastDFS: - 打开命令行界面(如cmd),进入FastDFS源码目录。 - 执行命令`make`编译FastDFS。 - 执行命令`make install`安装FastDFS。 4. 配置FastDFS: - 进入FastDFS安装目录,编辑`tracker.conf`和`storage.conf`配置文件。 - 根据实际需求修改配置参数,如Tracker服务器IP、端口等。 - 配置存储节点的IP、端口、存储路径等信息。 - 保存配置文件并退出。 5. 启动FastDFS: - 执行命令`trackerd start`启动Tracker服务器。 - 执行命令`storaged start`启动存储服务器。 6. 测试FastDFS: - 使用FastDFS提供的命令行工具或者API进行文件上传、下载等操作,验证FastDFS是否正常运行。 请注意,FastDFS是一个基于Linux的分布式文件系统,官方并没有提供Windows下的官方支持。上述步骤是通过在Windows环境下安装依赖库以及编译源码来实现的。在Windows环境下使用FastDFS可能会遇到一些兼容性或者功能限制的问题,建议在Linux环境下部署和使用FastDFS以获得更好的稳定性和性能。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值