2023/4/14

这两个C程序展示了如何使用mkfifo创建管道并进行读写操作。mkfifo_write.c程序不断从标准输入读取数据,直到遇到quit命令,然后将其写入管道。mkfifo_read.c程序则从管道中读取数据并打印,同样在遇到quit时结束。
摘要由CSDN通过智能技术生成

 mkfifo_write.c

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>

int main(int argc, const char *argv[])
{
	int fd;
	char ch[1024] = "";

	if (argc != 2)
	{
		fprintf(stderr, "Usage:%s fifo\n", argv[0]);
		return -1;
	}

	/* O_RDWR (rather than O_WRONLY) so open() does not block waiting
	 * for a reader to appear on the FIFO. */
	fd = open(argv[1], O_RDWR);
	if (fd < 0)
	{
		perror("open");
		return -1;
	}

	while (1)
	{
		/* Stop on EOF/read error instead of looping forever on a
		 * stale buffer. */
		if (fgets(ch, sizeof(ch), stdin) == NULL)
			break;

		/* Write only the actual string plus its NUL terminator --
		 * not the whole 1024-byte buffer, which would leak stale
		 * bytes from previous (longer) lines to the reader. */
		if (write(fd, ch, strlen(ch) + 1) < 0)
		{
			perror("write");
			break;
		}

		/* fgets keeps the trailing '\n', so compare the prefix only. */
		if (strncmp(ch, "quit", 4) == 0)
			break;
	}

	close(fd);
	return 0;
}

mkfifo_read.c

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>

int main(int argc, const char *argv[])
{
	int fd;
	char ch[1024] = "";
	/* read() returns ssize_t: -1 on error. A size_t here would wrap
	 * -1 to a huge positive value and hide errors. */
	ssize_t size;

	if (argc != 2)
	{
		fprintf(stderr, "Usage:%s fifo\n", argv[0]);
		return -1;
	}

	/* O_RDWR keeps a writer end open in this process, so read() never
	 * sees a permanent EOF when the peer closes. */
	fd = open(argv[1], O_RDWR);
	if (fd < 0)
	{
		perror("open");
		return -1;
	}

	while (1)
	{
		/* Leave one byte of room so the buffer can always be
		 * NUL-terminated before printing with %s. */
		size = read(fd, ch, sizeof(ch) - 1);
		if (size < 0)
		{
			perror("read");
			break;
		}
		if (size == 0)
		{
			continue;
		}
		ch[size] = '\0';

		if (strncmp(ch, "quit", 4) == 0)
			break;

		printf("Read: %s\n", ch);
	}

	close(fd);
	return 0;
}

# ID3-style decision tree (binary threshold splits, information-gain criterion).
import math

import numpy as np


class Node:
    """One node of a binary decision tree.

    Internal nodes hold a split (col, value) and route rows into the
    left/right subtrees; leaf nodes hold ``result``, the predicted label.
    """

    def __init__(self, col=None, value=None, result=None, left=None, right=None):
        self.col = col        # index of the feature column used for the split
        self.value = value    # threshold: rows with row[col] <= value go left
        self.result = result  # predicted label (set on leaf nodes only)
        self.left = left      # subtree for rows with row[col] <= value
        self.right = right    # subtree for rows with row[col] > value


class ID3Tree:
    """Decision tree trained by maximizing information gain (ID3-style),
    using binary "<= threshold" splits and epsilon pre-pruning."""

    def __init__(self, epsilon=0.1):
        self.epsilon = epsilon  # minimum information gain required to split
        self.tree = None        # root Node, populated by fit()

    def calc_entropy(self, data):
        """Return the Shannon entropy (base 2) of the labels in *data*.

        Each row's last element is taken as its label.
        """
        n = len(data)
        label_counts = {}
        for row in data:
            label = row[-1]
            if label not in label_counts:
                label_counts[label] = 0
            label_counts[label] += 1
        entropy = 0.0
        for key in label_counts:
            prob = float(label_counts[key]) / n
            entropy -= prob * math.log(prob, 2)
        return entropy

    def split_data(self, data, col, value):
        """Partition *data* into (left, right) lists of rows:
        left gets rows with row[col] <= value, right gets the rest."""
        left_data = []
        right_data = []
        for row in data:
            if row[col] <= value:
                left_data.append(row)
            else:
                right_data.append(row)
        return left_data, right_data

    def choose_best_feature(self, data):
        """Return (best_feature, best_value) maximizing information gain
        over all (column, unique value) candidate splits.

        Returns (-1, None) when no candidate yields positive gain.
        """
        n_features = len(data[0]) - 1
        base_entropy = self.calc_entropy(data)
        best_info_gain = 0.0
        best_feature = -1
        best_value = None
        for col in range(n_features):
            unique_values = set(row[col] for row in data)
            for value in unique_values:
                sub_left, sub_right = self.split_data(data, col, value)
                prob_left = len(sub_left) / float(len(data))
                new_entropy = (prob_left * self.calc_entropy(sub_left)
                               + (1 - prob_left) * self.calc_entropy(sub_right))
                info_gain = base_entropy - new_entropy
                if info_gain > best_info_gain:
                    best_info_gain = info_gain
                    best_feature = col
                    best_value = value
        return best_feature, best_value

    def build_tree(self, data):
        """Recursively build the subtree for *data*; return its root Node."""
        y = [row[-1] for row in data]
        # All labels identical: pure leaf.
        if len(set(y)) == 1:
            return Node(result=y[0])
        # No feature columns left: majority-vote leaf.
        if len(data[0]) == 1:
            return Node(result=max(set(y), key=y.count))
        best_feature, best_value = self.choose_best_feature(data)
        # No split has positive gain: majority-vote leaf. (Without this
        # guard, col=-1 / value=None would be used as a split and crash.)
        if best_feature == -1:
            return Node(result=max(set(y), key=y.count))
        # Pre-pruning: stop when the best achievable gain is below epsilon.
        base_entropy = self.calc_entropy(data)
        sub_left, sub_right = self.split_data(data, best_feature, best_value)
        prob_left = len(sub_left) / float(len(data))
        new_entropy = (prob_left * self.calc_entropy(sub_left)
                       + (1 - prob_left) * self.calc_entropy(sub_right))
        if base_entropy - new_entropy < self.epsilon:
            return Node(result=max(set(y), key=y.count))
        node = Node(col=best_feature, value=best_value)
        node.left = self.build_tree(sub_left)
        node.right = self.build_tree(sub_right)
        return node

    def fit(self, X, y):
        """Train on feature matrix X (n_samples, n_features) and label
        vector y (n_samples,); labels are appended as the last column."""
        data = np.concatenate((X, y.reshape(-1, 1)), axis=1)
        self.tree = self.build_tree(data)

    def predict_one(self, node, row):
        """Route a single feature row down the tree; return its label."""
        if node.result is not None:
            return node.result
        if row[node.col] <= node.value:
            return self.predict_one(node.left, row)
        else:
            return self.predict_one(node.right, row)

    def predict(self, X):
        """Return predicted labels for each row of X as a numpy array."""
        y_pred = []
        for row in X:
            y_pred.append(self.predict_one(self.tree, row))
        return np.array(y_pred)


# Example usage (features in columns 2:4 of b.csv, labels in column -2):
#     import pandas as pd
#     data = pd.read_csv('b.csv')
#     X = data.iloc[:, 2:4].values
#     y = data.iloc[:, -2].values
#     tree = ID3Tree()
#     tree.fit(X, y)
#     print(tree.predict(X))
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值