用C++自己写一个rm命令替换系统rm,执行mv * /Users/xxx/.Trash(废纸篓)

rm xxx

mv xxx ~/.Trash

rm命令简单但伤害性极大,现在用C++自己写一个rm命令执行mv * /Users/xxx/.Trash(废纸篓),给自己一碗回头药。

1、源码:

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>
#include <regex>
#include <string>
#include <vector>
using namespace std;
#define BUF_SIZE 1024  
#define flag regex_constants::ECMAScript
// Run a shell command through system() and report whether it exited with
// status 0.
//
// cmd:  the shell command line to execute.
// blog: reserved for verbose logging of the command's output; currently
//       unused. (A popen()/fgets() based capture was removed because the
//       fgets() call after popen() crashed with a segmentation fault on
//       macOS regardless of whether popen() succeeded — see
//       https://pubs.opengroup.org/onlinepubs/9699919799/functions/popen.html)
//
// Returns true iff the command's exit status is zero. The original version
// left a dead `if(false){...}` block after the return and fell off the end
// of a non-void function; both are removed here.
bool exec(const char* cmd, bool blog) {
	(void)blog;  // intentionally unused for now — see comment above
	const int retcode = system(cmd);
	return retcode == 0;
}
int main(int argc,char* argv[]){
    if(argc<=1){
        cout<<"please input a valid file!"<<endl;
		return 1;
    }
	int iCountSuc(-1),iCountFail(-1);
	string cmdsls[argc-1],cmdsmv[argc-1],errs[argc-1];
	char mv[BUF_SIZE],desti[BUF_SIZE],fail[BUF_SIZE];
	strcpy(mv,"mv ");
	strcpy(desti," /Users/haypin/.Trash");
	strcpy(fail," FAILED! ! !");
	char chard[3],charf[3],chari[3],charP[3];
	char charR[3],charr[3],charv[3],charW[3];
	strcpy(chard,"-d");
	strcpy(charf,"-f");
	strcpy(chari,"-i");
	strcpy(charP,"-P");
	strcpy(charR,"-R");
	strcpy(charr,"-r");
	strcpy(charv,"-v");
	strcpy(charW,"-W");
	regex regd(chard,flag);	// 去除,mv直接移动目录
	regex regf(charf,flag);	// 保留,与mv -f作用相同
	regex regi(chari,flag);	// 保留,与mv -i作用相似
	regex regP(charP,flag);	// 去除
	regex regR(charR,flag);	// 去除,mv直接移动目录
	regex regr(charr,flag);	// 去除,等价于-R
	regex regv(charv,flag);	// 保留,与mv -v作用相同
	regex regW(charW,flag);	// break,Attempt to undelete the named files
	regex reghyphen("--",flag);
	bool bhyphen(false);
	string mv_joint;
	mv_joint.append("mv ");
	for(int i(1);i<argc;i++){
		bhyphen=regex_search(argv[i],reghyphen);
		if(!bhyphen){	// --前的都是选项,--后的不是选项
			bool bd(regex_search(argv[i],regd));
			bool bP(regex_search(argv[i],regP));
			bool bR(regex_search(argv[i],regR));
			bool br(regex_search(argv[i],regr));
			bool bW(regex_search(argv[i],regW));
			if(bd || bP || bR || br) {
				cout<<"continue "<<argv[i]<<endl;
				continue;
			}
			if(bW) {
				cout<<"-W mean Attempt to undelete the named files, will break"<<endl;
				break;
			}
			bool bf(regex_search(argv[i],regf));
			bool bi(regex_search(argv[i],regi));
			bool bv(regex_search(argv[i],regv));
			if(bf) {
				mv_joint.append(charf);
				mv_joint.append(" ");
				continue;
			}
			if(bi) {
				mv_joint.append(chari);
				mv_joint.append(" ");
				continue;
			}
			if(bv) {
				mv_joint.append(charv);
				mv_joint.append(" ");
				continue;
			}
		}
		cmdsls[i-1].append("ls ");		// ls /Users/haypin/.Trash/argv[i],如果存在就mv argv[i] desti/argv[i]+08-10-14:23:11
		cmdsls[i-1].append(desti);
		cmdsls[i-1].append("/");
		cmdsls[i-1].append(argv[i]);

		cmdsmv[i-1].append(mv_joint);
		cmdsmv[i-1].append(argv[i]);
		cmdsmv[i-1].append(desti);
		if(exec(cmdsls[i-1].c_str(),false)){
			time_t now(time(0));
			tm *now_tm(localtime(&now));
			char buf[100];
			strcpy(buf,"/");
			cmdsmv[i-1].append(buf);
			cmdsmv[i-1].append(argv[i]);
			strcpy(buf,"\\ \\ ");
			cmdsmv[i-1].append(buf);
			sprintf(buf,"%d",now_tm->tm_mon);
			cmdsmv[i-1].append(buf);
			cmdsmv[i-1].append("-");
			sprintf(buf,"%d",now_tm->tm_mday);
			cmdsmv[i-1].append(buf);
			cmdsmv[i-1].append("-");
			sprintf(buf,"%d",now_tm->tm_hour);
			cmdsmv[i-1].append(buf);
			cmdsmv[i-1].append(":");
			sprintf(buf,"%d",now_tm->tm_min);
			cmdsmv[i-1].append(buf);
			cmdsmv[i-1].append(":");
			sprintf(buf,"%d",now_tm->tm_sec);
			cmdsmv[i-1].append(buf);
			cmdsmv[i-1].append(".");
			sprintf(buf,"%d",int(now));
			cmdsmv[i-1].append(buf);
		}
		cout<<cmdsmv[i-1]<<endl;
		if(exec(cmdsmv[i-1].c_str(),true)){
			iCountSuc +=1;
			continue;
		}
		iCountFail+=1;
		string err;
		err.append(mv);
		err.append(argv[i]);
		err.append(fail);
		errs[iCountFail]=err;
	}  
	if(iCountSuc>=0){
		for(string& imv:cmdsmv) cout<<imv<<" success1"<<endl;
	}
	if(iCountFail>=0){
		// for(string& irr:errs)cout<<irr<<endl;	// 发生错误时system(cmd)会打印标准错误的
	}
	string res;
	res.append("total ");
	char str[BUF_SIZE];
	sprintf(str, "%d", iCountSuc+1);
	res.append(str);
	res.append(" success2");
	if(iCountFail>=0 || iCountSuc<0){
		res.append(", ");
		sprintf(str, "%d", iCountFail+1);
		res.append(str);
		res.append(fail);
	}
	cout<<res<<endl;
	return 0;
}

 

2、CMakeLists.txt:

# Minimal build script for the "rrm" trash-based rm replacement
# (single translation unit: rrm.cpp).
cmake_minimum_required(VERSION 3.21)
project(rrm)
# The source uses only C++11 features (range-for, nullptr, regex).
set(CMAKE_CXX_STANDARD 11)
add_executable(rrm rrm.cpp)

2.5、再给个VScode的编译任务设置tasks.json:

{
	"version": "2.0.0",
	"tasks": [
		{
			"label":"cd",
			"type":"shell",
			"command":"cd",
			"args":[
				"${fileDirname}/build"
			],
			"problemMatcher":"$gcc",
		},
		{
			"label":"cmake ..",
			"type":"shell",
			"command":"cmake",
			"args":[
				// ".."
				"${fileDirname}",
			],
			
			"problemMatcher":"$gcc",
		},
		{
			"label":"cmake --build .",
			"type":"shell",
			"command":"cmake",
			"args":[
				"--build",
				// "."
				"${fileDirname}/build",
			],
			"dependsOrder":"sequence",
			"dependsOn":[
				"cd",		// VScode dependsOn依赖任务的cd <cwd>对后序任务无影响,每次任务的cwd都位于${workspaceFolder}
				"cmake .."
			],
			"problemMatcher":"$gcc",
		},
	]
}

注意到tasks.json每个任务的执行都是相互独立、不影响的,即便通过"dependsOn"设置依赖的任务列表并开启"dependsOrder"指定执行顺序。比如上面先执行"cd"任务将cwd切换到${fileDirname}/build,后执行"cmake .."命令,则"cmake .."命令的cwd并不相应地保持在${fileDirname}/build,每个任务执行的cwd都初始位于${workspaceFolder}。幸好cmake和cmake --build命令提供<cwd>选项可以直接指定工作目录,所以直接写绝对路径就可以。

这个CMakeLists.txt等价于:

mkdir build

cd build

cmake ..

cmake --build .

编译出rrm可执行文件

3、查看$PATH

 haypin@MBP  ~  echo $PATH
/Users/haypin/.local/bin
:/Applications/CMake.app/Contents/bin
:/Users/haypin/Go1201/bin
:/Users/haypin/Go/bin
:/Users/haypin/miniconda3/condabin
:/Library/Frameworks/Python.framework/Versions/2.7/bin
:/usr/local/bin
:/usr/bin
:/bin
:/usr/sbin
:/sbin
:/Applications/VMware Fusion.app/Contents/Public
:/usr/local/go/bin
:/usr/local/share/dotnet
:/Library/Apple/usr/bin
:/Library/Frameworks/Mono.framework/Versions/Current/Commands
:/usr/local/mysql-8.0.21-macos10.15-x86_64/bin
 haypin@MBP  ~  which rm
/bin/rm

系统rm命令在/bin目录,那将自己写的rrm链接到$PATH比/bin更靠前的路径就可以被优先找到,我选择在/Users/haypin/.local/bin目录下链接自己写的rrm:

 haypin@MBP  ~  cd ~/.local/bin
 haypin@MBP  ~/.local/bin  ln /Users/haypin/cpp_cmake_js_java_python/rrm/build/rrm rm
 haypin@MBP  ~/.local/bin  ll rm
-rwxr-xr-x  1 haypin  staff    56K  8 10 10:29 rm

关闭当前终端后再打开一个:

 haypin@MBP  ~  which rm
/Users/haypin/.local/bin/rm

此时的rm命令就是最先找到的自己写的rrm了。

4、测试

 haypin@MBP  ~/cpp_cmake_js_java_python/rrm  touch robot
 haypin@MBP  ~/cpp_cmake_js_java_python/rrm  rm robot
mv robot /Users/haypin/.Trash success
total 1 success
 haypin@MBP  ~/cpp_cmake_js_java_python/rrm  cd ~/.Trash
 haypin@MBP  ~/.Trash  ll
total 0
-rw-r--r--  1 haypin  staff     0B  8 10 11:21 robot
 haypin@MBP  ~/.Trash 

5、尽情地rm吧,妈妈再也不怕我翻车了 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
TransH嵌入模型是一种基于知识图谱的实体关系表示学习模型,它是在TransE模型的基础上发展而来的。下面是TransH嵌入模型的详细介绍及每一步骤的Python实现代码。 1. 模型介绍 TransH模型的主要思想是为每个关系定义一个超平面,将实体映射到该超平面上。这个超平面被称为关系空间,实体被映射到该空间中的一个点上。在关系空间中,实体之间的距离可以通过两个实体在该超平面上的投影点之间的欧氏距离来计算。TransH模型的目标是最小化实体和关系之间的距离,并将其映射到关系空间中。 2. 数据处理 在实现TransH模型之前,需要对原始数据进行处理。假设原始数据包含三元组(head,relation,tail),其中head和tail表示实体,relation表示实体之间的关系。首先,需要将每个实体和关系映射到一个唯一的数字ID上。然后,将三元组表示为数字ID的形式。最后,将数据分为三部分:训练集、验证集和测试集。 以下是Python代码实现: ```python import numpy as np # 将实体和关系映射到数字ID def get_entity_id(entity_list): entity_id = {} for entity in entity_list: if entity not in entity_id: entity_id[entity] = len(entity_id) return entity_id def get_relation_id(relation_list): relation_id = {} for relation in relation_list: if relation not in relation_id: relation_id[relation] = len(relation_id) return relation_id # 将三元组表示为数字ID的形式 def get_triple(triple_list, entity_id, relation_id): triple = [] for triple_str in triple_list: head, relation, tail = triple_str.strip().split('\t') triple.append((entity_id[head], relation_id[relation], entity_id[tail])) return np.array(triple) # 将数据分为训练集、验证集和测试集 def split_data(triple, ratio=(0.7, 0.2, 0.1)): train_ratio, valid_ratio, test_ratio = ratio train_size = int(len(triple) * train_ratio) valid_size = int(len(triple) * valid_ratio) test_size = len(triple) - train_size - valid_size train_triple = triple[:train_size] valid_triple = triple[train_size:train_size+valid_size] test_triple = triple[train_size+valid_size:] return train_triple, valid_triple, test_triple ``` 3. 
模型训练 TransH模型的训练分为两个步骤:第一步是训练TransE模型,第二步是在TransE模型的基础上训练TransH模型。 3.1 训练TransE模型 TransE模型的目标是最小化三元组中实体和关系之间的距离。具体来说,对于三元组(h,r,t),TransE模型的目标是最小化以下损失函数: $ L = \sum_{(h,r,t) \in S} max(0, \gamma + d(h+r,t) - d(h,r,t)) $ 其中,S表示训练集,d表示欧氏距离,$\gamma$表示边界值,$d(h+r,t)$表示实体h与关系r的和向量与实体t的向量之间的欧氏距离,$d(h,r,t)$表示实体h、关系r和实体t之间的欧氏距离。 以下是Python代码实现: ```python import torch import torch.nn as nn import torch.nn.functional as F class TransE(nn.Module): def __init__(self, n_entity, n_relation, embedding_dim): super(TransE, self).__init__() self.entity_embedding = nn.Embedding(n_entity, embedding_dim) self.relation_embedding = nn.Embedding(n_relation, embedding_dim) def forward(self, head, relation, tail): head_embedding = self.entity_embedding(head) relation_embedding = self.relation_embedding(relation) tail_embedding = self.entity_embedding(tail) score = torch.norm(head_embedding + relation_embedding - tail_embedding, p=2, dim=1) return score def loss(self, score, margin=1.0): return torch.mean(F.relu(margin + score)) def predict(self, head, relation): head_embedding = self.entity_embedding(head) relation_embedding = self.relation_embedding(relation) tail_embedding = head_embedding + relation_embedding return tail_embedding ``` 3.2 训练TransH模型 在TransE模型的基础上,TransH模型为每个关系定义一个超平面,将实体映射到该超平面上。具体来说,在TransH模型中,每个关系r都有一个超平面$W_r$,该超平面由一个法向量$w_r$和一个点$p_r$确定。实体h和t在关系空间中的投影点分别为$h_r$和$t_r$。对于三元组(h,r,t),TransH模型的目标是最小化以下损失函数: $ L = \sum_{(h,r,t) \in S} max(0, \gamma + d(h_r + w_r,t_r) - d(h_r,t_r) + d(h+r,t) - d(h,t)) $ 其中,S表示训练集,$\gamma$表示边界值,$d(h_r + w_r,t_r)$表示实体h和关系r的超平面投影点的和向量与实体t的超平面投影点之间的欧氏距离,$d(h_r,t_r)$表示实体h和实体t在关系r的超平面上的投影点之间的欧氏距离,$d(h+r,t)$表示实体h与关系r的和向量与实体t的向量之间的欧氏距离,$d(h,t)$表示实体h和实体t之间的欧氏距离。 以下是Python代码实现: ```python class TransH(nn.Module): def __init__(self, n_entity, n_relation, embedding_dim): super(TransH, self).__init__() self.entity_embedding = nn.Embedding(n_entity, embedding_dim) self.relation_embedding = nn.Embedding(n_relation, embedding_dim) self.relation_hyperplane_normal 
= nn.Embedding(n_relation, embedding_dim) self.relation_hyperplane_point = nn.Embedding(n_relation, embedding_dim) def forward(self, head, relation, tail): head_embedding = self.entity_embedding(head) relation_embedding = self.relation_embedding(relation) tail_embedding = self.entity_embedding(tail) relation_hyperplane_normal = self.relation_hyperplane_normal(relation) relation_hyperplane_point = self.relation_hyperplane_point(relation) head_projection = head_embedding - torch.sum(head_embedding * relation_hyperplane_normal, dim=1, keepdims=True) * relation_hyperplane_normal tail_projection = tail_embedding - torch.sum(tail_embedding * relation_hyperplane_normal, dim=1, keepdims=True) * relation_hyperplane_normal relation_hyperplane_projection = relation_embedding - torch.sum(relation_embedding * relation_hyperplane_normal, dim=1, keepdims=True) * relation_hyperplane_normal score = torch.norm(head_projection + relation_hyperplane_projection - tail_projection, p=2, dim=1) return score def loss(self, score, margin=1.0): return torch.mean(F.relu(margin + score)) def predict(self, head, relation): head_embedding = self.entity_embedding(head) relation_embedding = self.relation_embedding(relation) relation_hyperplane_normal = self.relation_hyperplane_normal(relation) relation_hyperplane_point = self.relation_hyperplane_point(relation) head_projection = head_embedding - torch.sum(head_embedding * relation_hyperplane_normal, dim=1, keepdims=True) * relation_hyperplane_normal relation_hyperplane_projection = relation_embedding - torch.sum(relation_embedding * relation_hyperplane_normal, dim=1, keepdims=True) * relation_hyperplane_normal tail_embedding = head_projection + relation_hyperplane_projection return tail_embedding ``` 4. 
模型评估 模型评估的主要目标是计算模型的预测准确率。具体来说,需要计算模型在验证集和测试集上的平均准确率(Mean Average Precision,MAP)和平均逆排名(Mean Reciprocal Rank,MRR)。 以下是Python代码实现: ```python def evaluate(model, triple, entity_id, relation_id, device): with torch.no_grad(): hits10 = 0 hits1 = 0 ranks = [] for head, relation, tail in triple: head = torch.tensor([head], dtype=torch.long).to(device) relation = torch.tensor([relation], dtype=torch.long).to(device) tail = torch.tensor([tail], dtype=torch.long).to(device) all_entity = torch.tensor(list(entity_id.values()), dtype=torch.long).to(device) all_entity = all_entity.unsqueeze(0).repeat(len(entity_id), 1) score = model(head, relation, all_entity) _, indices = torch.sort(score) indices = indices.cpu().numpy().tolist()[0] rank = indices.index(tail.item()) + 1 ranks.append(rank) if rank <= 10: hits10 += 1 if rank == 1: hits1 += 1 mrr = np.mean([1/rank for rank in ranks]) map = 0 for i, rank in enumerate(ranks): if rank <= 10: map += (i+1) / hits10 map /= len(ranks) return hits10/len(triple), hits1/len(triple), mrr, map ``` 以上就是TransH嵌入模型的详细介绍及每一步骤的Python实现代码。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值