前几天写了Apriori算法之后效率很差,在网上查了很多资料,另外的频繁模式挖掘算法一般有FP-Growth 和 Eclat 算法,这两种效率比较高。而FP-Growth算法构建FP-Tree的过程非常复杂,后来在网上找到了另外一种效率更优化且结构更简单的算法,那就是北大邓志鸿老师的PPV算法,看了论文以后觉得这确实是一个好的算法,因此打算加以实现。在实现的第一步就卡住了。在构造PPV-TREE/ FP-TREE 的第一步都是要统计每个事务的频度并且对项集中按照频度进行排序,虽然只是简单的两句话,可是当实现的时候就没有说的这么简单了。我也用了一天的时间来实现这第一步……确实有些事不动手不知道难。在如何实现的问题上也经过了很多的思考,最初打算用 map 来实现,后来为了锻炼就改用了Trie树的方式;如果数据量大,最好的方式应该是 MapReduce 的成名示例 WordCount。好了,下面直接上代码。
#ifndef TRIE_H
#define TRIE_H
#include <fstream>
#include <string>

// 26-way trie that counts word frequencies read from a text file.
// Only English letters are accepted; 'A'-'Z' are folded onto 'a'-'z'.
class trie{
public:
    // fname: input file path; spl: the separator character between words.
    trie(std::string fname, char spl) : filename(fname), sp(spl), root(NULL) {}
    ~trie();
    void insert(std::string);   // add one word and bump its frequency count
    int search(std::string);    // frequency of a word, or -1 if absent
    void openfile();            // read `filename`, splitting on `sp`, inserting tokens
    std::string getFname();
    char getsp();
private:
    struct node{
        char key;               // NOTE(review): never written anywhere in this file
        int count;              // number of inserted words terminating at this node
        node();
        // Recursively free all children, so that ~trie()'s `delete root`
        // releases the whole tree (the original leaked every node but root).
        ~node(){ for(int i = 0; i < 26; ++i) delete child[i]; }
        node *child[26];        // one slot per letter 'a'..'z'
    };
    node *root;                 // lazily allocated on first insert
    std::ifstream in;           // non-copyable member: trie is non-copyable, as an owner should be
    std::string filename;       // input file path
    char sp;                    // token separator
};
#endif // TRIE_H
- #include <iostream>
- #include <cassert>
- #include "trie.h"
- using namespace std;
- trie::node::node()
- {
- count = 0;
- for(int i = 0; i<26; ++i)
- child[i] = NULL;
- }
// Close the input file (if open) and release the trie storage.
trie::~trie()
{
    if(in)              // stream in a good state => it was opened successfully
        in.close();     // ifstream would also close itself on destruction; harmless
    // NOTE(review): `delete root` frees children only if trie::node has a
    // recursive destructor — otherwise every node below root leaks. Verify
    // the node destructor in trie.h.
    delete root;
}
- void trie::openfile()
- {
- in.open(filename.c_str());
- string line;
- assert(in != NULL);
- while(getline(in, line)){
- string tmp;
- while(line.find(sp) != -1)
- {
- tmp = line.substr(0,line.find(sp));
- insert(tmp);
- line = line.substr(line.find(sp)+1, line.size()-1);
- }
- }
- insert(line);
- }
- //trie 树的插入过程,排除含有非英文字母的单词,并且统计单词出现的频度
- void trie::insert(string word){
- if(root == NULL)
- {
- root =new node;
- }
- int index;
- node *loc = root;
- int pos = 0;
- while(loc != NULL && word[pos] != '\0')
- {
- if(word[pos] >='A' && word[pos] <= 'Z')
- index = word[pos] - 'A';
- else if(word[pos]>= 'a' && word[pos]<='z')
- index = word[pos] - 'a';
- else
- return;
- if(loc->child[index] == NULL)
- {
- loc->child[index] = new node;
- }
- ++pos;
- loc =loc->child[index];
- }
- ++(loc->count);
- }
- string trie::getFname()
- {
- return filename;
- }
- char trie::getsp()
- {
- return sp;
- }
- //查找某个单词出现的频度,如果不存在返回-1
- int trie::search(string word)
- {
- node *loc = root;
- assert(root != NULL);
- int index = 0;
- while(loc && word[index] != '\0')
- {
- int pos;
- if(word[index]>='A' && word[index]<='Z')
- pos = word[index] - 'A';
- else if(word[index]>='a' && word[index]<='z')
- pos = word[index] - 'a';
- else
- return -1;
- loc = loc->child[pos];
- ++index;
- }
- if(loc ==NULL)
- return -1;
- return loc->count;
- }
- #ifndef SORT_DATA
- #define SORT_DATA
- #include "trie.h"
- #include <vector>
- #include <fstream>
- class sort_data{
- public:
- sort_data(trie *itree, int sup):tree(itree),support(sup){filename = itree->getFname(); split = itree->getsp();}
- ~sort_data();
- void openfile();
- void count_data();
- private:
- void sorts();
- int partion(int *a, int *b,int begin, int end);
- void quicksort(int *a, int *b, int begin,int end);
- std::vector<std::string> spl;
- trie *tree;
- std::string filename;
- std::ifstream in;
- std::ofstream out;
- int support;
- char split;
- };
- #endif //end define
- // 整体功能是对一个文本中单词进行词频统计,并且对该文本的每行进行排序,如果有相同单词则合并为一个
- // 如果有两个单词词频相同那么按照字典序进行排序。输出文件名称为输入文件名字+你定义的support
- #include "sort_data.h"
- #include "trie.h"
- #include <iostream>
- #include <cassert>
- #include <sstream>
- #include <map>
- using namespace std;
- sort_data::~sort_data()
- {
- in.close();
- out.close();
- // delete tree;
- }
- void sort_data::openfile()
- {
- in.open(filename.c_str());
- assert(in != NULL);
- stringstream ss;
- ss<<support;
- string tmp = filename + ss.str();
- out.open(tmp.c_str());
- }
- //将文本的每一行进行拆分,将每个单词放到vector<string> spl 中
- void sort_data::count_data()
- {
- string line;
- while(getline(in,line))
- {
- spl.clear();
- string tmp;
- while(line.find(split) != -1)
- {
- tmp = line.substr(0,line.find(split));
- spl.push_back(tmp);
- line = line.substr(line.find(split)+1,line.size()-1);
- }
- spl.push_back(line);
- sorts();
- }
- }
// Filter the current line's tokens (`spl`) down to those whose frequency is
// >= support, sort them by descending frequency, emit equal-frequency runs
// in dictionary order (collected through a std::map), and write the result
// line to `out`.
// NOTE(review): uses variable-length arrays (`int a[n]`) — a GCC/Clang
// extension, not standard C++.
void sort_data::sorts()
{
    int n = spl.size();
    int a[n];          // a[j] = index into spl of the j-th surviving token
    int tmp = 0;       // number of surviving tokens
    for(int i = 0;i<n; ++i)
    {
        if(tree->search(spl[i])>=support)
        {
            a[tmp] = i;
            ++tmp;
        }
    }
    int b[tmp];        // b[j] = frequency of the j-th surviving token
    int c[tmp];        // companion permutation: c[j] starts as identity
    for(int k = 0; k<tmp;++k)
    {
        c[k] = k;
    }
    for(int j = 0; j<tmp; ++j)
    {
        int in = a[j];
        b[j] = tree->search(spl[in]);
    }
    // Sort b descending; c records where each original slot ended up.
    quicksort(b,c,0,tmp-1);
    // NOTE(review): this loop computes j and discards it — dead code.
    for(int i = 0; i<tmp; ++i)
    {
        int j = c[i];
        j = a[j];
    }
    map<string, int> qop;   // dedup + dictionary order for an equal-frequency run
    string line;
    int cp = 0;             // cursor into the sorted arrays
    if(tmp == 1)
    {
        // Single survivor: emit it directly, no merging needed.
        int i = a[0];
        line = spl[i]+'\n';
    }
    else{
        // NOTE(review): when tmp == 0 this whole branch appends only '\n',
        // so empty results still produce an output line.
        while(cp<tmp){
            int i;
            if(cp+1 == tmp)
            {
                // Last element: append with a newline and stop.
                // NOTE(review): after this `break`, the `line = line + '\n'`
                // below still runs, producing a doubled newline on this path.
                i = c[cp];
                i = a[i];
                line =line +spl[i]+'\n';
                break;
            }
            if(b[cp]>b[cp+1])
            {
                // Strictly greater frequency than the next: emit alone.
                i = c[cp];
                i = a[i];
                line = line+spl[i]+' ';
                ++cp;
            }
            else
            {
                // Run of equal frequencies: collect the whole run into the
                // map (deduplicating), then emit it in dictionary order.
                while(cp+1 != tmp && (b[cp] == b[cp+1]))
                {
                    i = c[cp];
                    i = a[i];
                    string s = spl[i];
                    ++qop[s];
                    ++cp;
                }
                // Include the final member of the run.
                i = c[cp];
                i = a[i];
                string s =spl[i];
                ++qop[s];
                ++cp;
                for(map<string,int>::iterator it = qop.begin(); it != qop.end(); ++it)
                {
                    line = line+it->first+' ';
                }
                qop.clear();
            }
        }
        line = line + '\n';
    }
    // NOTE(review): `line` already ends in '\n' and endl adds another —
    // output lines are separated by blank lines.
    out<<line;
    out<<endl;
}
- //对每一行出现次数大于support的单词进行排序过程,a为词频数组,b为伴随数组
- int sort_data::partion(int *a,int *b,int begin, int end)
- {
- int index = a[end];
- int start = begin;
- for(int i = begin; i<end; ++i)
- {
- if(a[i]>= index)
- {
- swap(a[start],a[i]);
- swap(b[start],b[i]);
- ++start;
- }
- }
- swap(a[start],a[end]);
- swap(b[start],b[end]);
- return start;
- }
- void sort_data::quicksort(int *a,int *b,int begin,int end)
- {
- if(begin<end)
- {
- int i = partion(a,b,begin,end);
- quicksort(a,b,begin,i-1);
- quicksort(a,b,i+1,end);
- }
- }
- #include "trie.h"
- #include <iostream>
- #include "sort_data.h"
- using namespace std;
- int main()
- {
- trie tr("in.txt", ' ');
- tr.openfile();
- cout<<tr.search("balancing")<<endl;
- sort_data sort(&tr, 5);
- sort.openfile();
- sort.count_data();
- return 0;
- }