#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Created on 2015-1-21
@author: beyondzhou
@name: nltk_compute_collocation.py
'''
import json
import nltk
# Load in human language data (Google+ activities previously exported as JSON)
DATA = r'E:\eclipse\Google\dFile\107033731246200681024.json'
# Use a context manager so the file handle is closed promptly instead of leaked
with open(DATA) as f:
    data = json.load(f)

# Number of collocations to find
N = 25

# Naive whitespace tokenization of each activity's 'content' field, lowercased
all_tokens = [token for activity in data
              for token in activity['object']['content'].lower().split()]

finder = nltk.BigramCollocationFinder.from_words(all_tokens)
finder.apply_freq_filter(2)  # ignore bigrams occurring fewer than 2 times

# Build the stopword set once: calling stopwords.words('english') inside the
# lambda would re-read and linearly scan the list for every candidate word.
stop_words = set(nltk.corpus.stopwords.words('english'))
finder.apply_word_filter(lambda w: w in stop_words)

# Rank candidate bigrams by the Jaccard association measure
scorer = nltk.metrics.BigramAssocMeasures.jaccard
collocations = finder.nbest(scorer, N)

for collocation in collocations:
    # print() form works on both Python 2 and 3 (original used the 2-only statement)
    print(' '.join(collocation))
# Sample output:
# cabo pulmo
# maker faire
# bay mini
# east bay
# pulmo sunrise
# never search
# mini maker
# money supply
# desired outcomes
# open data
# , +
# important piece
# it's also
# new services
# new york