Machine Learning with R

The examples below are written in R, using the MicrosoftML and RevoScaleR packages:

rxFastTrees: Fast Tree
# Estimate a binary classification tree
 infert1 <- infert
 infert1$isCase <- (infert1$case == 1)
 treeModel <- rxFastTrees(formula = isCase ~ age + parity + education + spontaneous + induced,
         data = infert1)

 # Create an xdf file with per-instance results using rxPredict
 xdfOut <- tempfile(pattern = "scoreOut", fileext = ".xdf")
 scoreDS <- rxPredict(treeModel, data = infert1,
    extraVarsToWrite = c("isCase", "Score"), 
    outData = xdfOut)

 rxDataStep(scoreDS, numRows = 10)

 # Clean-up
 file.remove(xdfOut)
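
 A quick way to evaluate this classifier is an ROC curve, as the logistic regression examples later in this post do. A minimal sketch, assuming rxPredict for a binary rxFastTrees model also writes a "Probability" column:

 # Score to a data frame and plot the ROC curve (sketch)
 treeScoreDF <- rxPredict(treeModel, data = infert1, extraVarsToWrite = "isCase")
 rxRocCurve(actualVarName = "isCase", predVarNames = "Probability", data = treeScoreDF)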

 ######################################################################
 # Estimate a regression fast tree

 # Use the built-in data set 'airquality' to create test and train data
 DF <- airquality[!is.na(airquality$Ozone), ]  
 DF$Ozone <- as.numeric(DF$Ozone)
 randomSplit <- rnorm(nrow(DF))
 trainAir <- DF[randomSplit >= 0,]
 testAir <- DF[randomSplit < 0,]
 airFormula <- Ozone ~ Solar.R + Wind + Temp

 # Regression Fast Tree for train data
 fastTreeReg <- rxFastTrees(airFormula, type = "regression", 
     data = trainAir)  

 # Put score and model variables in data frame
 fastTreeScoreDF <- rxPredict(fastTreeReg, data = testAir, 
     writeModelVars = TRUE)

 # Plot actual versus predicted values with smoothed line
 rxLinePlot(Score ~ Ozone, type = c("p", "smooth"), data = fastTreeScoreDF)
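
 To quantify the fit rather than just plot it, one can compute the test-set root mean squared error from the "Ozone" and "Score" columns written above; a minimal sketch:

 # Root mean squared error on the test set (sketch)
 sqrt(mean((fastTreeScoreDF$Ozone - fastTreeScoreDF$Score)^2))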
rxFastForest: Fast Forest
# Estimate a binary classification forest
 infert1 <- infert
 infert1$isCase <- (infert1$case == 1)
 forestModel <- rxFastForest(formula = isCase ~ age + parity + education + spontaneous + induced,
         data = infert1)

 # Create a text file with per-instance results using rxPredict
 txtOutFile <- tempfile(pattern = "scoreOut", fileext = ".txt")
 txtOutDS <- RxTextData(file = txtOutFile)
 scoreDS <- rxPredict(forestModel, data = infert1,
    extraVarsToWrite = c("isCase", "Score"), outData = txtOutDS)

 # Print the first ten rows
 rxDataStep(scoreDS, numRows = 10)

 # Clean-up
 file.remove(txtOutFile)

 ######################################################################
 # Estimate a regression fast forest

 # Use the built-in data set 'airquality' to create test and train data
 DF <- airquality[!is.na(airquality$Ozone), ]  
 DF$Ozone <- as.numeric(DF$Ozone)
 randomSplit <- rnorm(nrow(DF))
 trainAir <- DF[randomSplit >= 0,]
 testAir <- DF[randomSplit < 0,]
 airFormula <- Ozone ~ Solar.R + Wind + Temp

 # Regression Fast Forest for train data
 rxFastForestReg <- rxFastForest(airFormula, type = "regression", 
     data = trainAir)  

 # Put score and model variables in data frame
 rxFastForestScoreDF <- rxPredict(rxFastForestReg, data = testAir, 
     writeModelVars = TRUE)

 # Plot actual versus predicted values with smoothed line
 rxLinePlot(Score ~ Ozone, type = c("p", "smooth"), data = rxFastForestScoreDF)
rxOneClassSvm: One-Class SVM
# Estimate a One-Class SVM model
 trainRows <- c(1:30, 51:80, 101:130)
 testRows <- !(1:150 %in% trainRows)
 trainIris <- iris[trainRows,]
 testIris <- iris[testRows,]

 svmModel <- rxOneClassSvm(
     formula = ~Sepal.Length + Sepal.Width + Petal.Length + Petal.Width,
     data = trainIris)

 # Append some non-iris data to the test data set
 testIris$isIris <- 1
 notIris <- data.frame(
     Sepal.Length = c(2.5, 2.6),
     Sepal.Width = c(.75, .9),
     Petal.Length = c(2.5, 2.5),
     Petal.Width = c(.8, .7),
     Species = c("not iris", "not iris"),
     isIris = 0)
 testIris <- rbind(testIris, notIris)  

 scoreDF <- rxPredict(svmModel, 
      data = testIris, extraVarsToWrite = "isIris")

 # Look at the last few observations
 tail(scoreDF)
 # Look at average scores conditioned by 'isIris'
 rxCube(Score ~ F(isIris), data = scoreDF)
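
 To turn the continuous scores into anomaly flags, threshold them. A minimal sketch, assuming (as the rxCube averages suggest) that higher scores mean more anomalous; the 0.9 quantile is an arbitrary illustrative cutoff:

 # Flag the highest-scoring observations as anomalies (sketch)
 cutoff <- quantile(scoreDF$Score, 0.9)
 table(scoreDF$isIris, anomaly = scoreDF$Score > cutoff)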
rxNeuralNet: Neural Net
# Estimate a binary neural net
 rxNeuralNet1 <- rxNeuralNet(isCase ~ age + parity + education + spontaneous + induced,
                   transforms = list(isCase = case == 1),
                   data = infert)

 # Score to a data frame
 scoreDF <- rxPredict(rxNeuralNet1, data = infert, 
     extraVarsToWrite = "isCase",
     outData = NULL) # return a data frame

 # Compute and plot the Receiver Operating Characteristic (ROC) curve and the AUC
 roc1 <- rxRoc(actualVarName = "isCase", predVarNames = "Probability", data = scoreDF) 
 plot(roc1)
 rxAuc(roc1)

 #########################################################################
 # Regression neural net

 # Create an xdf file with the attitude data
 myXdf <- tempfile(pattern = "tempAttitude", fileext = ".xdf")
 rxDataStep(attitude, myXdf, rowsPerRead = 50, overwrite = TRUE)
 myXdfDS <- RxXdfData(file = myXdf)

 attitudeForm <- rating ~ complaints + privileges + learning + 
     raises + critical + advance

 # Estimate a regression neural net 
 res2 <- rxNeuralNet(formula = attitudeForm,  data = myXdfDS, 
     type = "regression")

 # Score to data frame
 scoreOut2 <- rxPredict(res2, data = myXdfDS, 
     extraVarsToWrite = "rating")

 # Plot the rating versus the score with a regression line
 rxLinePlot(rating~Score, type = c("p","r"), data = scoreOut2)

 # Clean up   
 file.remove(myXdf)    

 #############################################################################
 # Multi-class neural net
 multiNN <- rxNeuralNet(
     formula = Species~Sepal.Length + Sepal.Width + Petal.Length + Petal.Width,
     type = "multiClass", data = iris)
 scoreMultiDF <- rxPredict(multiNN, data = iris, 
     extraVarsToWrite = "Species", outData = NULL)    
 # Print the first rows of the data frame with scores
 head(scoreMultiDF)
 # Compute % of incorrect predictions
 badPrediction <- scoreMultiDF$Species != scoreMultiDF$PredictedLabel
 sum(badPrediction)*100/nrow(scoreMultiDF)
 # Look at the observations with incorrect predictions
 scoreMultiDF[badPrediction,]
rxFastLinear: Fast Linear Model -- Stochastic Dual Coordinate Ascent
# Train a binary classification model with rxFastLinear
 res1 <- rxFastLinear(isCase ~ age + parity + education + spontaneous + induced,
                   transforms = list(isCase = case == 1),
                   data = infert,
                   type = "binary")
 # Print a summary of the model
 summary(res1)

 # Score to a data frame
 scoreDF <- rxPredict(res1, data = infert, 
     extraVarsToWrite = "isCase")

 # Compute and plot the Receiver Operating Characteristic (ROC) curve and the AUC
 roc1 <- rxRoc(actualVarName = "isCase", predVarNames = "Probability", data = scoreDF) 
 plot(roc1)
 rxAuc(roc1)

 #########################################################################
 # rxFastLinear Regression

 # Create an xdf file with the attitude data
 myXdf <- tempfile(pattern = "tempAttitude", fileext = ".xdf")
 rxDataStep(attitude, myXdf, rowsPerRead = 50, overwrite = TRUE)
 myXdfDS <- RxXdfData(file = myXdf)

 attitudeForm <- rating ~ complaints + privileges + learning + 
     raises + critical + advance

 # Estimate a regression model with rxFastLinear 
 res2 <- rxFastLinear(formula = attitudeForm,  data = myXdfDS, 
     type = "regression")

 # Score to data frame
 scoreOut2 <- rxPredict(res2, data = myXdfDS, 
     extraVarsToWrite = "rating")

 # Plot the rating versus the score with a regression line
 rxLinePlot(rating~Score, type = c("p","r"), data = scoreOut2)

 # Clean up   
 file.remove(myXdf)
rxEnsemble: Ensembles
# Create an ensemble of regression rxFastTrees models

 # use xdf data source
 dataFile <- file.path(rxGetOption("sampleDataDir"), "claims4blocks.xdf")
 rxGetInfo(dataFile, getVarInfo = TRUE, getBlockSizes = TRUE)
 form <- cost ~ age + type + number

 rxSetComputeContext("localpar")
 rxGetComputeContext()

 # build an ensemble model that contains three 'rxFastTrees' models with different parameters
 ensemble <- rxEnsemble(
     formula = form,
     data = dataFile,
     type = "regression",
     trainers = list(fastTrees(), fastTrees(numTrees = 60), fastTrees(learningRate = 0.1)), # a list of trainers, each with its own arguments
     replace = TRUE # Indicates using a bootstrap sample for each trainer
     )

 # use text data source
 colInfo <- list(DayOfWeek = list(type = "factor", levels = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday")))

 source <- system.file("SampleData/AirlineDemoSmall.csv", package = "RevoScaleR")
 data <- RxTextData(source, missingValueString = "M", colInfo = colInfo)

 # When 'distributed' is TRUE, a distributed data source is created
 distributed <- FALSE
 if (distributed) {
     bigDataDirRoot <- "/share"
     inputDir <- file.path(bigDataDirRoot, "AirlineDemoSmall")
     rxHadoopMakeDir(inputDir)
     rxHadoopCopyFromLocal(source, inputDir)
     hdfsFS <- RxHdfsFileSystem()
     data <- RxTextData(file = inputDir, missingValueString = "M", colInfo = colInfo, fileSystem = hdfsFS)
 }

 # When 'distributed' is TRUE, training is distributed
 if (distributed) {
     cc <- rxSetComputeContext(RxSpark())
 } else {
     cc <- rxGetComputeContext()
 }

 ensemble <- rxEnsemble(
     formula = ArrDelay ~ DayOfWeek,
     data = data,
     type = "regression",
     trainers = list(fastTrees(), fastTrees(numTrees = 60), fastTrees(learningRate = 0.1)), # The ensemble will contain three 'rxFastTrees' models
     replace = TRUE # Indicates using a bootstrap sample for each trainer
     )

 # Change the compute context back to the previous one for scoring
 rxSetComputeContext(cc)

 # Put score and model variables in data frame
 scores <- rxPredict(ensemble, data = data, writeModelVars = TRUE)

 # Plot actual versus predicted values with smoothed line
 rxLinePlot(Score ~ ArrDelay, type = c("p", "smooth"), data = scores)




concat: Machine Learning Concat Transform
testObs <- rnorm(nrow(iris)) > 0
 testIris <- iris[testObs,]
 trainIris <- iris[!testObs,]

 multiLogitOut <- rxLogisticRegression(
         formula = Species~Features, type = "multiClass", data = trainIris,
         mlTransforms = list(concat(vars = list(
             Features = c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")
           ))))
 summary(multiLogitOut)
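
 The fitted model can be scored with rxPredict like the other examples in this post; a minimal sketch on the held-out testIris rows:

 # Score the concat-based multi-class model (sketch)
 concatScoreDF <- rxPredict(multiLogitOut, data = testIris, extraVarsToWrite = "Species")
 head(concatScoreDF)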
categorical: Machine Learning Categorical Data Transform
trainReviews <- data.frame(review = c( 
         "This is great",
         "I hate it",
         "Love it",
         "Do not like it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I kind of hate it",
         "I do like it",
         "I really hate it",
         "It is very good",
         "I hate it a bunch",
         "I love it a bunch",
         "I hate it",
         "I like it very much",
         "I hate it very much.",
         "I really do love it",
         "I really do hate it",
         "Love it!",
         "Hate it!",
         "I love it",
         "I hate it",
         "I love it",
         "I hate it",
         "I love it"),
      like = c(TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, 
         FALSE, TRUE, FALSE, TRUE), stringsAsFactors = FALSE
     )

     testReviews <- data.frame(review = c(
         "This is great",
         "I hate it",
         "Love it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I love it",
         "I do like it",
         "I really hate it",
         "I love it"), stringsAsFactors = FALSE)


 # Use a categorical transform: the entire string is treated as a category
 outModel1 <- rxLogisticRegression(like~reviewCat, data = trainReviews, 
     mlTransforms = list(categorical(vars = c(reviewCat = "review"))))
 # Note that 'I hate it' and 'I love it' (the only strings appearing more than once)
 # have non-zero weights
 summary(outModel1)

 # Use the model to score
 scoreOutDF1 <- rxPredict(outModel1, data = testReviews, 
     extraVarsToWrite = "review")
 scoreOutDF1
categoricalHash: Machine Learning Categorical Hash Data Transform
trainReviews <- data.frame(review = c( 
         "This is great",
         "I hate it",
         "Love it",
         "Do not like it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I kind of hate it",
         "I do like it",
         "I really hate it",
         "It is very good",
         "I hate it a bunch",
         "I love it a bunch",
         "I hate it",
         "I like it very much",
         "I hate it very much.",
         "I really do love it",
         "I really do hate it",
         "Love it!",
         "Hate it!",
         "I love it",
         "I hate it",
         "I love it",
         "I hate it",
         "I love it"),
      like = c(TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, 
         FALSE, TRUE, FALSE, TRUE), stringsAsFactors = FALSE
     )

     testReviews <- data.frame(review = c(
         "This is great",
         "I hate it",
         "Love it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I love it",
         "I do like it",
         "I really hate it",
         "I love it"), stringsAsFactors = FALSE)


 # Use a categorical hash transform
 outModel2 <- rxLogisticRegression(like~reviewCatHash, data = trainReviews, 
     mlTransforms = list(categoricalHash(vars = c(reviewCatHash = "review"))))
 # Weights are similar to those from the categorical transform
 summary(outModel2)

 # Use the model to score
 scoreOutDF2 <- rxPredict(outModel2, data = testReviews, 
     extraVarsToWrite = "review")
 scoreOutDF2
stopwordsDefault: Machine Learning Text Transform
trainReviews <- data.frame(review = c( 
         "This is great",
         "I hate it",
         "Love it",
         "Do not like it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I kind of hate it",
         "I do like it",
         "I really hate it",
         "It is very good",
         "I hate it a bunch",
         "I love it a bunch",
         "I hate it",
         "I like it very much",
         "I hate it very much.",
         "I really do love it",
         "I really do hate it",
         "Love it!",
         "Hate it!",
         "I love it",
         "I hate it",
         "I love it",
         "I hate it",
         "I love it"),
      like = c(TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, 
         FALSE, TRUE, FALSE, TRUE), stringsAsFactors = FALSE
     )

     testReviews <- data.frame(review = c(
         "This is great",
         "I hate it",
         "Love it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I love it",
         "I do like it",
         "I really hate it",
         "I love it"), stringsAsFactors = FALSE)


 outModel <- rxLogisticRegression(like ~ reviewTran, data = trainReviews,
     mlTransforms = list(featurizeText(vars = c(reviewTran = "review"),
     stopwordsRemover = stopwordsDefault(), keepPunctuations = FALSE)))
 # 'hate' and 'love' have non-zero weights
 summary(outModel)

 # Use the model to score
 scoreOutDF5 <- rxPredict(outModel, data = testReviews, 
     extraVarsToWrite = "review")
 scoreOutDF5
ngram: Machine Learning Feature Extractors
myData <- data.frame(opinion = c(
     "I love it!",
     "I love it!",
     "Love it!",
     "I love it a lot!",
     "Really love it!",
     "I hate it",
     "I hate it",
     "I hate it.",
     "Hate it",
     "Hate"),
     like = rep(c(TRUE, FALSE), each = 5),
     stringsAsFactors = FALSE)

 outModel1 <- rxLogisticRegression(like~opinionCount, data = myData, 
     mlTransforms = list(featurizeText(vars = c(opinionCount = "opinion"), 
         wordFeatureExtractor = ngramHash(invertHash = -1, hashBits = 3)))) 
 summary(outModel1)   

 outModel2 <- rxLogisticRegression(like~opinionCount, data = myData, 
     mlTransforms = list(featurizeText(vars = c(opinionCount = "opinion"), 
         wordFeatureExtractor = ngramCount(maxNumTerms = 5, weighting = "tf"))))         
 summary(outModel2)
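
 Either featurized model can be scored in the usual way; a minimal sketch applying the n-gram count model back to the training sentences:

 # Score with the n-gram count model (sketch)
 ngramScoreDF <- rxPredict(outModel2, data = myData, extraVarsToWrite = "like")
 head(ngramScoreDF)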
selectFeatures: Machine Learning Feature Selection Transform
trainReviews <- data.frame(review = c( 
         "This is great",
         "I hate it",
         "Love it",
         "Do not like it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I kind of hate it",
         "I do like it",
         "I really hate it",
         "It is very good",
         "I hate it a bunch",
         "I love it a bunch",
         "I hate it",
         "I like it very much",
         "I hate it very much.",
         "I really do love it",
         "I really do hate it",
         "Love it!",
         "Hate it!",
         "I love it",
         "I hate it",
         "I love it",
         "I hate it",
         "I love it"),
      like = c(TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, 
         FALSE, TRUE, FALSE, TRUE), stringsAsFactors = FALSE
     )

     testReviews <- data.frame(review = c(
         "This is great",
         "I hate it",
         "Love it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I love it",
         "I do like it",
         "I really hate it",
         "I love it"), stringsAsFactors = FALSE)

 # Use a categorical hash transform, which generates 128 (2^7) hash features.
 outModel1 <- rxLogisticRegression(like~reviewCatHash, data = trainReviews, l1Weight = 0, 
     mlTransforms = list(categoricalHash(vars = c(reviewCatHash = "review"), hashBits = 7)))
 summary(outModel1)

 # Apply a categorical hash transform and a count feature selection transform,
 # which keeps only those hash slots that actually occur in the data.
 outModel2 <- rxLogisticRegression(like~reviewCatHash, data = trainReviews, l1Weight = 0, 
     mlTransforms = list(
   categoricalHash(vars = c(reviewCatHash = "review"), hashBits = 7), 
   selectFeatures("reviewCatHash", mode = minCount())))
 summary(outModel2)

 # Apply a categorical hash transform and a mutual information feature selection
 # transform, which keeps the 10 features with the largest mutual information with the label.
 outModel3 <- rxLogisticRegression(like~reviewCatHash, data = trainReviews, l1Weight = 0, 
     mlTransforms = list(
   categoricalHash(vars = c(reviewCatHash = "review"), hashBits = 7), 
   selectFeatures(like ~ reviewCatHash, mode = mutualInformation(numFeaturesToKeep = 10))))
 summary(outModel3)
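
 As elsewhere in this post, the feature-selected models can then be used for scoring; a minimal sketch with the mutual-information model:

 # Score the test reviews with the feature-selected model (sketch)
 selScoreDF <- rxPredict(outModel3, data = testReviews, extraVarsToWrite = "review")
 selScoreDF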




rxPredict.mlModel: Score using a Microsoft R Machine Learning model
# Estimate a logistic regression model
 infert1 <- infert
 infert1$isCase <- (infert1$case == 1)
 myModelInfo <- rxLogisticRegression(formula = isCase ~ age + parity + education + spontaneous + induced,
                        data = infert1)

 # Create an xdf file with per-instance results using rxPredict
 xdfOut <- tempfile(pattern = "scoreOut", fileext = ".xdf")
 scoreDS <- rxPredict(myModelInfo, data = infert1,
     outData = xdfOut, overwrite = TRUE,
     extraVarsToWrite = c("isCase", "Probability"))

 # Summarize results with an ROC curve
 rxRocCurve(actualVarName = "isCase", predVarNames = "Probability", data = scoreDS)

 # Use the built-in data set 'airquality' to create test and train data
 DF <- airquality[!is.na(airquality$Ozone), ]  
 DF$Ozone <- as.numeric(DF$Ozone)
 set.seed(12)
 randomSplit <- rnorm(nrow(DF))
 trainAir <- DF[randomSplit >= 0,]
 testAir <- DF[randomSplit < 0,]
 airFormula <- Ozone ~ Solar.R + Wind + Temp

 # Regression Fast Tree for train data
 fastTreeReg <- rxFastTrees(airFormula, type = "regression", 
     data = trainAir)  

 # Put the score and the model variables in a data frame
 # Add the suffix "Pred" to the new prediction variables
 fastTreeScoreDF <- rxPredict(fastTreeReg, data = testAir, 
     writeModelVars = TRUE, suffix = "Pred")

 rxGetVarInfo(fastTreeScoreDF)

 # Clean-up
 file.remove(xdfOut)
rxFeaturize: Data Transformation for RevoScaleR data sources
# rxFeaturize applies MicrosoftML transforms to a data source and returns the transformed data
 # In this example we look at the output of the categorical transform

 # Create the data
 categoricalData <- data.frame(
   placesVisited = c(
     "London",
     "Brunei",
     "London",
     "Paris",
     "Seria"
   ),
   stringsAsFactors = FALSE
 )

 # Invoke the categorical transform
 categorized <- rxFeaturize(
   data = categoricalData,
   mlTransforms = list(categorical(vars = c(xDataCat = "placesVisited")))
 )

 # Now let's look at the data
 categorized
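
 The same pattern works with the other MicrosoftML transforms shown in this post; for example, a minimal sketch hashing the same column with categoricalHash (hashBits = 3 gives 8 hash slots):

 # Invoke the categorical hash transform instead (sketch)
 hashed <- rxFeaturize(
   data = categoricalData,
   mlTransforms = list(categoricalHash(vars = c(xDataHash = "placesVisited"), hashBits = 3))
 )
 hashed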
summary.mlModel: Summary of a Microsoft R Machine Learning model
# Estimate a logistic regression model
 logitModel <- rxLogisticRegression(isCase ~ age + parity + education + spontaneous + induced,
                   transforms = list(isCase = case == 1),
                   data = infert)
 # Print a summary of the model
 summary(logitModel)

 # Score to a data frame
 scoreDF <- rxPredict(logitModel, data = infert, 
     extraVarsToWrite = "isCase")

 # Compute and plot the Receiver Operating Characteristic (ROC) curve and the AUC
 roc1 <- rxRoc(actualVarName = "isCase", predVarNames = "Probability", data = scoreDF) 
 plot(roc1)
 rxAuc(roc1)

 #######################################################################################
 # Multi-class logistic regression  
 testObs <- rnorm(nrow(iris)) > 0
 testIris <- iris[testObs,]
 trainIris <- iris[!testObs,]
 multiLogit <- rxLogisticRegression(
     formula = Species~Sepal.Length + Sepal.Width + Petal.Length + Petal.Width,
     type = "multiClass", data = trainIris)

 # Score the model
 scoreMultiDF <- rxPredict(multiLogit, data = testIris, 
     extraVarsToWrite = "Species")    
 # Print the first rows of the data frame with scores
 head(scoreMultiDF)
 # Look at confusion matrix
 table(scoreMultiDF$Species, scoreMultiDF$PredictedLabel)

 # Look at the observations with incorrect predictions
 badPrediction <- scoreMultiDF$Species != scoreMultiDF$PredictedLabel
 scoreMultiDF[badPrediction,]




loss functions: Classification and Regression Loss Functions
train <- function(lossFunction) {

     result <- rxFastLinear(isCase ~ age + parity + education + spontaneous + induced,
                   transforms = list(isCase = case == 1), lossFunction = lossFunction,
                   data = infert,
                   type = "binary")
     coef(result)[["age"]]
 }

 age <- list()
 age$LogLoss <- train(logLoss())
 age$HingeLoss <- train(hingeLoss())
 age$SmoothHingeLoss <- train(smoothHingeLoss())
 age
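
 The lossFunction argument also applies to regression; a minimal sketch, assuming squaredLoss() as the regression counterpart in MicrosoftML's loss function family:

 # Regression with an explicit squared loss (sketch)
 regModel <- rxFastLinear(Ozone ~ Solar.R + Wind + Temp,
     data = airquality[!is.na(airquality$Ozone), ],
     type = "regression", lossFunction = squaredLoss())
 summary(regModel)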




minCount: Feature Selection Count Mode
trainReviews <- data.frame(review = c( 
         "This is great",
         "I hate it",
         "Love it",
         "Do not like it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I kind of hate it",
         "I do like it",
         "I really hate it",
         "It is very good",
         "I hate it a bunch",
         "I love it a bunch",
         "I hate it",
         "I like it very much",
         "I hate it very much.",
         "I really do love it",
         "I really do hate it",
         "Love it!",
         "Hate it!",
         "I love it",
         "I hate it",
         "I love it",
         "I hate it",
         "I love it"),
      like = c(TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, 
         FALSE, TRUE, FALSE, TRUE), stringsAsFactors = FALSE
     )

     testReviews <- data.frame(review = c(
         "This is great",
         "I hate it",
         "Love it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I love it",
         "I do like it",
         "I really hate it",
         "I love it"), stringsAsFactors = FALSE)

 # Use a categorical hash transform, which generates 128 (2^7) hash features.
 outModel1 <- rxLogisticRegression(like~reviewCatHash, data = trainReviews, l1Weight = 0, 
     mlTransforms = list(categoricalHash(vars = c(reviewCatHash = "review"), hashBits = 7)))
 summary(outModel1)

 # Apply a categorical hash transform and a count feature selection transform,
 # which keeps only those hash slots that actually occur in the data.
 outModel2 <- rxLogisticRegression(like~reviewCatHash, data = trainReviews, l1Weight = 0, 
     mlTransforms = list(
   categoricalHash(vars = c(reviewCatHash = "review"), hashBits = 7), 
   selectFeatures("reviewCatHash", mode = minCount())))
 summary(outModel2)

 # Apply a categorical hash transform and a count feature selection transform,
 # which keeps only those features appearing at least 5 times.
 outModel3 <- rxLogisticRegression(like~reviewCatHash, data = trainReviews, l1Weight = 0, 
     mlTransforms = list(
   categoricalHash(vars = c(reviewCatHash = "review"), hashBits = 7), 
   selectFeatures("reviewCatHash", mode = minCount(count = 5))))
 summary(outModel3)
mutualInformation: Feature Selection Mutual Information Mode
trainReviews <- data.frame(review = c( 
         "This is great",
         "I hate it",
         "Love it",
         "Do not like it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I kind of hate it",
         "I do like it",
         "I really hate it",
         "It is very good",
         "I hate it a bunch",
         "I love it a bunch",
         "I hate it",
         "I like it very much",
         "I hate it very much.",
         "I really do love it",
         "I really do hate it",
         "Love it!",
         "Hate it!",
         "I love it",
         "I hate it",
         "I love it",
         "I hate it",
         "I love it"),
      like = c(TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
         FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, FALSE, TRUE, 
         FALSE, TRUE, FALSE, TRUE), stringsAsFactors = FALSE
     )

     testReviews <- data.frame(review = c(
         "This is great",
         "I hate it",
         "Love it",
         "Really like it",
         "I hate it",
         "I like it a lot",
         "I love it",
         "I do like it",
         "I really hate it",
         "I love it"), stringsAsFactors = FALSE)

 # Use a categorical hash transform, which generates 128 (2^7) hash features.
 outModel1 <- rxLogisticRegression(like~reviewCatHash, data = trainReviews, l1Weight = 0, 
     mlTransforms = list(categoricalHash(vars = c(reviewCatHash = "review"), hashBits = 7)))
 summary(outModel1)

 # Apply a categorical hash transform and a count feature selection transform,
 # which keeps only those hash slots that actually occur in the data.
 outModel2 <- rxLogisticRegression(like~reviewCatHash, data = trainReviews, l1Weight = 0, 
     mlTransforms = list(
   categoricalHash(vars = c(reviewCatHash = "review"), hashBits = 7), 
   selectFeatures("reviewCatHash", mode = minCount())))
 summary(outModel2)

 # Apply a categorical hash transform and a mutual information feature selection
 # transform, which keeps the 10 features with the largest mutual information with the label
 outModel3 <- rxLogisticRegression(like~reviewCatHash, data = trainReviews, l1Weight = 0, 
     mlTransforms = list(
   categoricalHash(vars = c(reviewCatHash = "review"), hashBits = 7), 
   selectFeatures(like ~ reviewCatHash, mode = mutualInformation(numFeaturesToKeep = 10))))
 summary(outModel3)




maOptimizer: Optimization Algorithms
 myIris <- iris
 myIris$Setosa <- iris$Species == "setosa"

 res1 <- rxNeuralNet(formula = Setosa~Sepal.Length + Sepal.Width + Petal.Width,
         data = myIris, 
         optimizer = sgd(learningRate = .002))

 res2 <- rxNeuralNet(formula = Setosa~Sepal.Length + Sepal.Width + Petal.Width,
         data = myIris, 
         optimizer = adaDeltaSgd(decay = .9, conditioningConst = 1e-05))
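
 To compare the two optimizers, score both fits on the same data; a minimal sketch, assuming binary rxNeuralNet predictions include a "Probability" column as in the earlier examples:

 # Compare predictions from the two optimizers (sketch)
 p1 <- rxPredict(res1, data = myIris)
 p2 <- rxPredict(res2, data = myIris)
 head(data.frame(sgd = p1$Probability, adaDeltaSgd = p2$Probability))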