This function predicts values based upon a model trained by svm.

## S3 method for class 'svm'
predict(object, newdata, decision.values = FALSE,
        probability = FALSE, ..., na.action = na.omit)

object: an object of class "svm", created by svm.
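For orientation, a minimal fit-and-predict round trip might look as follows (a sketch only, assuming the e1071 package and the built-in iris data):

library(e1071)

# fit a classifier, then predict labels for some rows;
# newdata must provide the predictor columns used for fitting
model <- svm(Species ~ ., data = iris)
predict(model, newdata = iris[1:5, ])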
probability: logical indicating whether class probabilities should be computed and returned. This is only possible if the model was fitted with the probability option enabled.
na.action: a function specifying the action to be taken if NAs are found. The default is na.omit, which leads to rejection of cases with missing values on any required variable. An alternative is na.fail, which causes an error if NA cases are found. (NOTE: If given, this argument must be named.)
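The effect of na.action can be sketched as follows (assuming the e1071 package and an iris-based model as in the examples below):

library(e1071)
model <- svm(Species ~ ., data = iris)

# new data with a missing predictor value in the second row
newdata <- iris[1:3, ]
newdata$Sepal.Length[2] <- NA

# the default na.omit drops the incomplete case: two predictions are returned
predict(model, newdata, na.action = na.omit)

# na.fail instead signals an error as soon as an NA is encountered
try(predict(model, newdata, na.action = na.fail))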
The method returns a vector of predicted values (for classification: a vector of labels, for density estimation: a logical vector). If decision.values is TRUE, the vector gets a "decision.values" attribute containing an n x c matrix (n = number of predicted values, c = number of classifiers) of all c binary classifiers' decision values. There are k * (k - 1) / 2 classifiers (k = number of classes). The colnames of the matrix indicate the labels of the two classes. If probability is TRUE, the vector gets a "probabilities" attribute containing an n x k matrix (n = number of predicted values, k = number of classes) of the class probabilities.
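A short sketch makes these dimensions concrete (assuming the e1071 package and the iris data, where k = 3 classes give 3 * 2 / 2 = 3 binary classifiers):

library(e1071)
model <- svm(Species ~ ., data = iris, probability = TRUE)
pred  <- predict(model, iris, decision.values = TRUE, probability = TRUE)

# n x c decision value matrix: 150 x 3, one column per binary classifier
dim(attr(pred, "decision.values"))
# the column names give the two class labels of each binary classifier
colnames(attr(pred, "decision.values"))

# n x k probability matrix: 150 x 3, one column per class
dim(attr(pred, "probabilities"))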
If the training set was scaled by svm (done by default), the new data is scaled accordingly, using the center and scale values of the training data.
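As a small illustration (a sketch assuming the e1071 package and the iris data): new data is supplied on the original measurement scale, and predict() recenters and rescales it with the values determined from the training data, which are kept in the fitted object's x.scale component.

library(e1071)
x <- subset(iris, select = -Species)
y <- iris$Species
model <- svm(x, y)   # internal scaling of x is on by default

# raw, unscaled new data; predict() rescales it internally
predict(model, x[1:5, ])

# the training centers and scales stored for this purpose
str(model$x.scale)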
David Meyer (based on C++ code by Chih-Chung Chang and Chih-Jen Lin)
David.Meyer@R-project.org
library(e1071)

## classification mode
# default with factor response (iris.df is the built-in iris data):
iris.df <- iris
model <- svm(Species ~ ., data = iris.df)

# alternatively the traditional interface:
x <- iris.df[, 1:4]
y <- iris.df[, 5]
model <- svm(x, y, probability = TRUE)

print(model)
summary(model)

# test with train data
pred <- predict(model, x)
# (same as:)
pred <- fitted(model)

# compute decision values and probabilities
pred <- predict(model, x, decision.values = TRUE, probability = TRUE)
attr(pred, "decision.values")[1:4, ]
attr(pred, "probabilities")[1:4, ]

## try regression mode on two dimensions
# create data
x <- seq(0.1, 5, by = 0.05)
y <- log(x) + rnorm(x, sd = 0.2)

# estimate model and predict input values
m   <- svm(x, y)
new <- predict(m, x)

# visualize
plot(x, y)
points(x, log(x), col = 2)
points(x, new, col = 4)

## density-estimation
# create 2-dim. normal with rho = 0:
X <- data.frame(a = rnorm(1000), b = rnorm(1000))

# traditional way:
m <- svm(X, gamma = 0.1)
# formula interface:
m <- svm(~., data = X, gamma = 0.1)
# or, with the variables spelled out:
m <- svm(~ a + b, data = X, gamma = 0.1)

# test:
newdata <- data.frame(a = c(0, 4), b = c(0, 4))
predict(m, newdata)

# visualize: support vectors in red, other points in black,
# plus the two test points
plot(X$a, X$b, type = "n", xlab = "a", ylab = "b",
     xlim = c(-5, 5), ylim = c(-5, 5))
z <- (1:1000 %in% m$index) + 1
for (i in unique(z)) points(X$a[z == i], X$b[z == i], col = i)
points(as.matrix(newdata), pch = "+", col = 2, cex = 8)