# Section banner: announce the hypothesis-testing validation section and
# yield TRUE so the harness records a passing step.  Use the reserved
# constant TRUE rather than T (T is an ordinary, reassignable variable).
{cat("--------------- Hypothesis Testing ---------------\n"); TRUE}
{
# Function: binom.test
# Data: Hollander and Wolfe, Comment 3 p. 18
# Reference: Hollander, M. and D. Wolfe. 1973. Nonparametric Statistical
#      Methods.
# Description: exact binomial test of Ho: p = 0.4 against the two-sided
#      alternative; verify the test statistic (number of successes),
#      the p-value, and the reported alternative hypothesis.
tol <- 1e-4
b.test <- binom.test(1, 8, p = 0.4)
all(b.test$statistic == 1,
    abs(b.test$p.value - 0.1562) < tol,
    b.test$alternative == "two.sided")
}
{
# Function: binom.test
# Data: Hollander and Wolfe, Comment 3 p. 18
# Reference: Hollander, M. and D. Wolfe. 1973. Nonparametric Statistical
#      Methods.
# Description: exact binomial test of Ho: p = 0.4 against the one-sided
#      (greater) alternative; verify the statistic, p-value, and
#      alternative label.
tol <- 1e-4
b.test <- binom.test(2, 8, p = 0.4, alternative = "greater")
all(b.test$statistic == 2,
    abs(b.test$p.value - 0.8936) < tol,
    b.test$alternative == "greater")
}
{
# Function: binom.test
# Data: Hollander and Wolfe, Comment 3 p. 18
# Reference: Hollander, M. and D. Wolfe. 1973. Nonparametric Statistical
#      Methods.
# Description: exact binomial test of Ho: p = 0.4 against the one-sided
#      (less) alternative; verify the statistic, p-value, and
#      alternative label.
tol <- 1e-4
b.test <- binom.test(1, 8, p = 0.4, alternative = "less")
all(b.test$statistic == 1,
    abs(b.test$p.value - 0.1064) < tol,
    b.test$alternative == "less")
}
{
# Function: prop.test
# Data: Fienberg Table 2-1, p. 8
# Reference: Fienberg, S.E. 1977. The Analysis of Cross-classified 
#      Categorical Data
# Description: 2-sample test, Ho: P1 = P2, two-sided alternative, no continuity 
#      correction; check test statistic, df's, and p-value
tol <- 1e-2
x <- c(31, 17)
n <- c(140, 139)
# Spell out FALSE: T/F are ordinary, reassignable variables, whereas
# TRUE/FALSE are reserved constants.
p.test <- prop.test(x, n, correct = FALSE)
all(c(abs(p.test$statistic - 4.81) < tol,
      p.test$parameters == 1,
      abs(p.test$p.value - 0.028) < tol))
}
{
# Function: chisq.test
# Data: Box, Hunter, Hunter Table 5.9, p. 149
# Reference: Box, G., W. G. Hunter, and J. S. Hunter. 1978. Statistics for 
#      Experimenters
# Description: 2x2 contingency table; no continuity correction; check test 
#      statistic, and df's; check p-value using a different tolerance
#      (a published p-value not given, results checked against Splus
#      calculation)
tol1 <- 1e-2
tol2 <- 1e-6
x <- matrix(c(111, 85, 162, 54), ncol = 2)
# Use FALSE, not F: F is a reassignable variable, FALSE a reserved constant.
x.test <- chisq.test(x, correct = FALSE)
all(c(abs(x.test$statistic - 15.51) < tol1,
      x.test$parameters == 1,
      abs(x.test$p.value - 8.220857e-05) < tol2))
}
{
# Function: chisq.test
# Data: Box, Hunter, Hunter Table 5.9, p. 149
# Reference: Box, G., W. G. Hunter, and J. S. Hunter. 1978. Statistics for
#      Experimenters
# Description: 2x2 contingency table with the (default) continuity
#      correction; verify the statistic and df; the p-value has no
#      published reference and is compared against an Splus
#      calculation, hence the tighter tolerance.
tol1 <- 1e-2
tol2 <- 1e-6
x <- matrix(c(111, 85, 162, 54), ncol = 2)
x.test <- chisq.test(x)
all(abs(x.test$statistic - 14.70) < tol1,
    x.test$parameters == 1,
    abs(x.test$p.value - 0.0001263084) < tol2)
}
{
# Function: chisq.test
# Data: Box, Hunter, Hunter Table 5.7, p. 146
# Reference: Box, G., W. G. Hunter, and J. S. Hunter. 1978. Statistics for 
#      Experimenters
# Description: 4x5 contingency table; no continuity correction; check test
#      statistic, and df's; check p-value using a different tolerance
#      (a published p-value not given, results checked against Splus
#      calculation)
tol1 <- 1e-1
tol2 <- 1e-6
x <- matrix(c(13, 18, 16, 5, 10, 16, 8, 36, 35, 21, 56, 51, 43, 29, 10),ncol=5)
# Use FALSE, not F: F is a reassignable variable, FALSE a reserved constant.
x.test <- chisq.test(x, correct = FALSE)
all(c(abs(x.test$statistic - 56.7) < tol1,
      x.test$parameters == 8,
      abs(x.test$p.value - 2.055893e-09) < tol2))
}
{
# Function: t.test
# Data: Zar, Example 7.1 p. 94
# Reference: Zar, J.H. Biostatistical Analysis, 3rd edition. 1996. 
#      Prentice-Hall Inc.
# Description: one-sample, two-tailed t-test of a population mean against
#      a hypothesized mean of 24.3; verify the t statistic, df,
#      p-value, and both ends of the 95% confidence interval.
tol <- 1e-2
x <- c(25.8, 24.6, 26.1, 22.9, 25.1, 27.3, 24.0, 24.5, 23.9, 26.2, 24.3, 24.6,
       23.3, 25.5, 28.1, 24.8, 23.5, 26.3, 25.4, 25.5, 23.9, 27.0, 24.8, 22.9,
       25.4)
t.tst <- t.test(x, mu = 24.3)
all(abs(t.tst$statistic - 2.704) < tol,
    t.tst$parameters == 24,
    abs(t.tst$p.value - 0.012) < tol,
    abs(t.tst$conf.int[1] - 24.47) < tol,
    abs(t.tst$conf.int[2] - 25.59) < tol)
}
{
# Function: t.test
# Data: Zar, Example 7.4 p. 100
# Reference: Zar, J.H. Biostatistical Analysis, 3rd edition. 1996. 
#      Prentice-Hall Inc.
# Description: one-sample, one-tailed t-test of Ho: mu <= 45 against
#      Ha: mu > 45; verify the t statistic, df, and p-value.
tol <- 1e-2
x <- c(42.7, 43.4, 44.6, 45.1, 45.6, 45.9, 46.8, 47.6)
t.tst <- t.test(x, alternative = "greater", mu = 45)
all(abs(t.tst$statistic - 0.36) < tol,
    t.tst$parameters == 7,
    abs(t.tst$p.value - 0.36) < tol)
}
{
# Function: t.test
# Data: Zar, Example 8.1 p. 124
# Reference: Zar, J.H. Biostatistical Analysis, 3rd edition. 1996. 
#      Prentice-Hall Inc.
# Description: 2-sample, 2-tailed test; check test statistic, df's, and 
#      p-value
tol <- 1e-2
x <- c(8.8, 8.4, 7.9, 8.7, 9.1, 9.6)
y <- c(9.9, 9.0, 11.1, 9.6, 8.7, 10.4, 9.5)
# Use TRUE, not T: T is a reassignable variable, TRUE a reserved constant.
t.tst <- t.test(x, y, var.equal = TRUE)
all(c(abs(t.tst$statistic + 2.475) < tol,
      t.tst$parameters == 11,
      abs(t.tst$p.value - 0.030) < tol))
}
{
# Function: t.test
# Data: Zar, Example 8.2 p. 127
# Reference: Zar, J.H. Biostatistical Analysis, 3rd edition. 1996.
#      Prentice-Hall Inc.
# Description: 2-sample, 1-tailed alternative (less); check test statistic,
#      df's, and p-value
tol <- 1e-2
x <- c(48.2, 54.6, 58.3, 47.8, 51.4, 52.0, 55.2, 49.1, 49.9, 52.6)
y <- c(52.3, 57.4, 55.6, 53.2, 61.3, 58.0, 59.8, 54.8)
# Use TRUE, not T: T is a reassignable variable, TRUE a reserved constant.
t.tst <- t.test(x, y, alternative = "less", var.equal = TRUE)
all(c(abs(t.tst$statistic + 2.99) < tol,
      t.tst$parameters == 16,
      abs(t.tst$p.value - 0.0043) < tol))
}
{
# Function: t.test
# Data: Fisher and Van Belle, Table 5.2 p. 147
# Reference: Fisher, L. and G. Van Belle. 1993. Biostatistics: A Methodology
#      for the Health Sciences
# Description: paired t-test; check test statistic and df's; check p-value using
#      a different tolerance (a published p-value not given, results
#      checked against Splus calculation)
tol1 <- 1e-2
tol2 <- 1e-6
x <- c(1.71, 1.25, 2.13, 1.29, 1.58, 4.00, 1.42, 1.08, 1.83, 0.67, 1.13,
       2.71, 1.96)
y <- c(0.13, 0.88, 1.38, 0.13, 0.25, 2.63, 1.38, 0.50, 1.25, 0.75, 0.00,
       2.38, 1.13)
# Use TRUE, not T: T is a reassignable variable, TRUE a reserved constant.
pt.tst <- t.test(x, y, paired = TRUE)
all(c(abs(pt.tst$statistic - 5.278) < tol1,
      pt.tst$parameters == 12,
      abs(pt.tst$p.value - 0.000196) < tol2))
}
{
# Function: wilcox.test
# Data: Conover, Example 2 p. 284; test data truncated to the first 15 
#      X[i]'s; Splus computes D[i]=X[i]-30, therefore the reported 
#      test statistic V=75 instead of 45
# Description: 1-sample signed rank test; 1-sided alternative; check V, n,
#      and p-value 
# NOTE(review): the p-value check below, abs(p.value - 0.2) > 0, is
#      vacuous -- it is TRUE unless the p-value equals 0.2 exactly.
#      It was probably intended as either "abs(... - 0.2) < tol" or
#      "... - 0.2 > 0"; confirm against Conover before tightening.
#      Also note no tol is defined in this block, so any tol used here
#      would be inherited from an earlier block.
# NOTE(review): "parameters" is the S-PLUS htest element name; in R the
#      element is "parameter", so w.test$parameters is NULL here and its
#      comparison silently drops out of c() -- verify which dialect this
#      file targets.
x <- c(23.8, 26.0, 26.9, 27.4, 28.0, 30.3, 30.7, 31.2, 31.3, 32.8, 33.2,
       33.9, 34.3, 34.9, 35.0)
w.test <- wilcox.test(x,mu=30,alternative="greater")
all(c(w.test$statistic == 75,
      w.test$parameters == 15,
      abs(w.test$p.value - 0.2) > 0))
}
{
# Function: wilcox.test
# Data: Fisher and Van Belle, Table 8.2 p. 316
# Reference: Fisher, L. and G. Van Belle. 1993. Biostatistics: A Methodology
#      for the Health Sciences
# Description: 2-sample rank sum test using large sample approximation, no 
#      continuity correction, 2-sided alternative; check normal-z 
#      and p-value
# NOTE(review): the statistic comparison against 2.22 assumes the S-PLUS
#      convention of reporting the normal-z; in R, wilcox.test reports
#      the rank-sum statistic W instead -- confirm which dialect this
#      file targets before relying on that check.
tol <- 1e-2
x <- c(1014, 684, 810, 990, 840, 978, 1002, 1110)
y <- c(864, 636, 638, 708, 786, 600, 1320, 750, 594, 750)
# Use FALSE, not F: F is a reassignable variable, FALSE a reserved constant.
w.test <- wilcox.test(x, y, exact = FALSE, correct = FALSE)
all(c(abs(w.test$statistic - 2.22) < tol,
      abs(w.test$p.value - 0.026) < tol))
}
{
# Function: wilcox.test
# Data: Sokal and Rohlf, Box 13.10 p. 448
# Reference: Sokal, R. and F. J. Rohlf. 1981. Biometry, 2nd edition.
#      W. H. Freeman and Company 
# Description: paired-sample signed rank test; testing mu=0 vs. 2-sided
#      alternative; check test statistic and df's; check p-value;
#      NOTE: p-value published as < 0.01; compared with Splus
#      calculated value
tol <- 1e-6
x <- c(2.86, 2.60, 2.43, 2.90, 2.94, 2.70, 2.68, 2.98, 2.85)
y <- c(2.36, 2.41, 2.39, 2.85, 2.82, 2.73, 2.58, 2.89, 2.78)
# Use TRUE, not T: T is a reassignable variable, TRUE a reserved constant.
w.test <- wilcox.test(x, y, paired = TRUE)
all(c(w.test$statistic == 44,
      w.test$parameters == 9,
      abs(w.test$p.value - 0.0078125) < tol))
}
{
# Function: cor.test
# Data: Fisher and Van Belle, Table 9.3, p. 350 
# Reference: Fisher, L. and G. Van Belle. 1993. Biostatistics: A Methodology
#      for the Health Sciences
# Description: Pearson product-moment correlation with a two-sided t-test
#      for significance; verify the t statistic, df, estimated
#      correlation coefficient, and p-value (the p-value is not
#      published -- compared against an Splus calculation).
tol1 <- 1e-3
tol2 <- 1e-6
x.atp <- c(4.18, 5.16, 4.85, 3.43, 4.53, 5.13, 4.10, 4.77, 4.12, 4.65, 6.03,
           5.94, 5.99, 5.43, 5.00, 4.82, 5.25)
y.atp <- c(4.81, 4.98, 4.48, 4.19, 4.27, 4.87, 4.74, 4.53, 3.72, 4.62, 5.83,
           4.40, 4.87, 5.44, 4.70, 4.14, 5.30)
c.test <- cor.test(x.atp, y.atp)
all(abs(c.test$statistic - 2.885) < tol1,
    c.test$parameters == 15,
    abs(c.test$estimate - 0.597401) < tol2,
    abs(c.test$p.value - 0.011331) < tol2)
}
{
# Function: cor.test
# Data: Fisher and Van Belle, Table 9.10, p. 387 (re-uses the x.atp and
#      y.atp vectors defined in the Pearson block above)
# Description: Spearman rank correlation; verify the estimated rho.
tol <- 1e-4
c.test <- cor.test(x.atp, y.atp, method = "spearman")
abs(c.test$estimate - 0.6340) < tol
}
{
# Function: cor.test
# Data: Fisher and Van Belle, Table 9.10, p. 387 (re-uses the x.atp and
#      y.atp vectors defined in the Pearson block above)
# Description: Kendall's tau; verify tau, the approximate normal-z test
#      statistic, and the p-value.  NOTE: the published results
#      (p. 388) are incorrect -- the digits of the approximate
#      standard normal statistic are transposed: 2.67 should read
#      2.76, so the published p-value 0.0076 should be 0.0058.
tol <- 1e-3
c.test <- cor.test(x.atp, y.atp, method = "kendall")
all(abs(c.test$estimate - 0.493) < tol,
    abs(c.test$statistic - 2.762) < tol,
    abs(c.test$p.value - 0.0057) < tol)
}
{
# Function: fisher.test
# Data: Zar, Example 23.16 and 23.19, p. 541 and 547
# Reference: Zar, J.H. Biostatistical Analysis, 3rd edition. 1996.
#      Prentice-Hall Inc.
# Description: Fisher's exact test on a 2x2 contingency table, two-sided
#      alternative; verify the p-value only.
tol <- 1e-5
x <- matrix(c(12, 2, 7, 9), ncol = 2)
f.test <- fisher.test(x)
abs(f.test$p.value - 0.02589) < tol
}
{
# Function: mantelhaen.test
# Data: Snedecor and Cochran, Table 19.14.1 p. 253
# Reference: Snedecor, G. and W. Cochran. 1967. Statistical Methods, 6th ed. 
#      The Iowa State University Press.
# Description: 2x2x3 contingency table with the (default) continuity
#      correction; verify the test statistic and the p-value (no
#      published p-value -- compared against an Splus calculation,
#      hence the tighter tolerance).  NOTE: the published statistic
#      is sqrt(M^2); this comparison is against M^2 itself.
tol1 <- 1e-2
tol2 <- 1e-6
x <- array(c(20, 82, 10, 54, 26, 41, 16, 30, 27, 22, 14, 23),
           dim = c(2, 2, 3),
           dimnames = list(c("Losses","No Losses"),
                           c("Problems","Controls"),
                           c("2","3-4","5+")))
mh.test <- mantelhaen.test(x)
all(abs(mh.test$statistic - 1.904) < tol1,
    abs(mh.test$p.value - 0.166642) < tol2)
}
{
# Function: mcnemar.test
# Data: Fisher and Van Belle, Example 6.19 p. 210
# Reference: Fisher, L. and G. Van Belle. 1993. Biostatistics: A Methodology
#      for the Health Sciences
# Description: 2x2 table; check test statistic, df's, and p-value 
#      (published as < 0.001, so the last check is a one-sided bound)
tol <- 1e-2
x <- matrix(c(10, 13, 57, 95),ncol=2)
# Use FALSE, not F: F is a reassignable variable, FALSE a reserved constant.
x.test <- mcnemar.test(x, correct = FALSE)
all(c(abs(x.test$statistic - 27.66) < tol,
      x.test$parameters == 1,
      x.test$p.value - 0.001 < 0))
}
{
# Function: kruskal.test
# Data: Fisher and Van Belle, Example 10.3 p. 431
# Reference: Fisher, L. and G. Van Belle. 1993. Biostatistics: A Methodology
#      for the Health Sciences
# Description: one-way layout with 3 groups; test Ho: the 3 groups are
#      equivalent against the two-sided alternative; verify the
#      statistic, df, p-value, and alternative label.
#      NOTE(review): "parameters" and "alternative" are S-PLUS htest
#      element names; in R the result carries "parameter" and no
#      "alternative" element, so under R those two comparisons return
#      logical(0) and drop out of all() -- TODO confirm target dialect.
tol <- 1e-2
x <- c(7.3, 7.4, 13.3, 10.6, 15.0, 20.7, 14.7, 23.0, 22.7, 26.6)
x.groups <- as.factor(c(1, 1, 2, 2, 2, 2, 3, 3, 3, 3))
x.gof <- kruskal.test(x, x.groups)
all(abs(x.gof$statistic - 6.33) < tol,
    x.gof$parameters == 2,
    abs(x.gof$p.value - 0.0423) < tol,
    x.gof$alternative == "two.sided")
}
{
# Function: friedman.test
# Data: Hollander and Wolfe Table 1, p. 141 
# Reference: Hollander, M. and D. Wolfe. 1973. Nonparametric Statistical
#      Methods.
# Description: 2-way unreplicated layout; check test statistic, df's, p-value, 
#      method, and alternative
# NOTE(review): "parameters" and "alternative" are S-PLUS htest element
#      names; in R the result carries "parameter" and no "alternative"
#      element, so under R those comparisons yield logical(0), are
#      dropped by c(), and the checks pass vacuously -- TODO confirm
#      which dialect this validation file targets.
tol <- 1e-1
# 66 observations: 3 treatment groups by 22 blocks, laid out group-major
# (the first 22 values are group 1 across blocks 1..22, and so on).
x <- c(5.40, 5.85, 5.20, 5.55, 5.90, 5.45, 5.40, 5.45, 5.25, 5.85, 5.25,
       5.65, 5.60, 5.05, 5.50, 5.45, 5.55, 5.45, 5.50, 5.65, 5.70, 6.30,
       5.50, 5.70, 5.60, 5.50, 5.85, 5.55, 5.40, 5.50, 5.15, 5.80, 5.20,
       5.55, 5.35, 5.00, 5.50, 5.55, 5.55, 5.50, 5.45, 5.60, 5.65, 6.30,
       5.55, 5.75, 5.50, 5.40, 5.70, 5.60, 5.35, 5.35, 5.00, 5.70, 5.10,
       5.45, 5.45, 4.95, 5.40, 5.50, 5.35, 5.55, 5.25, 5.40, 5.55, 6.25)
x.groups <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
              2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
              3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3)
x.blocks <- c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
              19, 20, 21, 22, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
              15, 16, 17, 18, 19, 20, 21, 22, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
              11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)
x.groups <- as.factor(x.groups)
x.blocks <- as.factor(x.blocks)
fm.test <- friedman.test(x,x.groups,x.blocks)
all(c(abs(fm.test$statistic - 11.1) < tol,
      fm.test$parameters == 2,
      abs(fm.test$p.value - 0.004) < tol,
      fm.test$method == "Friedman rank sum test",
      fm.test$alternative == "two.sided"))
}
{
# Function: var.test
# Data: Zar, Example 8.8 p. 138
# Reference: Zar, J.H. Biostatistical Analysis, 3rd edition. 1996.
#      Prentice-Hall Inc.
# Description: variance-ratio F test, two-sided alternative; verify the
#      F statistic, the pair of df's, and that the p-value exceeds
#      the published bound of 0.5.
tol <- 1e-2
x <- c(41, 34, 33, 36, 40, 25, 31, 37, 34, 30, 38)
y <- c(52, 57, 62, 55, 64, 57, 56, 55) 
v.test <- var.test(x, y)
all(abs(v.test$statistic - 1.42) < tol,
    v.test$parameters == c(10, 7),
    v.test$p.value - 0.5 > 0)
}
{
# Function: var.test
# Data: Zar, Example 8.9 p. 140
# Reference: Zar, J.H. Biostatistical Analysis, 3rd edition. 1996.
#      Prentice-Hall Inc.
# Description: variance-ratio F test, one-sided alternative (less);
#      verify the F statistic, the pair of df's, and the p-value.
tol <- 1e-2
x <- c(10, 11, 12, 11, 10, 11, 11)
y <- c(9, 8, 11, 12, 10, 13, 11, 10, 12)
v.test <- var.test(x, y, alternative = "less")
all(abs(v.test$statistic - 0.192) < tol,
    v.test$parameters == c(6, 8),
    abs(v.test$p.value - 0.030) < tol)
}
{
# Function: ks.gof
# Data: Conover Example 1, p. 348
# Reference: Conover, W.J. 1980. Practical Nonparametric Statistics,
#      2nd edition.
# Description: 1-sample test; two-sided alternative; check test statistic 
#      and p-value
# NOTE(review): ks.gof is an S-PLUS function with no base-R equivalent;
#      under R the closest analogue is ks.test(x, "punif", 0, 1).  Left
#      unchanged so the block still validates the S-PLUS routine.
tol <- 1e-3
x <- c(.621, .503, .203, .477, .710, .581, .329, .480, .554, .382)
x.gof <- ks.gof(x,alternative="two.sided",dist="uniform",min=0,max=1)
# Conover publishes D = 0.290 with a p-value above 0.2.
all(c(abs(x.gof$statistic - 0.290) < tol,
      x.gof$p.value - 0.2 > 0))
}
{
# Function: ks.gof
# Data: Conover Example 1, p. 348
# Reference: Conover, W.J. 1980. Practical Nonparametric Statistics,
#      2nd edition.
# Description: 1-sample test; one-sided alternative (less than); check test 
#      statistic and p-value
# NOTE(review): ks.gof is an S-PLUS function with no base-R equivalent;
#      the closest R analogue is ks.test with alternative = "less".
#      Left unchanged so the block still validates the S-PLUS routine.
tol <- 1e-3
x <- c(.621, .503, .203, .477, .710, .581, .329, .480, .554, .382)
x.gof <- ks.gof(x,alternative="less",dist="uniform",min=0,max=1)
# Published one-sided statistic D- = 0.2289 with a p-value above 0.1.
all(c(abs(x.gof$statistic - 0.2289) < tol,
      x.gof$p.value - 0.1 > 0))
}
{
# Function: ks.gof
# Data: Conover Example 1, p. 348
# Reference: Conover, W.J. 1980. Practical Nonparametric Statistics,
#      2nd edition.
# Description: 1-sample test; one-sided alternative (greater than); check test 
#      statistic and p-value
# NOTE(review): ks.gof is an S-PLUS function with no base-R equivalent;
#      the closest R analogue is ks.test with alternative = "greater".
#      Left unchanged so the block still validates the S-PLUS routine.
tol <- 1e-3
x <- c(.621, .503, .203, .477, .710, .581, .329, .480, .554, .382)
x.gof <- ks.gof(x,alternative="greater",dist="uniform",min=0,max=1)
# Published one-sided statistic D+ = 0.290 with a p-value above 0.1.
all(c(abs(x.gof$statistic - 0.290) < tol,
      x.gof$p.value - 0.1 > 0))
}
{
# Function: ks.gof
# Data: Hollander and Wolfe Example 1, p. 222
# Reference: Hollander, M. and D. Wolfe. 1973. Nonparametric Statistical
#      Methods.
# Description: 2-sample test; check test statistic and p-value 
# NOTE(review): ks.gof is an S-PLUS function with no base-R equivalent;
#      the closest R analogue for this call is ks.test(x, y).  Left
#      unchanged so the block still validates the S-PLUS routine.
tol <- 1e-2
x <- c(-.15, 8.60, 5.00, 3.71, 4.29, 7.74, 2.48, 3.25, -1.15, 8.38)
y <- c(2.55, 12.07, .46, .35, 2.69, -.94, 1.73, .73, -.35, -.37)
x.gof <- ks.gof(x,y)
all(c(abs(x.gof$statistic - 0.6) < tol,
      abs(x.gof$p.value - 0.055) < tol))
}
{
# Function: chisq.gof
# Data: Conover Example 1, p. 191 
# Reference: Conover, W.J. 1980. Practical Nonparametric Statistics,
#      2nd edition.
# Description: Continuous data; test Ho: data from a normally distributed
#      random variable with mean 30 and variance 100;
#      check test statistic, df's, p-value, observed counts, and 
#      expected counts
# NOTE(review): chisq.gof is an S-PLUS goodness-of-fit function with no
#      direct base-R equivalent (base chisq.test does not bin continuous
#      data).  Left unchanged so the block still validates the S-PLUS
#      routine.
tol <- 1e-6
x <- c(16.7, 18.8, 24.0, 35.1, 39.8, 17.4, 19.3, 24.7, 35.8, 42.1, 
       18.1, 22.4, 25.9, 36.5, 43.2, 18.2, 22.5, 27.0, 37.6, 46.2)
# n.classes=4 bins the 20 observations into 4 equal-probability classes
# of the hypothesized N(30, 10^2) distribution (expected count 5 each).
x.gof <- chisq.gof(x,n.classes=4,dist="normal",mean=30,sd=10)
all(c(abs(x.gof$statistic - 2.8) < tol,
      x.gof$parameters == 3,
      x.gof$p.value - 0.25 > 0,
      x.gof$data.name == "x",
      all(x.gof$counts == c(8,4,3,5)),
      all(x.gof$expected - c(5,5,5,5) < tol)))
}
{
# Function: chisq.gof
# Data: Conover Example 3, p. 195 
# Reference: Conover, W.J. 1980. Practical Nonparametric Statistics,
#      2nd edition.
# Description: Discrete data; test Ho: numbers are observations on a normally 
#      distributed random variable; check test statistic, df's, 
#      p-value, observed counts, and expected counts
# NOTE(review): chisq.gof is an S-PLUS goodness-of-fit function with no
#      direct base-R equivalent.  Left unchanged so the block still
#      validates the S-PLUS routine.
tol1 <- 3e-2
tol2 <- 8e-1
x <- c(23, 36, 54, 61, 73, 23, 37, 54, 61, 73, 24, 40, 56, 62, 74, 27,
       42, 57, 63, 75, 29, 43, 57, 64, 77, 31, 43, 58, 65, 81, 32, 44,
       58, 66, 87, 33, 45, 58, 68, 89, 33, 48, 58, 68, 93, 35, 48, 59, 70, 97)
# Temporarily suppress warnings for this one call; the previous warning
# level is restored immediately afterwards.
old.op <- options(warn=-1)
# Cut points just below 40/60/80 place observations equal to those values
# in the upper class; n.param.est=2 because mean and sd were estimated.
x.gof <- chisq.gof(x, cut.points=c(-Inf,39.999999999,59.999999999,79.999999999,
     Inf), dist="normal", n.param.est=2,mean=55.2,sd=18.7)
options(old.op)
all(c(abs(x.gof$statistic - .395) < tol1,
      x.gof$parameters == 1,
      abs(x.gof$p.value - 0.5296828) < tol1,
      x.gof$data.name == "x",
      all(x.gof$counts == c(12,18,15,5)),
      all(abs(x.gof$expected - c(9.658002,19.367697,15.896430,5.077871)) <
                                                                         tol2)))
}
{
# Clean up every object created by the hypothesis-testing checks above so
# nothing leaks into later validation sections; finish with the reserved
# constant TRUE (not the reassignable variable T) so the harness records
# a passing step.
rm(tol, b.test, x, y, n, p.test, x.test, x.groups, x.gof, x.blocks, fm.test)
rm(t.tst, pt.tst, w.test, x.atp, y.atp, c.test, f.test, mh.test, v.test)
rm(old.op, tol1, tol2)
TRUE
}
