# Read the data: a CSV (comma-separated values) file, which can be opened with Notepad or Excel
dta <- read.csv("mathmod.csv", header = TRUE)
# Load PROCESS; process.r must be readable (in the working directory, or give its full path)
source('process.r')
##
## ********************* PROCESS for R Version 4.3.1 *********************
##
## Written by Andrew F. Hayes, Ph.D. www.afhayes.com
## Documentation available in Hayes (2022). www.guilford.com/p/hayes3
##
## ***********************************************************************
##
## PROCESS is now ready for use.
## Copyright 2020-2023 by Andrew F. Hayes ALL RIGHTS RESERVED
## Workshop schedule at http://haskayne.ucalgary.ca/CCRAM
##
# Variable names passed to process() must be quoted as strings
process(data = dta, y = 'math', x = 'training', w = 'gender', model = 1, wcatcode = 1,
        center = 2, jn = 1, modelbt = 1, seed = 20231029)
##
## ********************* PROCESS for R Version 4.3.1 *********************
##
## Written by Andrew F. Hayes, Ph.D. www.afhayes.com
## Documentation available in Hayes (2022). www.guilford.com/p/hayes3
##
## ***********************************************************************
##
## Model : 1
## Y : math
## X : training
## W : gender
##
## Sample size: 101
##
## Custom seed: 20231029
##
##
## ***********************************************************************
## Outcome Variable: math
##
## Model Summary:
## R R-sq MSE F df1 df2 p
## 0.6164 0.3799 0.9086 19.8111 3.0000 97.0000 0.0000
##
## Model:
## coeff se t p LLCI ULCI
## constant 3.2928 0.1579 20.8486 0.0000 2.9794 3.6063
## training -0.3394 0.0539 -6.3009 0.0000 -0.4463 -0.2325
## gender -0.2355 0.2000 -1.1776 0.2418 -0.6325 0.1614
## Int_1 0.5043 0.0685 7.3666 0.0000 0.3684 0.6401
##
## Product terms key:
## Int_1 : training x gender
##
## Test(s) of highest order unconditional interaction(s):
## R2-chng F df1 df2 p
## X*W 0.3469 54.2674 1.0000 97.0000 0.0000
## ----------
## Focal predictor: training (X)
## Moderator: gender (W)
##
## Conditional effects of the focal predictor at values of the moderator(s):
## gender effect se t p LLCI ULCI
## 0.0000 -0.3394 0.0539 -6.3009 0.0000 -0.4463 -0.2325
## 1.0000 0.1648 0.0422 3.9029 0.0002 0.0810 0.2487
##
## ***********************************************************************
## Bootstrapping progress:
##
##
## ********** BOOTSTRAP RESULTS FOR REGRESSION MODEL PARAMETERS **********
##
## Outcome variable: math
##
## Coeff BootMean BootSE BootLLCI BootULCI
## constant 3.2928 3.2897 0.1582 2.9773 3.5933
## training -0.3394 -0.3396 0.0546 -0.4502 -0.2368
## gender -0.2355 -0.2292 0.1993 -0.6050 0.1804
## Int_1 0.5043 0.5045 0.0707 0.3672 0.6470
##
## ******************** ANALYSIS NOTES AND ERRORS ************************
##
## Level of confidence for all confidence intervals in output: 95
##
## Number of bootstraps for percentile bootstrap confidence intervals: 5000
##
## NOTE: The following variables were mean centered prior to analysis:
## training
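# Optional cross-check (a minimal sketch, not part of the PROCESS output above):
# Model 1 is an ordinary moderated regression. With training mean-centered (as in
# the PROCESS note above), the interaction coefficient should reproduce Int_1 and
# the two quantities below should reproduce the conditional-effects table.
# The helper column training_c is added here only for this check.
dta$training_c <- dta$training - mean(dta$training)
m_check <- lm(math ~ training_c * gender, data = dta)
b <- coef(m_check)
b["training_c:gender"]                              # should match Int_1
c(gender_0 = unname(b["training_c"]),               # conditional effect at gender = 0
  gender_1 = unname(b["training_c"] + b["training_c:gender"]))  # at gender = 1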
# Plot the interaction
dta$gender <- as.factor(dta$gender)
m2 <- lm(math ~ training+gender+training:gender, data = dta)
interactions::interact_plot(m2, pred = training, modx = gender, interval = TRUE,
int.type = "confidence", int.width = .8)
dta <- fastDummies::dummy_cols(dta, select_columns = c("gender"), remove_first_dummy = TRUE)
dta$int <- dta$training * dta$gender_1
model1 <-'
math ~ b1*training + b2*gender_1 + b3*int
sslope1 := b1+b3*0
sslope2 := b1+b3*1
'
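# The := lines above define the simple slopes as functions of the labeled paths.
# As a syntax sketch only, their difference (which here is simply b3) could be
# tested the same way by adding one more defined parameter, e.g. a hypothetical model1b:
model1b <- '
math ~ b1*training + b2*gender_1 + b3*int
sslope1 := b1 + b3*0
sslope2 := b1 + b3*1
ssdiff  := sslope2 - sslope1
'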
# Path analysis output
fit <- lavaan::sem(model1, data=dta)
summary(fit)
## lavaan 0.6.15 ended normally after 1 iteration
##
## Estimator ML
## Optimization method NLMINB
## Number of model parameters 4
##
## Number of observations 101
##
## Model Test User Model:
##
## Test statistic 0.000
## Degrees of freedom 0
##
## Parameter Estimates:
##
## Standard errors Standard
## Information Expected
## Information saturated (h1) model Structured
##
## Regressions:
## Estimate Std.Err z-value P(>|z|)
## math ~
## training (b1) -0.339 0.053 -6.429 0.000
## gender_1 (b2) -2.757 0.372 -7.420 0.000
## int (b3) 0.504 0.067 7.517 0.000
##
## Variances:
## Estimate Std.Err z-value P(>|z|)
## .math 0.873 0.123 7.106 0.000
##
## Defined Parameters:
## Estimate Std.Err z-value P(>|z|)
## sslope1 -0.339 0.053 -6.429 0.000
## sslope2 0.165 0.041 3.983 0.000
# Bootstrap confidence intervals for the path coefficients and simple slopes
set.seed(1234)
fit <- lavaan::sem(model1, data = dta, se = "bootstrap", bootstrap = 501)
lavaan::parameterEstimates(fit, ci = TRUE, boot.ci.type = "bca.simple")
| lhs      | op | rhs      | label   | est    | se     | z     | pvalue   | ci.lower | ci.upper |
|----------|----|----------|---------|--------|--------|-------|----------|----------|----------|
| math     | ~  | training | b1      | -0.339 | 0.0528 | -6.43 | 1.28e-10 | -0.443   | -0.236   |
| math     | ~  | gender_1 | b2      | -2.76  | 0.372  | -7.42 | 1.17e-13 | -3.49    | -2.03    |
| math     | ~  | int      | b3      | 0.504  | 0.0671 | 7.52  | 5.6e-14  | 0.373    | 0.636    |
| math     | ~~ | math     |         | 0.873  | 0.123  | 7.11  | 1.19e-12 | 0.632    | 1.11     |
| training | ~~ | training |         | 8.5    | 0      |       |          | 8.5      | 8.5      |
| training | ~~ | gender_1 |         | 0.291  | 0      |       |          | 0.291    | 0.291    |
| training | ~~ | int      |         | 6.64   | 0      |       |          | 6.64     | 6.64     |
| gender_1 | ~~ | gender_1 |         | 0.237  | 0      |       |          | 0.237    | 0.237    |
| gender_1 | ~~ | int      |         | 1.3    | 0      |       |          | 1.3      | 1.3      |
| int      | ~~ | int      |         | 12.1   | 0      |       |          | 12.1     | 12.1     |
| sslope1  | := | b1+b3*0  | sslope1 | -0.339 | 0.0528 | -6.43 | 1.28e-10 | -0.443   | -0.236   |
| sslope2  | := | b1+b3*1  | sslope2 | 0.165  | 0.0414 | 3.98  | 6.82e-05 | 0.0837   | 0.246    |
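# Convenience sketch (assumes the fit object above): keep only the defined
# simple-slope rows from the full parameter table.
pe <- lavaan::parameterEstimates(fit, ci = TRUE, boot.ci.type = "bca.simple")
subset(pe, op == ":=", select = c(label, est, se, ci.lower, ci.upper))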
# Plot the model and its estimates
lavaanPlot::lavaanPlot(model = fit,
edge_options = list(color = "grey"),
coefs = TRUE,
stand = TRUE)
# Plot the interaction
dta$gender <- as.factor(dta$gender)
m2 <- lm(math ~ training+gender+training:gender, data = dta)
interactions::interact_plot(m2, pred = training, modx = gender, interval = TRUE,
int.type = "confidence", int.width = .8)
# Regression analysis with a model-comparison table
dta$gender <- as.factor(dta$gender)
m1 <- lm(math ~ training+gender, data = dta)
m2 <- lm(math ~ training+gender+training:gender, data = dta)
options(huxtable.knitr_output_format="md")
jtools::export_summs(m1,m2,
model.names = c("math", "math"),
error_format = "[{conf.low},{conf.high}]")
## Registered S3 methods overwritten by 'broom':
## method from
## tidy.glht jtools
## tidy.summary.glht jtools
## Warning in to_md.huxtable(structure(list(names = c("", "(Intercept)", "", :
## Markdown cannot handle cells with colspan/rowspan > 1
## Warning in to_md.huxtable(structure(list(names = c("", "(Intercept)", "", :
## Can't vary column alignment in markdown; using first row
|                  | math          | math          |
|------------------|---------------|---------------|
| (Intercept)      | 3.66 ***      | 4.99 ***      |
|                  | [3.15,4.18]   | [4.44,5.54]   |
| training         | -0.03         | -0.34 ***     |
|                  | [-0.11,0.05]  | [-0.45,-0.23] |
| gender1          | -0.38         | -2.76 ***     |
|                  | [-0.87,0.11]  | [-3.51,-2.00] |
| training:gender1 |               | 0.50 ***      |
|                  |               | [0.37,0.64]   |
| N                | 101           | 101           |
| R2               | 0.03          | 0.38          |

*** p < 0.001; ** p < 0.01; * p < 0.05.
# Test the difference in explained variance between the two models
Rsquared_m1 <- broom::glance(m1)$r.squared
Rsquared_m2 <- broom::glance(m2)$r.squared
mrst <- c(m1_Rsquared=Rsquared_m1,m2_Rsquared=Rsquared_m2,deltaRsquared=Rsquared_m2-Rsquared_m1)
round(mrst,3)
## m1_Rsquared m2_Rsquared deltaRsquared
## 0.033 0.380 0.347
anova(m1,m2)
| Res.Df | RSS  | Df | Sum of Sq | F    | Pr(>F)  |
|--------|------|----|-----------|------|---------|
| 98     | 137  |    |           |      |         |
| 97     | 88.1 | 1  | 49.3      | 54.3 | 5.8e-11 |
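# Cross-check (a sketch): the same F test for the R-squared change can be computed
# directly from the two R-squared values, with df1 = 1 added term and df2 = the
# residual df of the larger model; it should match the anova() row above.
df1 <- 1
df2 <- df.residual(m2)
F_chng <- ((Rsquared_m2 - Rsquared_m1) / df1) / ((1 - Rsquared_m2) / df2)
c(F = F_chng, p = pf(F_chng, df1, df2, lower.tail = FALSE))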
# Plot the interaction
dta$gender <- as.factor(dta$gender)
m2 <- lm(math ~ training+gender+training:gender, data = dta)
interactions::interact_plot(m2, pred = training, modx = gender, interval = TRUE,
int.type = "confidence", int.width = .8)
# Test the simple slopes
reghelper::simple_slopes(m2,
                         levels = list(gender = c('0', '1', 'sstest')))
| training | gender | Test Estimate | Std. Error | t value | df | Pr(>\|t\|) |
|----------|--------|---------------|------------|---------|----|------------|
| sstest   | 0      | -0.339        | 0.0539     | -6.3    | 97 | 8.7e-09    |
| sstest   | 1      | 0.165         | 0.0422     | 3.9     | 97 | 0.000175   |
| 2.069983 | sstest | -1.71         | 0.269      | -6.37   | 97 | 6.39e-09   |
| 5        | sstest | -0.236        | 0.2        | -1.18   | 97 | 0.242      |
| 7.930017 | sstest | 1.24          | 0.297      | 4.18    | 97 | 6.28e-05   |
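# Alternative sketch (assumes the emmeans package is installed; it is not used
# elsewhere in this workflow): the same two simple slopes, with confidence
# intervals, can be obtained from the fitted model with emtrends().
emmeans::emtrends(m2, ~ gender, var = "training")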