7 Models with Limited Dependent Variables
7.1 Table 7.2
Load libraries
library(haven)
library(AER)
library(stargazer)
library(MASS)
library(censReg)
library(sampleSelection)
library(dplyr)
rm(list = ls())
Binary choice models for applying for unemployment benefits (blue-collar workers)
df <- read_dta("Data/benefits.dta")
df$agesq <- (df$age^2)/10
df$rrsq <- (df$rr^2)
OLS1 <- lm(y ~ rr + rrsq + age + agesq + tenure + slack + abol + seasonal + head + married + dkids + dykids + smsa + nwhite + yrdispl + school12 + male + statemb + stateur, data = df)
Logit1 <- glm(y ~ rr + rrsq + age + agesq + tenure + slack + abol + seasonal + head + married + dkids + dykids + smsa + nwhite + yrdispl + school12 + male + statemb + stateur,data = df, family=binomial)
Probit1 <- glm(y ~ rr + rrsq + age + agesq + tenure + slack + abol + seasonal + head + married + dkids + dykids + smsa + nwhite + yrdispl + school12 + male + statemb + stateur, data = df, family=binomial(link ="probit"))
stargazer(OLS1, Logit1, Probit1, no.space=TRUE, type="text",
title = "Table 7.2 Binary choice models for applying for unemployment benefits")
##
## Table 7.2 Binary choice models for applying for unemployment benefits
## ===================================================================
## Dependent variable:
## -----------------------------------------------
## y
## OLS logistic probit
## (1) (2) (3)
## -------------------------------------------------------------------
## rr 0.629 3.068 1.863*
## (0.384) (1.868) (1.129)
## rrsq -1.019** -4.891** -2.980**
## (0.481) (2.334) (1.412)
## age 0.016*** 0.068*** 0.042***
## (0.005) (0.024) (0.014)
## agesq -0.001** -0.006** -0.004**
## (0.001) (0.003) (0.002)
## tenure 0.006*** 0.031*** 0.018***
## (0.001) (0.007) (0.004)
## slack 0.128*** 0.625*** 0.375***
## (0.014) (0.071) (0.042)
## abol -0.007 -0.036 -0.022
## (0.025) (0.118) (0.072)
## seasonal 0.058 0.271 0.161
## (0.036) (0.171) (0.104)
## head -0.044*** -0.211*** -0.125**
## (0.017) (0.081) (0.049)
## married 0.049*** 0.242*** 0.145***
## (0.016) (0.079) (0.048)
## dkids -0.031* -0.158* -0.097*
## (0.017) (0.086) (0.052)
## dykids 0.043** 0.206** 0.124**
## (0.020) (0.097) (0.059)
## smsa -0.035** -0.170** -0.100**
## (0.014) (0.070) (0.042)
## nwhite 0.017 0.074 0.052
## (0.019) (0.093) (0.056)
## yrdispl -0.013*** -0.064*** -0.038***
## (0.003) (0.015) (0.009)
## school12 -0.014 -0.065 -0.042
## (0.017) (0.082) (0.050)
## male -0.036** -0.180** -0.107**
## (0.018) (0.088) (0.053)
## statemb 0.001*** 0.006*** 0.004***
## (0.0002) (0.001) (0.001)
## stateur 0.018*** 0.096*** 0.057***
## (0.003) (0.016) (0.009)
## Constant -0.077 -2.800*** -1.700***
## (0.122) (0.604) (0.363)
## -------------------------------------------------------------------
## Observations 4,877 4,877 4,877
## R2 0.067
## Adjusted R2 0.063
## Log Likelihood -2,873.197 -2,874.071
## Akaike Inf. Crit. 5,786.393 5,788.142
## Residual Std. Error 0.450 (df = 4857)
## F Statistic 18.331*** (df = 19; 4857)
## ===================================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
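The logit and probit coefficients in columns (2) and (3) are not directly comparable in magnitude with the linear probability model in column (1). The lines below are a sketch, not part of the original code: they rescale the estimates into average marginal effects, treating every regressor (including the dummies) as continuous, so the result is only an approximation.
# Sketch: average marginal effects, to put the logit/probit estimates on the OLS scale.
ame_logit  <- mean(dlogis(predict(Logit1,  type = "link"))) * coef(Logit1)
ame_probit <- mean(dnorm(predict(Probit1, type = "link"))) * coef(Probit1)
round(cbind(OLS = coef(OLS1), logit_AME = ame_logit, probit_AME = ame_probit), 3)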
7.2 Table 7.3
Cross-tabulation of actual and predicted outcomes (logit model)
y <- df$y
yfit <- Logit1$fitted.values
yhat <- rep(0, length(y))
yhat[yfit > 0.5] <- 1
table(y, yhat)
## yhat
## y 0 1
## 0 242 1300
## 1 171 3164
Rp2 criterion, log-likelihood value, and the pseudo and McFadden R2
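The chunk that produced the numbers below is not included in this section. The following is a sketch of how these criteria can be computed from the logit model, using the hit-rate based Rp2 and the usual pseudo and McFadden R2 definitions; the printed output below comes from the original, unshown code.
# Sketch (original chunk not shown): Rp2, null log-likelihood, pseudo and McFadden R2.
tab <- table(y, yhat)
wr1 <- 1 - sum(diag(tab)) / sum(tab)            # share of wrong predictions, fitted model
wr0 <- min(mean(y), 1 - mean(y))                # share of wrong predictions, modal-outcome rule
Rp2 <- 1 - wr1 / wr0
logL1 <- as.numeric(logLik(Logit1))             # log-likelihood, full model
logL0 <- as.numeric(logLik(glm(y ~ 1, data = df, family = binomial)))  # intercept only
pseudoR2 <- 1 - 1 / (1 + 2 * (logL1 - logL0) / length(y))
McFaddenR2 <- 1 - logL1 / logL0
round(c(Rp2 = Rp2, logL0 = logL0, pseudoR2 = pseudoR2, McFaddenR2 = McFaddenR2), 3)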
## [1] 0.046
## 0
## -3043.028
## [1] 1.106
7.3 Table 7.4
Summary statistics
df <- read_dta("Data/credit.dta")
sum_stat <- round(cbind(apply(df,2,mean), apply(df,2,median), apply(df,2,min), apply(df,2,max)), 3)
colnames(sum_stat) <- c("average","median", "minimum", "maximum")
sum_stat
## average median minimum maximum
## booklev 0.293 0.264 0.000 0.999
## ebit 0.094 0.090 -0.384 0.652
## invgrade 0.472 0.000 0.000 1.000
## logsales 7.996 7.884 1.100 12.701
## marklev 0.255 0.211 0.000 0.965
## rating 3.499 3.000 1.000 7.000
## reta 0.157 0.180 -0.996 0.980
## wka 0.140 0.123 -0.412 0.748
7.4 Table 7.5
Estimation results for the binary and ordered logit models, MLE
df$rating_bin <- ifelse(df$rating>3,1,0)
summary(logit <- glm(rating_bin ~ booklev + ebit + logsales + reta + wka, data = df, family=binomial))
##
## Call:
## glm(formula = rating_bin ~ booklev + ebit + logsales + reta +
## wka, family = binomial, data = df)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.5840 -0.5411 -0.0491 0.5286 3.4210
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -8.21432 0.86685 -9.476 < 2e-16 ***
## booklev -4.42727 0.77142 -5.739 9.52e-09 ***
## ebit 4.35474 1.43992 3.024 0.00249 **
## logsales 1.08159 0.09568 11.304 < 2e-16 ***
## reta 4.11611 0.48851 8.426 < 2e-16 ***
## wka -4.01249 0.74791 -5.365 8.10e-08 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 1273.95 on 920 degrees of freedom
## Residual deviance: 682.16 on 915 degrees of freedom
## AIC: 694.16
##
## Number of Fisher Scoring iterations: 6
summary(ologit <- polr(factor(rating) ~ booklev + ebit + logsales + reta + wka, data = df, method="logistic", Hess = TRUE))
## Call:
## polr(formula = factor(rating) ~ booklev + ebit + logsales + reta +
## wka, data = df, Hess = TRUE, method = "logistic")
##
## Coefficients:
## Value Std. Error t value
## booklev -2.7517 0.47730 -5.765
## ebit 4.7313 0.94476 5.008
## logsales 0.9406 0.05884 15.984
## reta 3.5600 0.30229 11.777
## wka -2.5802 0.48263 -5.346
##
## Intercepts:
## Value Std. Error t value
## 1|2 -0.3696 0.6332 -0.5837
## 2|3 4.8805 0.5208 9.3709
## 3|4 7.6259 0.5512 13.8361
## 4|5 9.8849 0.5916 16.7078
## 5|6 12.8829 0.6734 19.1315
## 6|7 14.7825 0.7841 18.8527
##
## Residual Deviance: 1930.614
## AIC: 1952.614
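Note that polr() reports t values but no p-values. A small sketch, not part of the original code, that adds them via the normal approximation:
# Sketch: p-values for the ordered logit coefficients and thresholds.
ctable <- coef(summary(ologit))
pvals <- 2 * pnorm(abs(ctable[, "t value"]), lower.tail = FALSE)
round(cbind(ctable, "p value" = pvals), 4)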
7.5 Table 7.6
Ordered probit model for willingness to pay
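The chunk for this table is not included in the source. The lines below are a sketch of the steps that appear to generate the output that follows; the data file name Data/wtp.dta, the object names, and the final exp(coef()) line are assumptions.
# Sketch only -- the original chunk is not shown. File name, object names and the
# final exp(coef()) line are assumptions based on the output below.
df <- read_dta("Data/wtp.dta")                 # assumed file name
df$wtp <- factor(df$wtp)                       # polr() requires a factor response
table(df$wtp)
summary(oprobit0 <- polr(wtp ~ 1, data = df, Hess = TRUE, method = "probit"))
summary(oprobit1 <- polr(wtp ~ age + female + income, data = df, Hess = TRUE, method = "probit"))
print(oprobit1)
exp(coef(oprobit1))                            # assumed source of the last three numbers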
## 1 2 3 4
## 123 18 113 58
## Call:
## polr(formula = wtp ~ 1, data = df, Hess = TRUE, method = "probit")
##
## No coefficients
##
## Intercepts:
## Value Std. Error t value
## 1|2 -0.2683 0.0719 -3.7322
## 2|3 -0.1208 0.0711 -1.6980
## 3|4 0.8931 0.0823 10.8570
##
## Residual Deviance: 756.3822
## AIC: 762.3822
## Call:
## polr(formula = wtp ~ age + female + income, data = df, Hess = TRUE,
## method = "probit")
##
## Coefficients:
## Value Std. Error t value
## age -0.1751 0.04380 -3.998
## female -0.2436 0.12859 -1.895
## income 0.1508 0.05073 2.972
##
## Intercepts:
## Value Std. Error t value
## 1|2 -0.5734 0.2273 -2.5231
## 2|3 -0.4123 0.2261 -1.8235
## 3|4 0.6812 0.2281 2.9868
##
## Residual Deviance: 718.4817
## AIC: 730.4817
## Call:
## polr(formula = wtp ~ age + female + income, data = df, Hess = TRUE,
## method = "probit")
##
## Coefficients:
## age female income
## -0.1750907 -0.2436402 0.1507513
##
## Intercepts:
## 1|2 2|3 3|4
## -0.5733947 -0.4122700 0.6812116
##
## Residual Deviance: 718.4817
## AIC: 730.4817
## age female income
## 0.8393809 0.7837696 1.1627075
Output from Stata
#. oprobit depvar age female income
#
#Iteration 0: log likelihood = -378.19111
#Iteration 1: log likelihood = -359.27192
#Iteration 2: log likelihood = -359.24085
#Iteration 3: log likelihood = -359.24085
#
#Ordered probit regression Number of obs = 312
# LR chi2(3) = 37.90
# Prob > chi2 = 0.0000
#Log likelihood = -359.24085 Pseudo R2 = 0.0501
#
#------------------------------------------------------------------------------
# depvar | Coef. Std. Err. z P>|z| [95% Conf. Interval]
#-------------+----------------------------------------------------------------
# age | -.1750912 .0437968 -4.00 0.000 -.2609313 -.0892511
# female | -.2436467 .128588 -1.89 0.058 -.4956745 .0083811
# income | .1507535 .0507301 2.97 0.003 .0513242 .2501827
#-------------+----------------------------------------------------------------
# /cut1 | -.5733944 .2272618 -1.018819 -.1279695
# /cut2 | -.4122675 .226091 -.8553976 .0308627
# /cut3 | .6812162 .2280778 .2341919 1.12824
#------------------------------------------------------------------------------
#
7.6 Table 7.7
Estimation results for the Poisson model, MLE and QMLE
df <- read_dta("Data/patents.dta")
summary(Poisson <- glm(p91 ~ lr91 + aerosp + chemist + computer + machines + vehicles + japan + us, data = df,family=poisson))
##
## Call:
## glm(formula = p91 ~ lr91 + aerosp + chemist + computer + machines +
## vehicles + japan + us, family = poisson, data = df)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -27.979 -5.246 -1.572 2.352 29.246
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -0.873731 0.065868 -13.27 < 2e-16 ***
## lr91 0.854525 0.008387 101.89 < 2e-16 ***
## aerosp -1.421850 0.095640 -14.87 < 2e-16 ***
## chemist 0.636267 0.025527 24.93 < 2e-16 ***
## computer 0.595343 0.023338 25.51 < 2e-16 ***
## machines 0.688953 0.038346 17.97 < 2e-16 ***
## vehicles -1.529653 0.041864 -36.54 < 2e-16 ***
## japan 0.222222 0.027502 8.08 6.46e-16 ***
## us -0.299507 0.025300 -11.84 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for poisson family taken to be 1)
##
## Null deviance: 29669.4 on 180 degrees of freedom
## Residual deviance: 9081.9 on 172 degrees of freedom
## AIC: 9919.6
##
## Number of Fisher Scoring iterations: 5
Qpoisson <- glm(p91 ~ lr91 + aerosp + chemist + computer + machines + vehicles + japan + us, data = df, family=quasipoisson)
coeftest(Qpoisson, vcovHC(Qpoisson, type = "HC0") )
##
## z test of coefficients:
##
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -0.873731 0.742962 -1.1760 0.239591
## lr91 0.854525 0.093695 9.1203 < 2.2e-16 ***
## aerosp -1.421850 0.380168 -3.7401 0.000184 ***
## chemist 0.636267 0.225359 2.8233 0.004753 **
## computer 0.595343 0.300803 1.9792 0.047796 *
## machines 0.688953 0.414664 1.6615 0.096619 .
## vehicles -1.529653 0.280693 -5.4496 5.049e-08 ***
## japan 0.222222 0.352840 0.6298 0.528819
## us -0.299507 0.273621 -1.0946 0.273689
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
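The robust (QML) standard errors are several times larger than the ML ones, which points to substantial overdispersion. A quick check, not part of the original code, using AER's dispersiontest():
# Sketch: test for overdispersion in the Poisson model (AER is loaded above).
dispersiontest(Poisson, trafo = 2)   # trafo = 2: alternative with variance mu + alpha * mu^2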
7.7 Table 7.8
Estimation results for the NegBin I and NegBin II models, MLE
summary(NegBinII <- glm.nb(p91 ~ lr91 + aerosp + chemist + computer + machines + vehicles + japan + us, data = df))
##
## Call:
## glm.nb(formula = p91 ~ lr91 + aerosp + chemist + computer + machines +
## vehicles + japan + us, data = df, init.theta = 0.7686768349,
## link = log)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -2.6373 -1.0264 -0.2694 0.3438 2.5966
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -0.32462 0.56234 -0.577 0.5638
## lr91 0.83148 0.08006 10.386 < 2e-16 ***
## aerosp -1.49746 0.37691 -3.973 7.10e-05 ***
## chemist 0.48861 0.26788 1.824 0.0682 .
## computer -0.17355 0.28086 -0.618 0.5366
## machines 0.05926 0.28429 0.208 0.8349
## vehicles -1.53065 0.36852 -4.153 3.27e-05 ***
## japan 0.25222 0.40983 0.615 0.5383
## us -0.59050 0.28834 -2.048 0.0406 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for Negative Binomial(0.7687) family taken to be 1)
##
## Null deviance: 426.38 on 180 degrees of freedom
## Residual deviance: 213.94 on 172 degrees of freedom
## AIC: 1659.2
##
## Number of Fisher Scoring iterations: 1
##
##
## Theta: 0.7687
## Std. Err.: 0.0812
##
## 2 x log-likelihood: -1639.1910
NegBin I (MLE), Stata output
# . nbreg p91 lr91 aerosp chemist computer machines vehicles japan us, dispersion(constant)
#
#Negative binomial regression Number of obs = 181
# LR chi2(8) = 88.55
#Dispersion = constant Prob > chi2 = 0.0000
#Log likelihood = -848.1952 Pseudo R2 = 0.0496
#------------------------------------------------------------------------------
# p91 | Coef. Std. Err. z P>|z| [95% Conf. Interval]
#-------------+----------------------------------------------------------------
# lr91 | .5784411 .0676273 8.55 0.000 .4458941 .7109882
# aerosp | -.7865472 .3367902 -2.34 0.020 -1.446644 -.1264505
# chemist | .7333364 .1851601 3.96 0.000 .3704292 1.096244
# computer | .1449746 .2063161 0.70 0.482 -.2593974 .5493467
# machines | .1558559 .254978 0.61 0.541 -.3438918 .6556035
# vehicles | -.817596 .2686091 -3.04 0.002 -1.34406 -.2911319
# japan | .4005015 .2572806 1.56 0.120 -.1037592 .9047623
# us | .1588398 .1983946 0.80 0.423 -.2300064 .547686
# _cons | .6898775 .5069637 1.36 0.174 -.3037531 1.683508
#-------------+----------------------------------------------------------------
# /lndelta | 4.556439 .1470632 4.268201 4.844678
#-------------+----------------------------------------------------------------
# delta | 95.24374 14.00685 71.39306 127.0623
#------------------------------------------------------------------------------
#LR test of delta=0: chibar2(01) = 8205.19 Prob >= chibar2 = 0.000
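glm.nb() in MASS estimates only the NegBin II specification (quadratic variance function), which is why the NegBin I results above are reproduced from Stata. As a hedged alternative, the gamlss package can fit a negative binomial with the linear variance function Var(y) = mu(1 + sigma), which corresponds to NegBin I; note that gamlss labels this family NBII, the reverse of the econometric naming. The sketch below is not part of the original code and is meant only as a cross-check against the Stata estimates.
# Sketch: NegBin I (linear variance function) via gamlss; not in the original code.
# gamlss's NBII family has Var(y) = mu * (1 + sigma), i.e. the NegBin I model above.
library(gamlss)
NegBinI <- gamlss(p91 ~ lr91 + aerosp + chemist + computer + machines + vehicles + japan + us,
                  family = NBII(), data = df)
summary(NegBinI)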
7.8 Table 7.9
Tobit models for budget shares alcohol and tobacco
df <- read_dta("Data/tobacco.dta")
df$agelnx <- df$age*df$lnx
df$nadlnx <- df$nadults*df$lnx
Tobit1 <- censReg(share1 ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx, data=df)
Tobit2 <- censReg(share2 ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx, data=df)
stargazer(Tobit1, Tobit2, type="text", no.space = TRUE, single.row = TRUE,
title="Table 7.9 Tobit models for budget shares alcohol and tobacco", column.labels = c("Alcohol", "Tobacco"))
##
## Table 7.9 Tobit models for budget shares alcohol and tobacco
## =======================================================
## Dependent variable:
## -----------------------------------
## share1 share2
## Alcohol Tobacco
## (1) (2)
## -------------------------------------------------------
## age 0.013 (0.011) -0.126*** (0.024)
## nadults 0.029* (0.017) 0.015 (0.038)
## nkids -0.003*** (0.001) 0.004*** (0.001)
## nkids2 -0.004 (0.002) -0.010* (0.005)
## lnx 0.013*** (0.003) -0.044*** (0.007)
## agelnx -0.001 (0.001) 0.009*** (0.002)
## nadlnx -0.002* (0.001) -0.001 (0.003)
## logSigma -3.712*** (0.015) -3.037*** (0.025)
## Constant -0.159*** (0.044) 0.590*** (0.093)
## -------------------------------------------------------
## Observations 2,724 2,724
## Log Likelihood 4,755.371 758.701
## Akaike Inf. Crit. -9,492.742 -1,499.401
## Bayesian Inf. Crit. -9,439.553 -1,446.212
## =======================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
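The tobit coefficients above measure effects on the latent budget share, not on the observed (censored) one. The censReg package also provides a margEff() method for the latter, evaluated at the sample means by default; a sketch, not part of the original code:
# Sketch: marginal effects of the tobit models on the expected observed budget share.
summary(margEff(Tobit1))
summary(margEff(Tobit2))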
7.9 Table 7.10
Models for budget shares alcohol and tobacco, estimated by OLS using positive observations only
AlcohOLS <- lm(share1 ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx, data=df, subset=share1>0)
TobaccOLS <- lm(share2 ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx, data=df, subset=share2>0)
stargazer(AlcohOLS, TobaccOLS, type="text", no.space = TRUE,
title ="Table 7.10 Models for budget shares alcohol and tobacco",
column.labels = c("Alcohol", "Tobacco"), single.row = TRUE)
##
## Table 7.10 Models for budget shares alcohol and tobacco
## =====================================================================
## Dependent variable:
## -------------------------------------------------
## share1 share2
## Alcohol Tobacco
## (1) (2)
## ---------------------------------------------------------------------
## age 0.008 (0.011) -0.031 (0.021)
## nadults -0.013 (0.016) -0.013 (0.032)
## nkids -0.002*** (0.001) 0.001 (0.001)
## nkids2 -0.002 (0.002) -0.003 (0.005)
## lnx -0.002 (0.003) -0.034*** (0.005)
## agelnx -0.0004 (0.001) 0.002 (0.002)
## nadlnx 0.001 (0.001) 0.001 (0.002)
## Constant 0.053 (0.044) 0.490*** (0.074)
## ---------------------------------------------------------------------
## Observations 2,258 1,036
## R2 0.051 0.154
## Adjusted R2 0.048 0.148
## Residual Std. Error 0.022 (df = 2250) 0.029 (df = 1028)
## F Statistic 17.270*** (df = 7; 2250) 26.732*** (df = 7; 1028)
## =====================================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
7.10 Table 7.11
Probit models for abstention of alcohol and tobacco
df$Alcohol <- ifelse(df$share1 > 0, 1, 0)
df$Tobacco <- ifelse(df$share2 > 0, 1, 0)
Probit_Alcohol <- glm(
Alcohol ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx + bluecol + whitecol, data = df,
family = binomial("probit"))
Probit_Tobacco <- glm(
Tobacco ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx + bluecol + whitecol,
data = df, family = binomial("probit"))
stargazer(Probit_Alcohol, Probit_Tobacco, type="text", no.space = TRUE,
title="Table 7.11 Probit models for abstention of alcohol and tobacco",
column.labels = c("Alcohol", "Tobacco"), single.row = TRUE)
##
## Table 7.11 Probit models for abstention of alcohol and tobacco
## ======================================================
## Dependent variable:
## ------------------------------------
## Alcohol Tobacco
## Alcohol Tobacco
## (1) (2)
## ------------------------------------------------------
## age 0.668 (0.652) -2.483*** (0.560)
## nadults 2.255** (1.020) 0.485 (0.872)
## nkids -0.077** (0.037) 0.081*** (0.031)
## nkids2 -0.186 (0.140) -0.212* (0.123)
## lnx 1.236*** (0.191) -0.632*** (0.163)
## agelnx -0.045 (0.049) 0.175*** (0.041)
## nadlnx -0.169** (0.074) -0.025 (0.063)
## bluecol -0.061 (0.098) 0.206** (0.084)
## whitecol 0.051 (0.085) 0.022 (0.070)
## Constant -15.882*** (2.570) 8.244*** (2.213)
## ------------------------------------------------------
## Observations 2,724 2,724
## Log Likelihood -1,159.865 -1,754.886
## Akaike Inf. Crit. 2,339.729 3,529.772
## ======================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
7.11 Table 7.12
Two-step estimation of Engel curves for alcohol and tobacco (tobit II model)
summary(Tobit_Alcohol <- selection(Alcohol ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx + bluecol + whitecol, share1 ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx, method="2step", data=df))
## --------------------------------------------
## Tobit 2 model (sample selection model)
## 2-step Heckman / heckit estimation
## 2724 observations (466 censored and 2258 observed)
## 21 free parameters (df = 2704)
## Probit selection equation:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -15.88236 2.57393 -6.170 7.83e-10 ***
## age 0.66785 0.65200 1.024 0.3058
## nadults 2.25540 1.02453 2.201 0.0278 *
## nkids -0.07705 0.03725 -2.069 0.0387 *
## nkids2 -0.18572 0.14083 -1.319 0.1874
## lnx 1.23553 0.19130 6.459 1.25e-10 ***
## agelnx -0.04480 0.04854 -0.923 0.3561
## nadlnx -0.16879 0.07423 -2.274 0.0231 *
## bluecol -0.06117 0.09777 -0.626 0.5316
## whitecol 0.05056 0.08471 0.597 0.5506
## Outcome equation:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.0542676 0.1329936 0.408 0.68327
## age 0.0077097 0.0130468 0.591 0.55462
## nadults -0.0133445 0.0247046 -0.540 0.58913
## nkids -0.0020244 0.0007637 -2.651 0.00808 **
## nkids2 -0.0024127 0.0025715 -0.938 0.34821
## lnx -0.0024288 0.0093674 -0.259 0.79544
## agelnx -0.0004044 0.0009420 -0.429 0.66772
## nadlnx 0.0008461 0.0018047 0.469 0.63922
## Multiple R-Squared:0.051, Adjusted R-Squared:0.0476
## Error terms:
## Estimate Std. Error t value Pr(>|t|)
## invMillsRatio -0.0002045 0.0165285 -0.012 0.99
## sigma 0.0214876 NA NA NA
## rho -0.0095177 NA NA NA
## --------------------------------------------
summary(Tobit_Tobacco <- selection(Tobacco ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx + bluecol + whitecol, share2 ~ age + nadults + nkids + nkids2 + agelnx + nadlnx + lnx, method="2step", data=df))
## --------------------------------------------
## Tobit 2 model (sample selection model)
## 2-step Heckman / heckit estimation
## 2724 observations (1688 censored and 1036 observed)
## 21 free parameters (df = 2704)
## Probit selection equation:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 8.24447 2.21108 3.729 0.000196 ***
## age -2.48300 0.55960 -4.437 9.48e-06 ***
## nadults 0.48520 0.87174 0.557 0.577851
## nkids 0.08128 0.03083 2.637 0.008424 **
## nkids2 -0.21166 0.12305 -1.720 0.085522 .
## lnx -0.63208 0.16320 -3.873 0.000110 ***
## agelnx 0.17474 0.04131 4.230 2.41e-05 ***
## nadlnx -0.02534 0.06292 -0.403 0.687181
## bluecol 0.20642 0.08343 2.474 0.013417 *
## whitecol 0.02153 0.06943 0.310 0.756453
## Outcome equation:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 0.4515814 0.1086284 4.157 3.32e-05 ***
## age -0.0172991 0.0358591 -0.482 0.629547
## nadults -0.0174379 0.0339635 -0.513 0.607692
## nkids 0.0007643 0.0015130 0.505 0.613469
## nkids2 -0.0020755 0.0053883 -0.385 0.700125
## agelnx 0.0012243 0.0025454 0.481 0.630568
## nadlnx 0.0013650 0.0024238 0.563 0.573370
## lnx -0.0301094 0.0090459 -3.329 0.000885 ***
## Multiple R-Squared:0.1542, Adjusted R-Squared:0.1476
## Error terms:
## Estimate Std. Error t value Pr(>|t|)
## invMillsRatio -0.009018 0.018589 -0.485 0.628
## sigma 0.029881 NA NA NA
## rho -0.301792 NA NA NA
## --------------------------------------------
stargazer(Tobit_Alcohol, Tobit_Tobacco, type="text", no.space = TRUE,
title="Table 7.12 Two-step estimation of Engel curves for alcohol and tobacco",
column.labels = c("Alcohol", "Tobacco"), single.row = TRUE)
##
## Table 7.12 Two-step estimation of Engel curves for alcohol and tobacco
## =======================================================
## Dependent variable:
## -----------------------------------
## share1 share2
## Alcohol Tobacco
## (1) (2)
## -------------------------------------------------------
## age 0.008 (0.013) -0.017 (0.036)
## nadults -0.013 (0.025) -0.017 (0.034)
## nkids -0.002*** (0.001) 0.001 (0.002)
## nkids2 -0.002 (0.003) -0.002 (0.005)
## lnx -0.002 (0.009) -0.030*** (0.009)
## agelnx -0.0004 (0.001) 0.001 (0.003)
## nadlnx 0.001 (0.002) 0.001 (0.002)
## Constant 0.054 (0.133) 0.452*** (0.109)
## -------------------------------------------------------
## Observations 2,724 2,724
## rho -0.010 -0.302
## Inverse Mills Ratio -0.0002 (0.017) -0.009 (0.019)
## =======================================================
## Note: *p<0.1; **p<0.05; ***p<0.01
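For comparison with the two-step estimates above, sampleSelection can also fit the same tobit II model by full maximum likelihood (method = "ml", which is in fact the default of selection()). A sketch for the alcohol equation, not part of the original code:
# Sketch: ML estimation of the alcohol sample-selection model (cf. the 2-step results).
Tobit_Alcohol_ML <- selection(Alcohol ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx + bluecol + whitecol,
                              share1 ~ age + nadults + nkids + nkids2 + lnx + agelnx + nadlnx,
                              method = "ml", data = df)
summary(Tobit_Alcohol_ML)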