// -*- mode: C++; -*-
/* ############################################################
 * General K-component mixture model, no covariates.
 * ############################################################
 * p -- variant.  Strong priors on mixture component SDs
 * (beta0, tau0, & gamma0).
 * Unconstrained variant: component labels are not ordered, so
 * components must be identified post hoc.  Use
 * model.constrained <- FALSE in the driver script.
 *
 * Modernized for Stan >= 2.33: `<-` -> `=`, `##` -> `//`,
 * `normal_log` -> `normal_lpdf`, `increment_log_prob` -> `target +=`,
 * old array syntax -> `array[...]`.  Scale parameters now carry
 * `<lower=0>` so their lognormal / chi-square priors have support.
 */
data {
  /* Fixed structural parameters */
  int<lower=1> I;                 // number of essays
  array[I] int<lower=0> J;        // number of events per essay
  int<lower=1> Jmax;              // Jmax = max(J)
  int<lower=1> K;                 // number of mixture components

  /* Hyperparameters for the second-level model */
  real<lower=0> alphaNu;          // degrees of freedom for alphaN prior
  vector<lower=0>[K] alpha0mu;    // Dirichlet prior for alpha0
  vector[K] mu0m;                 // location of grand-mean prior
  vector<lower=0>[K] mu0s;        // scale of grand-mean prior
  vector[K] lbeta0m;              // location of log-sd prior (means)
  vector<lower=0>[K] lbeta0s;     // scale of log-sd prior (means)
  vector[K] tau0m;                // location of mean log-precision prior
  vector<lower=0>[K] tau0s;       // scale of mean log-precision prior
  vector[K] lgamma0m;             // location of log-sd prior (precisions)
  vector<lower=0>[K] lgamma0s;    // scale of log-sd prior (precisions)

  /* Data */
  // Ragged: only Y[i, 1:J[i]] is used.  NOTE(review): Stan cannot read
  // NA -- pad unused cells with any finite dummy value; confirm the
  // data-prep script does so.
  matrix[I, Jmax] Y;              // log pause times
}
parameters {
  /* 1st-level parameters */
  array[I] simplex[K] pi;         // per-essay mixture probabilities

  /* 2nd-level random effects (non-centered parameterization of the
   * group means/precisions -- avoids sampling precisions directly) */
  array[I] vector[K] theta;       // standardized effect for mean
  array[I] vector[K] eta;         // standardized effect for log precision

  /* 2nd-level hyperparameters */
  real<lower=0> alphaN;           // concentration size (chi-square support)
  simplex[K] alpha0;              // default mixing probabilities
  vector[K] mu0;                  // grand means for components
  vector<lower=0>[K] beta0;       // SD of mean distributions (lognormal support)
  vector[K] tau0;                 // grand mean of log precisions
  vector<lower=0>[K] gamma0;      // SD of log precisions (lognormal support)
}
transformed parameters {
  array[I] vector[K] mu;          // component means
  array[I] vector[K] tau;         // component log precisions
  array[I] vector[K] sigma;       // component standard deviations
  vector<lower=0>[K] alpha;       // Dirichlet weights for pi

  for (i in 1:I) {
    // Non-centered: mu[i] ~ N(mu0, beta0), tau[i] ~ N(tau0, gamma0).
    mu[i] = mu0 + theta[i] .* beta0;
    tau[i] = tau0 + eta[i] .* gamma0;
    // sd = 1 / sqrt(precision) = exp(-tau/2); vectorized form of
    // sigma[i,k] = 1 / sqrt(exp(tau[i,k])).
    sigma[i] = exp(-0.5 * tau[i]);
  }
  alpha = alpha0 * alphaN;
}
model {
  // Priors for 2nd-level (hyper)parameters.
  alphaN ~ chi_square(alphaNu);
  alpha0 ~ dirichlet(alpha0mu);
  mu0 ~ normal(mu0m, mu0s);
  beta0 ~ lognormal(lbeta0m, lbeta0s);
  tau0 ~ normal(tau0m, tau0s);
  gamma0 ~ lognormal(lgamma0m, lgamma0s);

  // Priors for individual essays.
  for (i in 1:I) {
    // Small offset keeps the Dirichlet proper even if alpha -> 0.
    pi[i] ~ dirichlet(alpha + .01);
    // Standard-normal effects; scaled in transformed parameters.
    theta[i] ~ std_normal();
    eta[i] ~ std_normal();
  }

  /* Likelihood: marginalize the latent component indicator.
   * Equivalent BUGS/JAGS form:
   *   Z[i,j] ~ dcat(pi[i, 1:K])
   *   Y[i,j] ~ dnorm(mu[i, Z[i,j]], tau[i, Z[i,j]])
   */
  for (i in 1:I) {
    for (j in 1:J[i]) {
      vector[K] lps;              // per-component log joint
      for (k in 1:K) {
        lps[k] = log(pi[i, k]) + normal_lpdf(Y[i, j] | mu[i, k], sigma[i, k]);
      }
      target += log_sum_exp(lps);
    }
  }
}