# install.packages("dplyr") # install.packages("devtools") # install.packages("metafor") # install.packages("devtools") # devtools::install_github("MathiasHarrer/dmetar") library(dmetar) # provides pcurve() library(metafor) # provides escalc() library(dplyr) # pipes (%>%) and data manipulation bias_the_publications <- function(n0 = 40, # baseline sample size per group effsize = 0.5, # true effect size expressed in SD units var_effsize = 0.5, # between‑study variance of the true effect size var_n = 200) { # between‑study variance of the sample size # Helper: compute the standardized mean difference (SMD) and its variance for two samples calc_es <- function(a, b) { escalc( measure = "SMD", m1i = mean(a), sd1i = sd(a), n1i = length(a), m2i = mean(b), sd2i = sd(b), n2i = length(b) ) } # Draw the sample size for the current study (baseline n0 plus random noise) n <- n0 + round(rnorm(1, n0, sqrt(var_n))) # Draw the true effect size for this study esize <- rnorm(1, effsize, sqrt(var_effsize)) # Generate sample for group 1 (control) x <- rnorm(n) # Generate sample for group 2 (treatment), shifted by the effect size y <- rnorm(n, esize) # Calculate the observed effect size es <- calc_es(y, x) # Perform two‑sample t‑test and extract the p‑value p <- t.test(x, y, var.equal = TRUE)$p.value if (p < .05) # If the p‑value is below the significance threshold, we assume the study is published return(c(p = p, yi = es$yi, vi = es$vi)) ## Otherwise, the study remains in the file drawer (nothing is returned) } # ---------------- Simulation ---------------- set.seed(2025) B <- 300 # number of simulated studies results <- replicate( B, bias_the_publications( effsize = 0.25, var_effsize = 0.1, n0 = 50, # try 500 or 20 to see the impact of sample size var_n = 200 ), simplify = FALSE ) %>% bind_rows(.id = "study_id") %>% mutate( studlab = study_id, TE = yi, seTE = sqrt(vi) ) # ---------------- Meta‑analysis ---------------- res.meta <- rma(yi, vi, data = results) # random‑effects model 
summary(res.meta)

# Forest plot of individual study effects
forest(res.meta)

# Funnel plot allows us to visually assess the magnitude of publication bias
funnel(res.meta)

# The trim-and-fill method imputes 'missing' studies that may have been
# excluded by publication bias. Fit it once and reuse the object (the
# original recomputed it for the funnel plot).
res.trimfill <- trimfill(res.meta)
res.trimfill
funnel(res.trimfill)

# ---------------- Sensitivity: more replications ----------------
# Repeat the same simulation with many more iterations (for the sceptics).
# NOTE: n0 and var_n are passed explicitly to match the first run; the
# original relied on the function defaults (n0 = 40), silently changing
# the design between the two runs.
set.seed(2025)
B <- 3000
results <- replicate(
  B,
  bias_the_publications(
    effsize = 0.25,
    var_effsize = 0.1,
    n0 = 50,
    var_n = 200
  ),
  simplify = FALSE
) %>%
  bind_rows(.id = "study_id") %>%
  mutate(
    studlab = study_id,
    TE = yi,
    seTE = sqrt(vi)
  )

res.meta <- rma(yi, vi, data = results)
summary(res.meta)