## File Name: reglca_threshold_parameter.R
## File Version: 0.15
reglca_threshold_parameter <- function(x, regular_type, lambda, vt=1 )
{
multiply <- TRUE
# multiply <- ( vt < 1 )
if (multiply){
x <- x * vt
}
y <- cdm_parameter_regularization(x=x, regular_type=regular_type, regular_lam=lambda)
if (multiply){
y <- y / vt
}
return(y)
}
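#--- illustration (not part of the package source): cdm_parameter_regularization()
#--- is a CDM-internal threshold operator; for the lasso penalty it reduces to
#--- soft thresholding, so the scale / threshold / unscale pattern above amounts
#--- to the following minimal sketch (soft_threshold is a hypothetical stand-in)
soft_threshold <- function(x, lambda){ sign(x) * pmax( abs(x) - lambda, 0 ) }
x <- 0.40 ; lambda <- 0.15 ; vt <- 2
soft_threshold( x*vt, lambda ) / vt    # 0.325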
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_threshold_parameter.R
|
## File Name: reglca_update_parameter.R
## File Version: 0.588
reglca_update_parameter <- function(parm, pp, C, W, h, lambda, regular_type,
cd_steps, conv, max_increment, vt=NULL, prob_min=0, increment_factor=1.02,
ii=NULL, eps=1e-8, iter=NULL)
{
iterate <- TRUE
iter <- 0
parchange <- 1
vt_null <- is.null(vt)
bounds <- c(prob_min, 1-prob_min)
NC <- length(parm)
ind_pp <- pp:NC
#*** iterations
while (iterate){
parm_old <- parm
probs0 <- reglca_calc_probs(parm=parm, eps=1E-5)
# evaluate log-likelihood
q0 <- reglca_freq_ll( x=probs0, C=C, W=W )
# 1st derivative
contr <- C / probs0 - W / (1-probs0)
f1 <- sum(contr[ind_pp])
# 2nd derivative
contr <- -C / probs0^2 - W / (1-probs0)^2
f2 <- sum(contr[ind_pp])
# parameter update
incr <- - sign(f2) * f1 / ( abs(f2) + eps )
incr <- cdm_trim_increment( increment=incr, max.increment=max_increment, type=1)
max_increment <- min( .10, max( abs(incr) ) / increment_factor )
parm[pp] <- parm[pp] + incr
parm[pp] <- cdm_squeeze( x=parm[pp], bounds=bounds )
#-- apply threshold operator
if (pp>1){
if ( vt_null ){
vt <- abs(f2) + eps
}
parm[pp] <- reglca_threshold_parameter(x=parm[pp], regular_type=regular_type,
lambda=lambda, vt=vt)
}
iter <- iter + 1
if ( iter > cd_steps ){ iterate <- FALSE }
parchange <- abs( parm[pp] - parm_old[pp] )
if ( parchange < conv ){ iterate <- FALSE }
}
#-- output
return(parm)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/reglca_update_parameter.R
|
## File Name: replace_NA.R
## File Version: 0.04
replace_NA <- function( matr, value=0 )
{
matr[ is.na(matr) ] <- value
return(matr)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/replace_NA.R
|
## File Name: rmsd_chisquare.R
## File Version: 0.09
##########################################
# auxiliary function: item-wise chi square statistics based on observed
# and expected counts (used for RMSD-type item fit)
rmsd_chisquare <- function( n.ik, pi.k, probs, eps=10^(-30) )
{
# probs ... [ classes, items, categories ]
# n.ik ... [ classes, items, categories, groups ]
# N.ik ... [ classes, items, categories]
N.ik <- n.ik[,,,1]
G <- dim(n.ik)[4]
pitot <- pi.k[,1]
eps <- 1E-10    # note: this overrides the eps function argument above
if (G>1){
for (gg in 2:G ){
N.ik <- N.ik + n.ik[,,,gg]
pitot <- pitot + pi.k[,gg]
}
}
#*** extract maximum number of categories
maxK <- apply( N.ik, c(2,3), sum, na.rm=TRUE )
maxK <- rowSums( maxK > eps )
# calculate summed counts
N.ik_tot <- array( 0, dim=dim(N.ik) )
N.ik_tot[,,1] <- N.ik[,,1,drop=FALSE]
K <- dim(N.ik)[3]
for (kk in 2:K){
N.ik_tot[,,1] <- N.ik_tot[,,1,drop=FALSE] + N.ik[,,kk,drop=FALSE]
}
for (kk in 2:K){
N.ik_tot[,,kk] <- N.ik_tot[,,1]
}
E.ik <- N.ik_tot*probs
#--- calculate chi square
chisq_stat0 <- ( N.ik - E.ik )^2 / E.ik
chisq_stat <- chisq_stat0[,,1]
for (kk in 2:K){
chisq_stat <- chisq_stat + chisq_stat0[,,kk]
}
chisq_stat <- colSums(chisq_stat)
return(chisq_stat)
}
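#--- illustration (not part of the package source): a toy call with 2 latent
#--- classes, 3 items, 2 categories and a single group; the function returns
#--- one chi square statistic per item, comparing observed counts n.ik with
#--- expected counts E.ik=N.ik_tot*probs
set.seed(1)
TP <- 2 ; I <- 3 ; K <- 2 ; G <- 1
probs <- array( stats::runif(TP*I*K), dim=c(TP,I,K) )
probs <- probs / array( rep( apply( probs, c(1,2), sum ), K ), dim=c(TP,I,K) )
n.ik <- array( stats::rpois(TP*I*K*G, lambda=20), dim=c(TP,I,K,G) )
pi.k <- matrix( 1/TP, nrow=TP, ncol=G )
rmsd_chisquare( n.ik=n.ik, pi.k=pi.k, probs=probs )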
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/rmsd_chisquare.R
|
## File Name: rowMaxs.R
## File Version: 1.13
################################################################################
# utility methods for computing row maxima (values and column indices)        #
################################################################################
# rowMaxs <- function(mat){
# Call: from din()
# Input: numeric matrix
# Output: row maxima of input matrix
# n <- nrow(mat)
# p <- ncol(mat)
# x <- as.vector(mat)
# x <- matrix(x[order(rep(1:n, p), x)], p, n)
# x[p, ]
#}
#########################################################
rowMaxs <- function(mat)
{
n <- nrow(mat)
p <- ncol(mat)
maxval <- mat[,1]
for ( cc in 2:p){
maxval <- ifelse( mat[,cc] > maxval, mat[,cc], maxval )
}
return(maxval)
}
###########################################################
rowMaxs2 <- function(mat)
{
n <- nrow(mat)
p <- ncol(mat)
maxval <- mat[,1]
maxind <- 1
for ( cc in 2:p){
ind <- ( mat[,cc] > maxval )
maxval <- ifelse( ind, mat[,cc], maxval )
maxind <- ifelse( ind, cc, maxind)
}
res <- list( "maxval"=maxval, "maxind"=maxind )
return(res)
}
#############################################################
rowMaxs3 <- function(mat)
{
n <- nrow(mat)
p <- ncol(mat)
maxval <- mat[,1]
maxind <- 1
for ( cc in 2:p){
maxval <- ifelse( mat[,cc] > maxval, mat[,cc], maxval )
maxind <- maxind + ( cc - maxind ) * (mat[,cc]==maxval )
}
res <- list( "maxval"=maxval, "maxind"=maxind )
return(res)
}
# rowMaxs3 is faster than rowMaxs2!
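#--- illustration (not part of the package source): the three variants return
#--- the same row maxima; in case of ties rowMaxs2 reports the first and
#--- rowMaxs3 the last column index attaining the maximum
mat <- matrix( c(1,5, 4,2, 4,9), nrow=2 )
rowMaxs(mat)             # 4 9
rowMaxs2(mat)$maxind     # 2 3
rowMaxs3(mat)$maxind     # 3 3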
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/rowMaxs.R
|
## File Name: rowProds.R
## File Version: 1.03
rowProds <- function(matr)
{
# Call: from din()
# Input: numeric matrix with positive entries
# Output: row products of input matrix
exp( rowSums( log(matr + 10^(-300) ) ) )
}
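#--- illustration (not part of the package source): taking products in log
#--- space uses the fast rowSums() primitive; the offset 10^(-300) only keeps
#--- log(0) finite. A quick check against a direct product:
matr <- matrix( stats::runif(12), nrow=3 )
cbind( rowProds(matr), apply( matr, 1, prod ) )    # agree up to the offset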
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/rowProds.R
|
## File Name: rowProds2.R
## File Version: 1.02
#************************************************************************
rowProds2 <- function(matr)
{
y <- matr[,1]
for (ii in 2:dim(matr)[2]){
y <- y * matr[,ii]
}
return(y)
}
#...................................................................
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/rowProds2.R
|
## File Name: rrum.param.R
## File Version: 0.26
#***************************************************************
# RRUM parametrization
.rrum.param <- function( delta.summary, q.matrix )
{
#---
# RRUM parametrization
# log-linear form: log( P(X=1) )=b0 + b1*alpha1 + b2*alpha2
# RRUM form:       P(X=1)=pi * r1^(1-alpha1) * r2^(1-alpha2)
#=> log( P(X=1) )=log[ pi * r1 * r2 * r1^(-alpha1) * r2^(-alpha2) ]
#                =log(pi) + log(r1) + log(r2) - log(r1)*alpha1 - log(r2)*alpha2
#=> b1=-log(r1), hence r1=exp( -b1 )
#=> log(pi)=b0 + b1 + b2, hence pi=exp( b0 + b1 + b2 )
I <- nrow(q.matrix)
K <- ncol(q.matrix)
rrum.params <- matrix( NA, I, K+1 )
rownames(rrum.params) <- delta.summary[ delta.summary$partype==0, "item" ]
colnames(rrum.params) <- c( "pi", paste( "r_", colnames(q.matrix), sep="") )
for (ii in 1:I){
d.ii <- delta.summary[ delta.summary$itemno==ii, ]
rrum.params[ii,"pi"] <- exp( sum( d.ii$est ) )
rrum.params[ ii, which( q.matrix[ii,]==1) +1 ] <- exp( - d.ii$est[-1] )
}
return( rrum.params )
}
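#--- illustration (not part of the package source): numeric check of the
#--- derivation above for a single item with two attributes
b <- c(-2.0, 0.8, 1.1)                   # b0, b1, b2
pi_ <- exp( sum(b) ) ; r <- exp( -b[2:3] )
alpha <- c(1,0)
exp( b[1] + sum( b[2:3]*alpha ) )        # log-linear form
pi_ * prod( r^(1-alpha) )                # RRUM form, identical value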
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/rrum.param.R
|
## File Name: rrumpars2logpars.R
## File Version: 0.05
#######################################################
# pi * r1^(1-a1) * r2^(1-a2)
#=pi * r1 * r2 * r1^(-a1) * r2^(-a2)
#=pi * r1 * r2 * (1/r1)^a1 * (1/r2)^a2
rrumpars2logpars <- function(v1){
l1 <- rep(0,length(v1))
l1[-1] <- log( 1 / v1[-1] )
l1[1] <- log( prod(v1) )
return(l1)
}
#######################################################
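#--- illustration (not part of the package source): rrumpars2logpars() maps
#--- RRUM parameters (pi, r1, r2) to the log-linear parametrization, so that
#--- exp( l1[1] + sum( l1[-1]*alpha ) ) reproduces pi * r1^(1-a1) * r2^(1-a2)
v1 <- c(0.9, 0.3, 0.6)                   # pi, r1, r2
l1 <- rrumpars2logpars(v1)
alpha <- c(0,1)
exp( l1[1] + sum( l1[-1]*alpha ) )       # 0.9 * 0.3^1 * 0.6^0=0.27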
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/rrumpars2logpars.R
|
## File Name: sequential.items.R
## File Version: 1.11
# converts a dataset with polytomous item responses
# into a dataset with sequential dichotomous items
sequential.items <- function( data )
{
N <- nrow(data)
I <- ncol(data)
maxK <- apply( data, 2, max, na.rm=TRUE )
dat.exp <- matrix( NA, nrow=N, ncol=sum(maxK) )
vv <- 1
for (ii in 1:I){
dat.ii <- data[,ii]
kk <- 1
dat.exp[, vv ] <- 1 * ( dat.ii >=kk )
vv <- vv + 1 # column index
if (maxK[ii]>1){
for (kk in 2:maxK[ii]){
dat.exp[, vv ] <- 1 * ( dat.ii >=kk )
dat.exp[ dat.ii < kk - 1, vv ] <- NA
vv <- vv + 1
}
}
}
#****
# variable names
varnames <- sapply( 1:I, FUN=function(ii){
if ( maxK[ii]==1 ){ v1 <- colnames(data)[ii] } else
{ v1 <- paste0( colnames(data)[ii], "_Cat", 1:maxK[ii] ) }
v1
} )
colnames(dat.exp) <- unlist( varnames )
dat.exp <- as.data.frame( dat.exp)
# item information table
iteminfo <- data.frame("item"=rep( colnames(data), maxK ) )
iteminfo$itemindex <- match( iteminfo$item, colnames(data) )
iteminfo$category <- unlist( sapply( 1:I, FUN=function(ii){ 1:(maxK[ii]) },
simplify=FALSE) )
iteminfo$pseudoitem <- colnames(dat.exp)
res <- list( "dat.expand"=dat.exp, "iteminfo"=iteminfo,
"maxK"=maxK)
return(res)
}
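#--- illustration (not part of the package source): a polytomous item with
#--- categories 0-2 is split into the pseudo-items I2_Cat1 (X>=1) and
#--- I2_Cat2 (X>=2); I2_Cat2 is missing for persons who already failed the
#--- first step (X=0)
data <- data.frame( I1=c(0,1,1,0), I2=c(0,1,2,2) )
res <- sequential.items( data=data )
res$dat.expand
res$iteminfo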
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/sequential.items.R
|
## File Name: sim.din.R
## File Version: 1.13
################################################################################
# utility function for data simulation from a CDM model #
################################################################################
sim.din <- function( N=0, q.matrix, guess=rep(.2, nrow(q.matrix) ), slip=guess,
mean=rep(0, ncol(q.matrix) ), Sigma=diag( ncol(q.matrix) ), rule="DINA",
alpha=NULL)
{
# simulated normal variates
if (N>0){
normsim <- CDM_rmvnorm( N, mean, Sigma)
# dichotomous variates
dichsim <- 1 * ( normsim > 0 )
}
if ( ! is.null( alpha) ){
dichsim <- alpha
N <- nrow(alpha)
}
# number of mastered attributes among those required by each item
poss.attr <- dichsim %*% t( q.matrix )
# number of attributes required for solving each item
# under the specified DINA or DINO rule
ness.attr <- ( rowSums(q.matrix) )*( rule=="DINA") + 1* ( rule=="DINO" )
# latent response
eta.pp <- poss.attr >=outer( rep(1,N), ness.attr )
# simulate responses according to the DINA or DINO rule
R <- matrix( eta.pp * stats::rbinom( N*nrow(q.matrix), size=1, prob=1 - outer( rep(1,N), slip ) ) +
( 1 - eta.pp) * stats::rbinom( N*nrow(q.matrix), size=1,
prob=outer( rep(1,N), guess ) ), ncol=nrow(q.matrix) )
colnames(R) <- paste( "I", substring( 1000 + 1:( nrow(q.matrix) ), 2), sep="")
res <- list( "dat"=R, "alpha"=dichsim )
return(res)
}
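#--- illustration (not part of the package source, run with the CDM package
#--- loaded since the attribute draw uses the internal CDM_rmvnorm): simulate
#--- N=200 responses under the DINA rule for 6 items and 3 uncorrelated skills
q.matrix <- matrix( c( 1,0,0, 0,1,0, 0,0,1, 1,1,0, 1,0,1, 0,1,1 ),
                ncol=3, byrow=TRUE )
sim <- sim.din( N=200, q.matrix=q.matrix, guess=rep(.2,6), slip=rep(.1,6) )
str(sim$dat)        # 200 x 6 dichotomous item responses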
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/sim.din.R
|
## File Name: sim.gdina.R
## File Version: 2.20
################################################################################
# Simulation of the GDINA model
sim.gdina <- function( n, q.matrix, delta, link="identity",
thresh.alpha=NULL, cov.alpha=NULL, alpha=NULL,
Mj, Aj, necc.attr )
{
I <- length(delta)
# simulate alpha
if ( is.null(alpha) ){
alpha <- 1* ( CDM_rmvnorm( n, mean=thresh.alpha, sigma=cov.alpha ) > 0 )
}
dat <- matrix( NA, n, I )
for (ii in 1:I){
na.ii <- necc.attr[[ii]]
Aj.ii <- Aj[[ii]]
Mj.ii <- Mj[[ii]][[1]]
delta.ii <- delta[[ii]]
alpha.ii <- alpha[, na.ii, drop=FALSE ]
# calculate probability for every attribute pattern
patt.prob <- rowSums( Mj.ii * outer( rep(1,nrow(Mj.ii) ), delta.ii ) )
# create patterns for alpha.ii and Aj.ii
l1.Aj <- l1.al <- "P"
for (vv in 1:( ncol(alpha.ii) ) ){
l1.Aj <- paste( l1.Aj, Aj.ii[,vv], sep="")
l1.al <- paste( l1.al, alpha.ii[,vv], sep="")
}
resp.ii <- patt.prob[ match( l1.al, l1.Aj ) ]
if ( link=="logit"){
resp.ii <- stats::plogis( resp.ii )
}
if ( link=="log"){
resp.ii <- exp( resp.ii )
}
dat[, ii] <- 1*( stats::runif(n) < resp.ii )
}
res <- list( data=dat, alpha=alpha, q.matrix=q.matrix, delta=delta,
Aj=Aj, Mj=Mj, link=link )
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/sim.gdina.R
|
## File Name: sim.gdina.prepare.R
## File Version: 0.05
######################################################################################
# Function for preparation of GDINA simulation
sim.gdina.prepare <- function( q.matrix )
{
I <- nrow(q.matrix) # number of items
rsqm <- rowSums(q.matrix) # row sums in Q matrix
necc.attr <- delta <- Mj <- Aj <- as.list( rep(1,I) )
for (ii in 1:I){
necc.attr[[ii]] <- which( q.matrix[ii,] > 0 )
Aj[[ii]] <- gdina_designmatrices_create_Aj( nq=rsqm[ii] )
Mj[[ii]] <- gdina_designmatrices_create_Mj( Aj=Aj[[ii]], rule="GDINA" )
delta[[ii]] <- rep( 0, ncol( Mj[[ii]][[1]] ) )
}
res <- list( delta=delta, necc.attr=necc.attr, Aj=Aj, Mj=Mj )
return(res)
}
######################################################################################
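#--- illustration (not part of the package source, run with the CDM package
#--- loaded): the intended workflow is to build the design matrices with
#--- sim.gdina.prepare(), fill in the delta coefficients and then call
#--- sim.gdina(); with the identity link the delta entries are probability
#--- increments (intercept, main effects, interactions)
q.matrix <- matrix( c( 1,0, 0,1, 1,1 ), ncol=2, byrow=TRUE )
prep <- sim.gdina.prepare( q.matrix )
delta <- prep$delta
delta[[1]] <- c(.2,.6)                 # one-attribute items: P(X=1)=.2 or .8
delta[[2]] <- c(.2,.6)
delta[[3]] <- c(.2,.2,.2,.3)           # two-attribute item with interaction
sim <- sim.gdina( n=500, q.matrix=q.matrix, delta=delta, link="identity",
            thresh.alpha=rep(0,2), cov.alpha=diag(2), Mj=prep$Mj, Aj=prep$Aj,
            necc.attr=prep$necc.attr )
colMeans( sim$data )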
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/sim.gdina.prepare.R
|
## File Name: sim_model.R
## File Version: 0.10
sim_model <- function(object=NULL, irfprob=NULL, theta_index=NULL,
prob.theta=NULL, data=NULL, N_sim=NULL )
{
theta <- NULL
if ( ! is.null(object)){
#--- extract parameter
irfprob <- IRT.irfprob(object)
theta <- attr(irfprob, "theta")
prob.theta <- attr(irfprob, "prob.theta")
data <- IRT.data(object)
if (is.null(N_sim)){
N_sim <- nrow(data)
}
}
if ( is.null(N_sim) & ( ! is.null(theta_index) ) ){
N_sim <- length(theta_index)
}
#--- sample theta
TP <- length(prob.theta)
if ( ( ! is.null(N_sim) ) & ( is.null(theta_index) ) ){
theta_index <- sample( 1:TP, size=N_sim, prob=prob.theta, replace=TRUE )
if (! is.null(theta)){
theta <- theta[ theta_index, ]
}
}
dim_irfprob <- dim(irfprob)
#** apply sampling function
dat <- cdm_rcpp_sim_model_item_responses( theta_index=theta_index-1,
irfprob=as.vector(irfprob), dim_irfprob=dim_irfprob)
N_sim <- nrow(dat)
#*** include missings
if (! is.null(data) ){
ind_miss <- sample(1:nrow(data), size=N_sim, replace=TRUE)
dat[ is.na( data[ind_miss,] ) ] <- NA
}
#--- output
res <- list( dat=dat, theta=theta, theta_index=theta_index)
return(res)
}
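#--- illustration (not part of the package source): resample item responses
#--- from any fitted model that supports IRT.irfprob() and IRT.data(); the
#--- missing-data pattern is copied from randomly drawn rows of the data
# mod <- din( sim.dina, q.matrix=sim.qmatrix )       # example din() fit
# res <- sim_model( object=mod, N_sim=1000 )
# str(res$dat)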
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/sim_model.R
|
## File Name: skill.cor.R
## File Version: 1.28
######################################################################################
# This function calculates tetrachoric correlations among skills ##
# Input is an object generated by din function ##
skill.cor <- function( object)
{
warn_temp <- options()$warn
options(warn=-1)
ap <- object$attribute.patt
aps <- object$attribute.patt.splitted
# collect all skill combinations
skill.combis <- t( utils::combn( nrow(object$skill.patt ), 2) )
# create contingency tables
skills.bivariate <- t( apply( skill.combis, 1, FUN=function(ll){
ss1 <- ll[1] ; ss2 <- ll[2]
c( "Freq00"=sum( ap[ aps[, ss1 ]==0 & aps[,ss2]==0, "class.prob" ] ),
"Freq10"=sum( ap[ aps[, ss1 ]==1 & aps[,ss2]==0, "class.prob" ] ),
"Freq01"=sum( ap[ aps[, ss1 ]==0 & aps[,ss2]==1, "class.prob" ] ),
"Freq11"=sum( ap[ aps[, ss1 ]==1 & aps[,ss2]==1, "class.prob" ] ) )
} ) )
res <- data.frame( "skill1"=rownames(object$skill.patt)[ skill.combis[,1] ],
"skill2"=rownames(object$skill.patt)[ skill.combis[,2] ],
skill.combis, skills.bivariate )
for (vv in 3:8){
res[,vv] <- as.numeric( paste( res[,vv] ) )
}
# calculate tetrachoric correlation
res$tetracor <- apply( res[, 5:8 ], 1, FUN=function(ll){
polycor::polychor( matrix(as.numeric(ll),nrow=2) ) } )
r2 <- res[, c(2,1,4,3,5,7,6,8,9) ]
colnames(r2) <- colnames(res)
res <- rbind( res, r2 )
res <- res[ order( res[,3]*1000 + res[,4] ), ]
# create matrix of tetrachoric correlations
K <- max( r2[,3] )
skill.cors <- diag( 1, K )
rownames(skill.cors) <- colnames(skill.cors) <- rownames(object$skill.patt)
for (ii in 1:K){
skill.cors[ii,-ii] <- res[ res[,3]==ii, "tetracor"]
}
res <- list( "conttable.skills"=res, "cor.skills"=skill.cors )
options(warn=warn_temp)
return(res)
}
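#--- illustration (not part of the package source): skill.cor() takes a fitted
#--- din object and returns pairwise tetrachoric correlations of the skills,
#--- computed with polycor::polychor from the model-implied 2x2 tables
# mod <- din( sim.dina, q.matrix=sim.qmatrix )
# skill.cor( mod )$cor.skills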
######################################################################################
# polychoric correlations
skill.polychor <- function( object, colindex=1 )
{
ap <- object$attribute.patt
aps <- object$attribute.patt.splitted
# collect all skill combinations
NO <- nrow(object$skill.patt )
skill.combis <- matrix(NA, nrow=0,ncol=2)
if (NO>1){
skill.combis <- t( utils::combn( NO, 2) )
}
ZZ <- nrow(skill.combis)
skill.cors <- matrix(1, ncol(aps), ncol(aps) )
warn_temp <- options()$warn
options(warn=-1)
if (ZZ>0){
for (zz in 1:ZZ){
# zz <- 8
ll <- skill.combis[zz,]
ss1 <- ll[1]
ss2 <- ll[2]
v1 <- stats::aggregate( ap[, colindex ], list( aps[,ss1], aps[,ss2] ), sum )
NR <- length( unique( aps[,ss1] ) )
NC <- length( unique( aps[,ss2] ) )
v1 <- matrix( v1[,3], nrow=NR, ncol=NC )
skill.cors[ss1,ss2] <- skill.cors[ss2,ss1] <- polycor::polychor( v1 )
}
}
options(warn=warn_temp)
rownames(skill.cors) <- colnames(skill.cors) <- rownames(object$skill.patt)
res <- list( cor.skills=skill.cors )
return(res)
}
#####################################################
######################################################
# calculate polychoric correlation
CDM.calc.polychor <- function( res )
{
G <- res$G
res0 <- as.list(1:G)
for (gg in 1:G){
res0[[gg]] <- skill.polychor( res, colindex=gg )$cor.skills
}
return(res0)
}
########################################################
#########################################################
# extract vector of polychoric correlations from a matrix
CDM.polychor2vec <- function(pcmat)
{
D <- dim(pcmat)[1]
pcvec <- NULL
zz <- 1
if (D>1){
for (dd in 1:(D-1) ){
for (ee in (dd+1):D){
pcvec <- c( pcvec, pcmat[ee,dd] )
names(pcvec)[zz] <- paste0( rownames(pcmat)[ee], "_", rownames(pcmat)[dd] )
zz <- zz+1
}
}
}
if (D==1){
pcvec <- c(1)
names(pcvec) <- "polycor1"
}
return(pcvec)
}
##############################################################
##########################################################
# read list of polychoric correlation matrices
CDM.polychorList2vec <- function(polychorList)
{
G <- length(polychorList)
pcvec <- NULL
for (gg in 1:G){
pcvec0 <- CDM.polychor2vec(polychorList[[gg]])
if (G>1){
names(pcvec0) <- paste0( names(pcvec0), "_group", gg)
}
pcvec <- c( pcvec, pcvec0 )
}
return(pcvec)
}
#######################################################
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/skill.cor.R
|
## File Name: skillspace.approximation.R
## File Version: 0.11
#-- skill space approximation
skillspace.approximation <- function( L, K, nmax=5000 )
{
CDM_require_namespace("sfsmisc")
n <- nmax
ndim <- K
res <- sfsmisc::QUnif(n, p=ndim, leap=409)
res <- 1*(res>.5)
res <- rbind( rep( 0,ndim), rep(1,ndim), res )
v1 <- paste0("P", res[,1] )
for (vv in 2:ndim){
v1 <- paste0( v1, res[,vv] )
}
rownames(res) <- v1
res <- res[ ! duplicated(v1), ]
res <- res[ 1:L, ]
res <- res[ order( rownames(res) ), ]
return(res)
}
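#--- illustration (not part of the package source): approximate a skill space
#--- with L=50 classes for K=8 skills by dichotomizing a quasi-random
#--- (Halton-type) sequence from sfsmisc::QUnif
# space <- skillspace.approximation( L=50, K=8 )
# dim(space)   # 50 x 8 binary matrix with unique, lexicographically sorted rows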
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/skillspace.approximation.R
|
## File Name: skillspace.hierarchy.R
## File Version: 0.17
###############################################################
# computation of skill space hierarchy
skillspace.hierarchy <- function( B, skill.names )
{
if ( ! is.matrix(B) ){
Blist <- strsplit(B, split="\\n")[[1]]
BL <- length(Blist)
K <- length(skill.names)
B <- matrix( 0, nrow=K, ncol=K)
rownames(B) <- colnames(B) <- skill.names
for (bb in 1:BL){
Blist.bb <- gsub( " ", "", Blist[[bb]] )
Bl2 <- strsplit( Blist.bb, split=">" )[[1]]
VV <- length(Bl2)
for ( vv in 1:(VV-1) ){
B[ Bl2[vv], Bl2[vv+1] ] <- 1
}
}
}
K <- length(skill.names)
# define complete skill space
dfr <- rbind( rep(0,K), rep(1,K) )
skillspace <- expand.grid( as.list(as.data.frame(dfr) ) )
colnames(skillspace) <- skill.names
# attribute pattern labels
n1 <- paste0("A", skillspace[,1] )
for (nn in 2:K){
n1 <- paste0( n1, skillspace[,nn] )
}
rownames(skillspace) <- n1
skillspace0 <- skillspace
# compute reachability
R <- B
V1 <- R
vv <- 0
while( ( abs( sum(R) ) > 0 ) & ( vv < 200 ) ){
R <- R %*% B
V1 <- V1 + R
vv <- vv+1
}
# exclude skill classes
for (ii in 1:K){
for (jj in 1:K){
if ( ( V1[ii,jj] > 0 ) & ( ii !=jj) ){
ind <- which( ( skillspace[, ii ]==0 ) & ( skillspace[,jj]==1 ) )
if ( length(ind) > 0 ){ skillspace <- skillspace[ - ind, ] }
}
}
}
#**** determine skill classes which were removed
zeroprob.skillclasses <- which( ! ( rownames(skillspace0) %in% rownames(skillspace) ) )
#**************************************
# output
res <- list("R"=V1, "skillspace.reduced"=as.matrix(skillspace),
"skillspace.complete"=as.matrix(skillspace0),
"zeroprob.skillclasses"=zeroprob.skillclasses )
return(res)
}
############################################################################
# full skill space
skillspace.full <- function( skill.names )
{
B <- paste0( skill.names[1], " > ", skill.names[2] )
M1 <- skillspace.hierarchy( B=B, skill.names=skill.names )
return(M1$skillspace.complete)
}
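#--- illustration (not part of the package source): a linear hierarchy A > B > C
#--- removes every skill class in which a skill is mastered without its
#--- prerequisite, leaving 4 of the 8 complete skill classes
res <- skillspace.hierarchy( B="A > B > C", skill.names=c("A","B","C") )
rownames( res$skillspace.reduced )     # "A000" "A100" "A110" "A111"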
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/skillspace.hierarchy.R
|
## File Name: slca.R
## File Version: 1.869
###########################################
# Structured latent class analysis
###########################################
slca <- function( data, group=NULL,
weights=rep(1, nrow(data)),
Xdes, Xlambda.init=NULL, Xlambda.fixed=NULL,
Xlambda.constr.V=NULL, Xlambda.constr.c=NULL,
delta.designmatrix=NULL, delta.init=NULL,
delta.fixed=NULL, delta.linkfct="log",
Xlambda_positive=NULL,
regular_type="lasso", regular_lam=0, regular_w=NULL, regular_n=nrow(data),
maxiter=1000, conv=1E-5, globconv=1E-5, msteps=10,
convM=.0005, decrease.increments=FALSE, oldfac=0, dampening_factor=1.01,
seed=NULL, progress=TRUE, PEM=TRUE, PEM_itermax=maxiter, ...)
{
#************************************************************
cl <- match.call()
theta.k <- NULL
#*************************
# data preparation
s1 <- Sys.time()
e1 <- environment()
use.freqpatt <- FALSE
deviance.history <- rep(NA, maxiter)
## prevent warnings in R CMD check ("no visible binding for global variable")
## gdm: no visible binding for global variable 'TD'
TD <- TP <- EAP.rel <- mean.trait <- sd.trait <- skewness.trait <- NULL
K.item <- correlation.trait <- NULL
se.theta.k <- NULL
#-- data processing
res <- slca_proc_data(data=data)
dat <- res$dat
dat.ind <- res$dat.ind
I <- res$I
n <- res$n
dat.resp <- res$dat.resp
K <- res$K
maxK <- res$maxK
data <- res$data
data0 <- res$data0
resp.ind.list <- res$resp.ind.list
#-- process data for multiple groups
res <- slca_proc_multiple_groups( group=group, n=n )
G <- res$G
group <- res$group
group0 <- res$group0
group.stat <- res$group.stat
Ngroup <- res$Ngroup
#--- define design matrices
KK <- K # if KK==1 then a slope parameter for all items is estimated
deltaNULL <- 0
if ( is.null(delta.designmatrix) ){
deltaNULL <- 1
delta.designmatrix <- diag( dim(Xdes)[3] )
}
# lambda basis parameters for X
#--- set seed
res <- slca_set_seed(seed=seed)
seed.used <- res$seed.used
#--- inits Xlambda
Nlam <- dim(Xdes)[[4]]
res <- slca_inits_Xlambda( Xlambda.init=Xlambda.init, Xdes=Xdes, Nlam=Nlam, Xlambda_positive=Xlambda_positive,
Xlambda.fixed=Xlambda.fixed )
Xlambda <- Xlambda.init <- res$Xlambda.init
Xlambda_positive <- res$Xlambda_positive
#-- starting values for distributions
res <- slca_inits_skill_distribution( delta.designmatrix=delta.designmatrix, delta.init=delta.init,
delta.linkfct=delta.linkfct, G=G, K=K, I=I )
TP <- res$TP
n.ik <- res$n.ik
pi.k <- res$pi.k
delta <- res$delta
se.Xlambda <- 0*Xlambda
max.increment.Xlambda <- .3
#--- preparations for calc.counts
res <- gdm_prep_calc_counts( K=K, G=G, group=group, weights=weights, dat.resp=dat.resp, dat.ind=dat.ind,
use.freqpatt=use.freqpatt )
ind.group <- res$ind.group
dat.ind2 <- res$dat.ind2
#--- reducing computational burden for design matrix
res <- slca_proc_design_matrix_xlambda(Xdes=Xdes)
XdesM <- res$XdesM
NX <- res$NX
dimXdes <- res$dimXdes
#-- Xlambda constraints
V <- e2 <- V1 <- NULL
if ( ! is.null(Xlambda.constr.V) ){
V <- Xlambda.constr.V
e2 <- matrix( Xlambda.constr.c, nrow=ncol(V), ncol=1 )
V1 <- solve( crossprod(V) )
}
#-- regularization
res <- slca_proc_regularization( regular_lam=regular_lam, regular_w=regular_w, Nlam=Nlam, Xlambda.fixed=Xlambda.fixed,
regular_n=regular_n, regular_type=regular_type )
regular_lam <- res$regular_lam
regular_w <- res$regular_w
regular_lam_used <- res$regular_lam_used
regular_indicator_parameters <- res$regular_indicator_parameters
regularization <- res$regularization
#-- preliminaries PEM acceleration
if (PEM){
envir <- environment()
pem_pars <- c("delta","Xlambda")
pem_output_vars <- c("pi.k","Xlambda","delta")
parmlist <- cdm_pem_inits_assign_parmlist(pem_pars=pem_pars, envir=envir)
res <- cdm_pem_inits( parmlist=parmlist)
pem_parameter_index <- res$pem_parameter_index
pem_parameter_sequence <- res$pem_parameter_sequence
if (regularization){
PEM <- FALSE
}
}
#- for posterior calculation
gwt0 <- matrix( 1, nrow=n, ncol=TP )
#---
# initial values algorithm
max.increment <- 1
dev <- 0
iter <- 0
globconv1 <- conv1 <- 1000
disp <- paste( paste( rep(".", 70 ), collapse=""),"\n", sep="")
mindev <- Inf
iterate <- TRUE
############################################
# BEGIN MML Algorithm
############################################
while( ( iter < maxiter ) & ( ( globconv1 > globconv) | ( conv1 > conv) ) & iterate ){
#--- collect old parameters
Xlambda0 <- Xlambda
dev0 <- dev
delta0 <- delta
pi.k0 <- pi.k
#--- 1 calculate probabilities
probs <- slca_calc_prob( XdesM=XdesM, dimXdes=dimXdes, Xlambda=Xlambda )
#--- 2 calculate individual likelihood
res.hwt <- slca_calc_posterior( probs=probs, gwt0=gwt0, dat=dat, I=I, resp.ind.list=resp.ind.list )
p.xi.aj <- res.hwt$hwt
#--- 3 calculate posterior and marginal distributions
res <- gdm_calc_post( pi.k=pi.k, group=group, p.xi.aj=p.xi.aj, weights=weights, G=G, ind.group=ind.group,
use.freqpatt=use.freqpatt )
p.aj.xi <- res$p.aj.xi
pi.k <- res$pi.k
#*****
#4 calculate expected counts
# n.ik [ 1:TP, 1:I, 1:(K+1), 1:G ]
res <- slca_calc_counts( G=G, weights=weights, dat.ind=dat.ind, dat=dat, dat.resp=dat.resp, p.aj.xi=p.aj.xi, K=K,
n.ik=n.ik, TP=TP, I=I, group=group, dat.ind2=dat.ind2, ind.group=ind.group,
use.freqpatt=use.freqpatt )
n.ik <- res$n.ik
n.ik1 <- res$n.ik1
N.ik <- res$N.ik
N.ik1 <- res$N.ik1
#*****
#5 M step: Xdelta parameter estimation
# n.ik [1:TP,1:I,1:K,1:G]
# probs[1:I,1:K,1:TP]
res <- slca_est_Xlambda( Xlambda=Xlambda, Xdes=Xdes, probs=probs,
n.ik1=n.ik1, N.ik1=N.ik1, I=I, K=K, G=G, max.increment=max.increment,
TP=TP, msteps=msteps, convM=convM, Xlambda.fixed=Xlambda.fixed,
XdesM=XdesM, dimXdes=dimXdes, oldfac=oldfac,
decrease.increments=decrease.increments, dampening_factor=dampening_factor,
Xlambda.constr.V=Xlambda.constr.V, e2=e2, V1=V1, regularization=regularization,
regular_lam_used=regular_lam_used, regular_n=regular_n,
Xlambda_positive=Xlambda_positive, regular_type=regular_type )
Xlambda <- res$Xlambda
se.Xlambda <- res$se.Xlambda
max.increment <- res$max.increment
regular_penalty <- res$regular_penalty
parm_regularized <- res$parm_regularized
numb_regularized <- res$numb_regularized
#*****
#7 M step: estimate reduced skillspace
res <- slca_est_skillspace( Ngroup=Ngroup, pi.k=pi.k, delta.designmatrix=delta.designmatrix,
G=G, delta=delta, delta.fixed=delta.fixed, eps=1E-7, oldfac=oldfac,
delta.linkfct=delta.linkfct )
pi.k <- res$pi.k
delta <- res$delta
covdelta <- res$covdelta
#******
#7a P-EM acceleration
#-- PEM acceleration
if (PEM){
#-- collect all parameters in a list
parmlist <- cdm_pem_inits_assign_parmlist(pem_pars=pem_pars, envir=envir)
#-- define log-likelihood function
ll_fct <- slca_calc_loglikelihood
#- extract parameters
ll_args <- list( Xlambda=Xlambda, delta=delta, delta.designmatrix=delta.designmatrix, XdesM=XdesM,
dimXdes=dimXdes, gwt0=gwt0, dat=dat, I=I, resp.ind.list=resp.ind.list, G=G,
use.freqpatt=use.freqpatt, ind.group=ind.group, weights=weights,
Xlambda.constr.V=Xlambda.constr.V, e2=e2, V1=V1, Xlambda_positive=Xlambda_positive )
#-- apply general acceleration function
res <- cdm_pem_acceleration( iter=iter, pem_parameter_index=pem_parameter_index,
pem_parameter_sequence=pem_parameter_sequence, pem_pars=pem_pars,
PEM_itermax=PEM_itermax, parmlist=parmlist, ll_fct=ll_fct, ll_args=ll_args,
deviance.history=deviance.history )
#-- collect output
PEM <- res$PEM
pem_parameter_sequence <- res$pem_parameter_sequence
cdm_pem_acceleration_assign_output_parameters( res_ll_fct=res$res_ll_fct,
vars=pem_output_vars, envir=envir, update=res$pem_update )
}
#*****
#8 calculate likelihood
# n.ik [ TP, I, K+1, G ]
# N.ik [ TP, I, G ]
# probs [I, K+1, TP ]
ll <- slca_calc_likelihood( G=G, use.freqpatt=use.freqpatt, ind.group=ind.group,
p.xi.aj=p.xi.aj, pi.k=pi.k, weights=weights )
dev <- -2*ll
deviance.history[iter+1] <- dev
#---- display progress
Xlambda_change <- gg1 <- abs( Xlambda - Xlambda0 )
pardiff <- max( gg1 )
deltadiff <- abs( pi.k - pi.k0 )
conv1 <- max( c(pardiff,deltadiff))
globconv1 <- abs( dev - dev0)
iter <- iter +1
#---- print progress
res <- slca_print_progress_em_algorithm( progress=progress, disp=disp, iter=iter,
dev=dev, dev0=dev0, deltadiff=deltadiff, Xlambda_change=pardiff,
regularization=regularization, regular_penalty=regular_penalty,
numb_regularized=numb_regularized)
if ( globconv1 < globconv ){
iterate <- FALSE
}
# save values corresponding to minimal deviance,
# only in models with no regularization
if ( regular_lam==0 ){
if ( ( dev < mindev ) | ( iter==1 ) ){
Xlambda.min <- Xlambda
se.Xlambda.min <- se.Xlambda
pi.k.min <- pi.k
n.ik.min <- n.ik
probs.min <- probs
delta.min <- delta
covdelta.min <- covdelta
mindev <- dev
iter.min <- iter
}
} else {
iter.min <- iter
}
}
############################################
# END MML Algorithm
############################################
if ( regular_lam==0 ){
Xlambda.min -> Xlambda
se.Xlambda.min -> se.Xlambda
pi.k.min -> pi.k
n.ik.min -> n.ik
probs.min -> probs
delta.min -> delta
covdelta.min -> covdelta
mindev -> dev
# iter.min -> iter
}
# names
if ( is.null(dimnames(Xdes)[[4]] ) ){
dimnames(Xdes)[[4]] <- paste0("lam", 1:Nlam )
}
if ( is.null(dimnames(Xdes)[[3]] ) ){
dimnames(Xdes)[[3]] <- paste0("Class", 1:TP )
}
names(Xlambda) <- dimnames(Xdes)[[4]]
colnames(pi.k) <- paste0("Group", 1:G )
rownames(pi.k) <- dimnames(Xdes)[[3]]
# collect item parameters
item1 <- array( aperm( probs, c(2,1,3)), dim=c(I*maxK, TP) )
colnames(item1) <- dimnames(Xdes)[[3]]
item <- data.frame("item"=rep(colnames(dat), each=maxK),
"Cat"=rep(0:K, I), item1 )
rownames(item) <- paste0( rep(colnames(dat), each=maxK), "_Cat", rep(0:K, I) )
#-- Information criteria
ic <- slca_calc_ic( dev=dev, dat=dat, G=G, K=K, TP=TP, I=I, delta.designmatrix=delta.designmatrix,
delta.fixed=delta.fixed, Xlambda=Xlambda, Xlambda.fixed=Xlambda.fixed,
data0=data0, deltaNULL=deltaNULL, Xlambda.constr.V=Xlambda.constr.V,
regularization=regularization, regular_indicator_parameters=regular_indicator_parameters,
Xlambda_positive=Xlambda_positive )
# item fit [ items, theta, categories ]
# # n.ik [ 1:TP, 1:I, 1:(K+1), 1:G ]
probs <- aperm( probs, c(3,1,2) )
# person parameters
mle.class <- max.col( m=p.xi.aj )
map.class <- max.col( m=p.aj.xi )
#*************************
# collect output
s2 <- Sys.time()
time <- list( s1=s1,s2=s2, timediff=s2-s1)
control <- list()
control$weights <- weights
control$group <- group
res <- list( item=item, deviance=dev, ic=ic, Xlambda=Xlambda, se.Xlambda=se.Xlambda, pi.k=pi.k, pjk=probs,
n.ik=n.ik, G=G, I=I, N=n, TP=TP, delta=delta, covdelta=covdelta,
delta.designmatrix=delta.designmatrix, MLE.class=mle.class, MAP.class=map.class,
data=data, group.stat=group.stat, p.xi.aj=p.xi.aj, posterior=p.aj.xi, K.item=K.item,
time=time, iter=iter, iter.min=iter.min, converged=iter<maxiter,
deviance.history=deviance.history, AIC=ic$AIC, BIC=ic$BIC, Npars=ic$np, loglike=-dev/2,
seed.used=seed.used, PEM=PEM, Xlambda.init=Xlambda.init, delta.init=delta.init,
Xlambda_positive=Xlambda_positive, regular_penalty=regular_penalty, regular_n=regular_n,
regular_type=regular_type, regular_lam=regular_lam, regular_w=regular_w, regular_lam_used=regular_lam_used,
regular_indicator_parameters=regular_indicator_parameters, regularization=regularization,
numb_regularized=numb_regularized, parm_regularized=parm_regularized,
control=control, call=cl )
class(res) <- "slca"
#--- print progress
slca_print_progress_end( s1=s1, s2=s2, progress=progress )
return(res)
}
###################################################
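#--- illustration (not part of the package source): a minimal two-class latent
#--- class model for 6 dichotomous items. Xdes has dimensions
#--- [ items, categories, classes, Nlam ]; the category-1 entry of item ii in
#--- class tt loads on parameter (tt-1)*I+ii, so that plogis(Xlambda) are the
#--- class-specific response probabilities
set.seed(98)
I <- 6 ; N <- 400 ; TP <- 2
true_class <- 1 + ( stats::runif(N) > 0.5 )
p1 <- rbind( rep(.8,I), rep(.3,I) )          # true P(X=1) per class and item
dat <- 1 * ( matrix( stats::runif(N*I), N, I ) < p1[ true_class, ] )
colnames(dat) <- paste0( "I", 1:I )
Xdes <- array( 0, dim=c(I, 2, TP, I*TP) )
for (tt in 1:TP){
    for (ii in 1:I){
        Xdes[ ii, 2, tt, (tt-1)*I + ii ] <- 1
    }
}
# mod <- slca( dat, Xdes=Xdes )
# stats::plogis( mod$Xlambda )   # estimated class-specific response probabilities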
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca.R
|
## File Name: slca_calc_class_probabilities.R
## File Version: 0.04
slca_calc_class_probabilities <- function( delta, delta.designmatrix )
{
G <- ncol(delta)
pi.k <- matrix(0, nrow=nrow(delta.designmatrix), ncol=G)
for (gg in 1:G){
pi.k[,gg] <- cdm_sumnorm( exp( delta.designmatrix %*% delta[,gg] ) )
}
return(pi.k)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_calc_class_probabilities.R
|
## File Name: slca_calc_counts.R
## File Version: 0.06
################################################
# calculation of expected counts
slca_calc_counts <- function(G, weights, dat.ind, dat, dat.resp,
p.aj.xi, K, n.ik, TP,I,group, dat.ind2, ind.group, use.freqpatt )
{
N.ik <- array( 0, dim=c(TP,I,G) )
N.ik1 <- array( 0, dim=c(TP,I) )
n.ik1 <- array( 0, dim=c(TP,I,K+1 ) )
#-------------------------------------
#--- single group
if (G==1){
gg <- 1
for (kk in 1:(K+1)){ # kk <- 1 # category 0 ( -> 1 )
dkk2 <- dat.ind2[[kk]][[gg]]
n.ik[,,kk,gg] <- crossprod( p.aj.xi, dkk2 )
n.ik1[,,kk] <- n.ik[,,kk,gg]
N.ik[,,gg] <- N.ik[,,gg] + n.ik[,,kk,gg]
}
N.ik1 <- N.ik1 + N.ik[,,gg]
}
#-------------------------------------
#--- multiple groups
if (G>1){
for (gg in 1:G){ # gg <- 1
if ( ! use.freqpatt ){
ind.gg <- ind.group[[gg]]
t.p.aj.xi.gg <- t( p.aj.xi[ind.gg,] )
}
if ( use.freqpatt ){
t.p.aj.xi.gg <- t( p.aj.xi[[gg]] )
}
for (kk in 1:(K+1)){ # kk <- 1 # category 0 ( -> 1 )
dkk2 <- dat.ind2[[kk]][[gg]]
if ( use.freqpatt ){
if (G>1){
dkk2 <- dkk2[ which(weights[,gg] > 0), ]
}
}
n.ik[,,kk,gg] <- t.p.aj.xi.gg %*% dkk2
n.ik1[,,kk] <- n.ik1[,,kk] + n.ik[,,kk,gg]
N.ik[,,gg] <- N.ik[,,gg] + n.ik[,,kk,gg]
}
N.ik1 <- N.ik1 + N.ik[,,gg]
} # end gg
} # end multiple group
#----- output
res <- list(n.ik=n.ik, N.ik=N.ik, n.ik1=n.ik1, N.ik1=N.ik1)
return(res)
}
.slca.calc.counts <- slca_calc_counts
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_calc_counts.R
|
## File Name: slca_calc_ic.R
## File Version: 0.26
#############################################################
# calculation of information criteria and number of parameters
slca_calc_ic <- function( dev, dat, G, K, TP,I, delta.designmatrix, delta.fixed,
Xlambda, Xlambda.fixed, data0, deltaNULL, Xlambda.constr.V,
regularization, regular_indicator_parameters, Xlambda_positive)
{
ic <- list( "deviance"=dev, "n"=nrow(data0) )
ic$traitpars <- 0
ic$itempars <- 0
ic$itempars <- length(Xlambda)
if ( ! is.null(Xlambda.fixed ) ){
ic$itempars <- ic$itempars - nrow(Xlambda.fixed )
}
#--- count number of estimated parameters
ind_regular <- ( Xlambda==0 ) * regular_indicator_parameters
ind_positive <- ( Xlambda==0 ) * Xlambda_positive
ind_nonactive <- 1 * ( ind_regular | ind_positive )
ic$nonactive <- sum(ind_nonactive)
ic$itempars <- ic$itempars - ic$nonactive
if ( ! is.null( Xlambda.constr.V ) ){
ic$itempars <- ic$itempars - ncol(Xlambda.constr.V )
}
ic$traitpars <- G * ncol(delta.designmatrix ) - G*deltaNULL
if ( ! is.null(delta.fixed ) ){
ic$traitpars <- ic$traitpars - nrow(delta.fixed )
}
#***********************************************
# information criteria
ic$np <- ic$itempars + ic$traitpars
#-- compute criteria
ic <- cdm_calc_information_criteria(ic=ic)
return(ic)
}
###################################################################
.slca.calc.ic <- slca_calc_ic
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_calc_ic.R
|
## File Name: slca_calc_likelihood.R
## File Version: 0.07
slca_calc_likelihood <- function(G, use.freqpatt, ind.group, p.xi.aj, pi.k, weights )
{
ll <- 0
for (gg in 1:G){
#-- do not use frequency pattern
if ( ! use.freqpatt ){
ind.gg <- ind.group[[gg]]
ll <- ll + sum( weights[ind.gg] * log( rowSums( p.xi.aj[ind.gg,] *
matrix( pi.k[,gg], nrow=length(ind.gg), ncol=nrow(pi.k), byrow=TRUE ) ) ) )
}
#-- use frequency pattern
if ( use.freqpatt ){
if (G>1){
wgg <- weights[,gg]
}
if (G==1){
wgg <- weights
}
ll <- ll + sum( wgg * log( rowSums( p.xi.aj * matrix( pi.k[,gg], nrow=nrow(p.xi.aj),
ncol=nrow(pi.k), byrow=TRUE ) ) ) )
}
}
return(ll)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_calc_likelihood.R
|
## File Name: slca_calc_loglikelihood.R
## File Version: 0.13
slca_calc_loglikelihood <- function(Xlambda, delta, delta.designmatrix, XdesM, dimXdes, gwt0, dat, I, resp.ind.list,
G, use.freqpatt, ind.group, weights, Xlambda.constr.V, e2, V1, Xlambda_positive)
{
#-- Xlambda constraint
if ( ! is.null(Xlambda.constr.V) ){
Xlambda <- slca_est_xlambda_constraint( Xlambda=Xlambda,
Xlambda.constr.V=Xlambda.constr.V, V1=V1, e2=e2 )
}
#-- positivity constraint
Xlambda <- cdm_positivity_restriction(x=Xlambda, positive=Xlambda_positive)
#-- compute log-likelihood
probs <- slca_calc_prob( XdesM=XdesM, dimXdes=dimXdes, Xlambda=Xlambda )
pi.k <- slca_calc_class_probabilities( delta=delta, delta.designmatrix=delta.designmatrix )
res <- slca_calc_posterior( probs=probs, gwt0=gwt0, dat=dat, I=I, resp.ind.list=resp.ind.list )
ll <- slca_calc_likelihood( G=G, use.freqpatt=use.freqpatt, ind.group=ind.group, p.xi.aj=res$hwt,
pi.k=pi.k, weights=weights )
#--- output
res <- list(ll=ll, pi.k=pi.k, Xlambda=Xlambda, delta=delta)
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_calc_loglikelihood.R
|
## File Name: slca_calc_posterior.R
## File Version: 0.02
slca_calc_posterior <- function(probs, gwt0, dat, I, resp.ind.list)
{
res <- cdm_calc_posterior( rprobs=probs, gwt=gwt0, resp=dat, nitems=I, resp.ind.list=resp.ind.list, normalization=FALSE,
thetasamp.density=NULL, snodes=0 )
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_calc_posterior.R
|
## File Name: slca_calc_prob.R
## File Version: 0.06
#############################################################
# Rcpp function for calculating probabilities
slca_calc_prob <- function( XdesM, dimXdes, Xlambda )
{
res <- cdm_rcpp_slca_calc_probs( XdesM=XdesM, dimXdes=dimXdes, Xlambda=Xlambda )
I <- dimXdes[1]
maxK <- dimXdes[2]
TP <- dimXdes[3]
probs <- array( res, dim=c( I, maxK, TP ))
return(probs)
}
.slca.calc.prob <- slca_calc_prob
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_calc_prob.R
|
## File Name: slca_calc_prob0.R
## File Version: 0.04
slca_calc_prob0 <- function( Xdes, Xlambda, I,K,TP)
{
# Xdes [ 1:I, 1:(K+1), 1:TP, 1:Nlam ]
p1 <- probs <- array( 0, dim=c(I,K+1,TP) )
for (tt in 1:TP){
tmp0 <- 0
for (hh in 1:(K+1) ){
tmp1 <- exp( Xdes[, hh, tt, ] %*% Xlambda )
tmp0 <- tmp0 + tmp1
p1[, hh, tt ] <- tmp1
}
for (hh in 1:(K+1) ){
probs[,hh,tt] <- p1[, hh, tt ] / tmp0
}
}
return(probs)
}
.slca.calc.prob0 <- slca_calc_prob0
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_calc_prob0.R
|
## File Name: slca_est_Xlambda.R
## File Version: 0.365
# estimation of Xlambda parameters
slca_est_Xlambda <- function(Xlambda, Xdes, probs, n.ik1, N.ik1, I, K, G,
max.increment, TP,msteps, convM, Xlambda.fixed, XdesM, dimXdes, oldfac,
decrease.increments, dampening_factor=1.01, Xlambda.constr.V, e2, V1,
regularization, regular_lam_used, regular_n, Xlambda_positive, regular_type )
{
max.increment0 <- max.increment
iter <- 1
eps <- 1e-8
parchange <- 1
Xlambda00 <- Xlambda
Nlam <- length(Xlambda)
n.ik <- aperm( n.ik1, c(2,3,1) )
N.ik <- aperm( N.ik1, c(2,1) )
maxK <- K+1
#---- begin M-steps
while( ( iter <=msteps ) & ( parchange > convM) ){
Xlambda0 <- Xlambda
probs <- slca_calc_prob( XdesM=XdesM, dimXdes=dimXdes, Xlambda=Xlambda )
d2.b <- d1.b <- rep(eps, Nlam)
# probs num [1:I, 1:maxK, 1:TP]
# n.ik num [1:I, 1:maxK, 1:TP]
# N.ik num [1:I,1:TP]
# Xdes num [1:I, 1:maxK, 1:TP, 1:Nlam]
#-- calculate derivatives
res <- slca_est_Xlambda_calc_deriv( XdesM=XdesM, dimXdes=dimXdes, Xlambda=Xlambda,
probs=probs, n.ik=n.ik, N.ik=N.ik )
d1.b <- res$d1b
d2.b <- res$d2b
#-- calculate increment
res <- slca_est_Xlambda_calc_increment( d1=d1.b, d2=d2.b, x0=Xlambda,
regularization=regularization, regular_lam_used=regular_lam_used,
max.increment=max.increment, regular_type=regular_type,
regular_n=regular_n)
increment <- res$increment
max.increment <- res$max.increment
parm_regularized <- res$parm_regularized
numb_regularized <- res$numb_regularized
#-- update parameter
Xlambda <- Xlambda + increment
se.Xlambda <- sqrt( 1 / abs( d2.b+ eps ) )
#-- positivity constraint
Xlambda <- cdm_positivity_restriction(x=Xlambda, positive=Xlambda_positive)
#-- parameter fixings
res <- cdm_include_fixed_parameters( parm=Xlambda, se_parm=se.Xlambda, parm_fixed=Xlambda.fixed )
Xlambda <- res$parm
se.Xlambda <- res$se_parm
iter <- iter + 1
parchange <- max( abs(Xlambda0-Xlambda))
} # end M-steps
#-----------------------------------------
# linear constraints on Xlambda parameters
# below is code copied from sirt::rasch.pml3
#................
# linear constraints: Let e be the vector of error
# correlations, V a design matrix and c a vector.
# The constraints can be written in the form
# c=V * e . Then V*e - c=0.
# See the Neuhaus paper:
# e_cons=e + V * (V'V)^(-1) * ( c - V * e )
if ( ! is.null(Xlambda.constr.V) ){
Xlambda <- slca_est_xlambda_constraint( Xlambda=Xlambda,
Xlambda.constr.V=Xlambda.constr.V, V1=V1, e2=e2 )
}
if (oldfac > 0 ){
Xlambda <- oldfac*Xlambda00 + ( 1 - oldfac ) *Xlambda
}
max.increment <- max( abs( Xlambda - Xlambda00 ))
if (decrease.increments){
max.increment0 <- max.increment0 / dampening_factor
}
penalty <- cdm_penalty_values(x=Xlambda, regular_type=regular_type,
regular_lam=regular_lam_used)
regular_penalty <- regular_n * sum(penalty)
#----- output
res <- list(Xlambda=Xlambda, se.Xlambda=se.Xlambda, max.increment=max.increment0,
regular_penalty=regular_penalty, parm_regularized=parm_regularized,
numb_regularized=numb_regularized)
return(res)
}
.slca.est.Xlambda <- slca_est_Xlambda
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_est_Xlambda.R
|
## File Name: slca_est_Xlambda_calc_deriv.R
## File Version: 0.05
slca_est_Xlambda_calc_deriv <- function(XdesM, dimXdes, Xlambda, probs, n.ik, N.ik)
{
res <- cdm_rcpp_slca_calc_deriv( XdesM=XdesM, dimXdes=dimXdes, Xlambda=Xlambda,
probs=as.vector(probs), nik=as.vector(n.ik), Nik=as.vector(N.ik) )
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_est_Xlambda_calc_deriv.R
|
## File Name: slca_est_Xlambda_calc_increment.R
## File Version: 0.11
slca_est_Xlambda_calc_increment <- function(d1, d2, x0, regularization,
regular_lam_used, max.increment, regular_type, regular_n )
{
if (regularization){
res <- cdm_calc_increment_regularization( d1=d1, d2=d2, x0=x0,
regular_lam_used=regular_lam_used, max.increment=max.increment,
regular_type=regular_type, regular_n=regular_n)
}
if (!regularization){
res <- cdm_calc_increment( d1=d1, d2=d2, max.increment=max.increment )
}
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_est_Xlambda_calc_increment.R
|
## File Name: slca_est_skillspace.R
## File Version: 0.11
###########################################################################
# reduced skillspace estimation
slca_est_skillspace <- function(Ngroup, pi.k, delta.designmatrix, G, delta, delta.fixed,
eps=1E-10, oldfac, delta.linkfct)
{
covdelta <- as.list(1:G)
Z <- delta.designmatrix
delta0 <- delta
ND <- length(delta)
for (gg in 1:G){
if ( delta.linkfct=="log"){
ntheta <- cdm_sumnorm( Ngroup[gg] * pi.k[,gg] )
pij <- log(ntheta+eps)
mod <- stats::lm( pij ~ 0 + Z, weights=ntheta )
covbeta <- vcov(mod)
beta <- coef(mod)
}
if ( delta.linkfct=="logit"){
nj <- Ngroup[gg] * pi.k[,gg]
pij <- stats::qlogis( pi.k[,gg] + eps )
wj <- cdm_sumnorm( stats::plogis( delta.designmatrix %*% delta[,gg,drop=FALSE] ) )
wj <- wj[,1]
n <- Ngroup[gg]
mod1 <- stats::lm( pij ~ 0 + Z )
beta <- coef(mod1)
covbeta <- vcov(mod1)
}
if ( ! is.null( delta.fixed ) ){
# delta.fixed: 1st column: parameter index
# 2nd column: group index
# 3rd column: parameter value
ind.gg <- which( delta.fixed[,2]==gg )
if ( length(ind.gg) > 0 ){
beta[ delta.fixed[ind.gg,1] ] <- delta.fixed[ind.gg,3]
}
}
if ( delta.linkfct=="log"){
pi.k[,gg] <- cdm_sumnorm( exp( Z %*% beta ) / Ngroup[gg] )
}
if ( delta.linkfct=="logit"){
pi.k[,gg] <- cdm_sumnorm( exp( delta.designmatrix %*% beta ) )
}
if ( oldfac > 0 ){
beta <- oldfac*delta0[,gg] + ( 1 - oldfac)*beta
}
delta[,gg] <- beta
covdelta[[gg]] <- covbeta
}
res <- list( pi.k=pi.k, delta=delta, covdelta=covdelta )
return(res)
}
.slca.est.skillspace <- slca_est_skillspace
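#--- illustration (not part of the package source): with the log link the step
#--- is a weighted least squares fit of the log expected counts on the design
#--- matrix Z=delta.designmatrix, followed by renormalization. A sketch that
#--- assumes the CDM-internal cdm_sumnorm(), which rescales a vector to sum 1:
# pi.k <- matrix( c(.4,.3,.2,.1), ncol=1 )
# Z <- cbind( 1, 0:3 )                       # log-linear smoothing design
# res <- slca_est_skillspace( Ngroup=100, pi.k=pi.k, delta.designmatrix=Z,
#            G=1, delta=matrix(0,2,1), delta.fixed=NULL, oldfac=0,
#            delta.linkfct="log" )
# round( res$pi.k, 3 )                       # smoothed distribution, sums to 1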
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_est_skillspace.R
|
## File Name: slca_est_xlambda_constraint.R
## File Version: 0.04
slca_est_xlambda_constraint <- function( Xlambda, Xlambda.constr.V, V1, e2 )
{
V <- Xlambda.constr.V
if (! is.null(V)){
e1 <- matrix( Xlambda, ncol=1 )
Xlambda <- ( e1 + V %*% V1 %*% ( e2 - t(V) %*% e1 ) )[,1]
}
return(Xlambda)
}
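#--- illustration (not part of the package source): the projection enforces
#--- t(V) %*% Xlambda=e2; here the first two parameters are forced to sum to 0
V <- matrix( c(1,1,0), ncol=1 )
e2 <- matrix( 0, nrow=1, ncol=1 )
V1 <- solve( crossprod(V) )
slca_est_xlambda_constraint( Xlambda=c(0.5,0.1,-0.2), Xlambda.constr.V=V,
        V1=V1, e2=e2 )
# -> 0.2 -0.2 -0.2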
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_est_xlambda_constraint.R
|
## File Name: slca_inits_Xlambda.R
## File Version: 0.05
slca_inits_Xlambda <- function( Xlambda.init, Xdes, Nlam, Xlambda_positive, Xlambda.fixed)
{
if ( is.null( Xlambda_positive ) ){
Xlambda_positive <- rep( FALSE, Nlam )
}
if ( is.null( Xlambda.init ) ){
Xlambda.init <- stats::runif( Nlam, -1, 1 ) + 1 * Xlambda_positive
}
if ( ! is.null( Xlambda.fixed ) ){
Xlambda.init[ Xlambda.fixed[,1] ] <- Xlambda.fixed[,2]
Xlambda_positive[ Xlambda.fixed[,1] ] <- FALSE
}
#--- output
res <- list( Xlambda.init=Xlambda.init, Xlambda_positive=Xlambda_positive)
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_inits_Xlambda.R
|
## File Name: slca_inits_skill_distribution.R
## File Version: 0.06
slca_inits_skill_distribution <- function( delta.designmatrix, delta.init,
delta.linkfct, G, K, I )
{
TP <- nrow(delta.designmatrix)
if ( ! is.null(delta.init) ){
delta <- delta.init
if ( delta.linkfct=="log"){
pik <- exp( delta.designmatrix %*% delta.init[,1] )
} else {
pik <- stats::plogis( delta.designmatrix %*% delta.init[,1] )
}
} else {
pik <- cdm_sumnorm( rep( 1 /TP, TP ) + stats::runif(TP, 0, .5 ) )
if ( delta.linkfct=="logit"){
g1 <- solve( crossprod( delta.designmatrix )) %*% t( delta.designmatrix) %*% pik
delta <- matrix( g1[,1], nrow=ncol(delta.designmatrix), ncol=G)
} else {
delta <- matrix( 0, ncol(delta.designmatrix), G )
delta[1,] <- 1
}
}
pi.k <- matrix( 0, TP, G )
for (gg in 1:G){
pi.k[,gg] <- pik
}
n.ik <- array( 0, dim=c(TP,I,K+1,G) )
#--------- output
res <- list(TP=TP, n.ik=n.ik, pi.k=pi.k, delta=delta)
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_inits_skill_distribution.R
|
## File Name: slca_print_progress_em_algorithm.R
## File Version: 0.182
slca_print_progress_em_algorithm <- function(progress, disp, iter, dev, dev0, deltadiff, Xlambda_change,
regularization, regular_penalty, numb_regularized, digits_dev=4, digits_parm=6 )
{
if (progress){
cat(disp)
cat("Iteration", iter, " ", paste( Sys.time() ), "\n" )
cat( paste( " Deviance ", "=", " ", round( dev, digits_dev ),
if (iter > 1 ){ paste(" | Deviance change", "=", "") } else {""},
if( iter>1){round( - dev + dev0, digits_parm )} else { ""} ,sep=""))
if ( (dev > dev0) & (iter>1 ) & ( ! regularization) ){
cat( " Deviance increases!")
}
cat("\n")
if (regularization){
cat( paste( " Penalty", "=", round( regular_penalty, digits_dev ), " | " ) )
cat( paste( "Number of regularized parameters", "=", numb_regularized, "\n" ) )
}
cat( paste( " Maximum Xlambda parameter change", "=",
round( max( Xlambda_change ), digits_parm ), " \n" ) )
cat( paste( " Maximum distribution parameter change", "=",
round( max( deltadiff ), digits_parm ), " \n" ) )
utils::flush.console()
}
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_print_progress_em_algorithm.R
|
## File Name: slca_print_progress_end.R
## File Version: 0.04
slca_print_progress_end <- function(s1, s2, progress)
{
if (progress){
cat("----------------------------------- \n")
cat("Start:", paste(s1), "\n")
cat("End:", paste(s2), "\n")
cat("Difference:", print(s2 -s1), "\n")
cat("----------------------------------- \n")
}
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_print_progress_end.R
|
## File Name: slca_proc_data.R
## File Version: 0.03
slca_proc_data <- function(data)
{
data0 <- data <- as.matrix(data)
dat.resp0 <- dat.resp <- 1 - is.na(data)
dat <- data
dat[ is.na(data) ] <- 0
# maximal categories
K <- max(dat)
maxK <- K+1
# list of indicator data frames
dat.ind <- as.list( 1:(K+1) )
for (ii in 0:K){
dat.ind[[ii+1]] <- 1 * ( dat==ii )*dat.resp
}
I <- ncol(dat) # number of items
n <- nrow(dat)
#--- response indicators
resp.ind.list <- gdm_proc_response_indicators(dat.resp=dat.resp)
#----- output
res <- list(dat=dat, dat.ind=dat.ind, I=I, n=n, dat.resp=dat.resp, K=K, maxK=maxK,
data=data, data0=data0, resp.ind.list=resp.ind.list)
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_proc_data.R
|
## File Name: slca_proc_design_matrix_xlambda.R
## File Version: 0.04
slca_proc_design_matrix_xlambda <- function(Xdes)
{
dimXdes <- dim(Xdes)
res <- cdm_rcpp_slca_calc_Xdes( XDES=as.vector(Xdes), dimXdes=dimXdes )
# XdesM [ii,kk,tt,ll, value ]
NX <- res$NXdesM
XdesM <- res$XdesM[1:NX,]
XdesM <- XdesM[ order( XdesM[,1]*NX + XdesM[,3] ), ]
#--- output
res <- list(XdesM=XdesM, NX=NX, dimXdes=dimXdes )
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_proc_design_matrix_xlambda.R
|
## File Name: slca_proc_multiple_groups.R
## File Version: 0.07
slca_proc_multiple_groups <- function( group, n)
{
if ( is.null(group)){
G <- 1
group0 <- group <- rep(1,n)
} else {
group0 <- group
gr2 <- cdm_sort_unique(x=group)
G <- length(gr2)
group <- match( group, gr2 )
}
group.stat <- NULL
if (G>1){
a1 <- stats::aggregate( 1+0*group, list(group), sum )
a2 <- rep("",G)
for (gg in 1:G){
a2[gg] <- group0[ which( group==gg )[1] ]
}
group.stat <- cbind( a2, a1 )
colnames(group.stat) <- c( "group.orig", "group", "N" )
Ngroup <- a1[,2]
}
if (G==1){
Ngroup <- length(group)
}
#---- output
res <- list(G=G, group=group, group0=group0, group.stat=group.stat, Ngroup=Ngroup)
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_proc_multiple_groups.R
|
## File Name: slca_proc_regularization.R
## File Version: 0.08
slca_proc_regularization <- function(regular_lam, regular_w, Nlam, Xlambda.fixed, regular_n, regular_type )
{
regularization <- FALSE
regular_avai <- c("lasso","mcp","scad")
if ( ! ( regular_type %in% regular_avai ) ){
stop("Only regularization methods 'lasso', 'scad' or 'mcp' can be chosen.\n")
}
if ( regular_lam > 0 ){
regularization <- TRUE
}
if ( is.null(regular_w) ){
regular_w <- rep(1,Nlam)
}
if ( ! is.null(Xlambda.fixed) ){
regular_w[ Xlambda.fixed[,1] ] <- 0
}
# regular_lam_used <- regular_lam * regular_w * regular_n
regular_lam_used <- regular_lam * regular_w
regular_indicator_parameters <- regular_lam_used > 0
#---- output
res <- list( regular_lam=regular_lam, regular_w=regular_w, regular_lam_used=regular_lam_used,
regular_indicator_parameters=regular_indicator_parameters, regularization=regularization )
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_proc_regularization.R
|
## File Name: slca_set_seed.R
## File Version: 0.03
slca_set_seed <- function(seed)
{
seed.used <- NULL
if ( ! is.null(seed) ){
seed.used <- seed
set.seed( seed=seed.used )
}
#--- output
res <- list( seed.used=seed.used )
return(res)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/slca_set_seed.R
|
## File Name: solve_add_ridge.R
## File Version: 0.05
# invert a square matrix after inflating its diagonal by the factor (1+eps);
# the small ridge stabilizes the inversion of nearly singular matrices
solve_add_ridge <- function(A, eps=1E-7)
{
A0 <- A
diag(A) <- diag(A0) * ( 1 + eps )
A2 <- solve(A)
return(A2)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/solve_add_ridge.R
|
## File Name: summary.IRT.RMSD.R
## File Version: 0.19
#*******************************************************
# Summary
summary.IRT.RMSD <- function( object, file=NULL, digits=3, ... )
{
osink( file=file, suffix=paste0( "__SUMMARY.Rout") )
cat("-----------------------------------------------------------------------------\n")
d1 <- utils::packageDescription("CDM")
cat( paste( d1$Package, " ", d1$Version, " (", d1$Date, ")", sep=""), "\n\n" )
G <- object$G
#-- summary call
cdm_print_summary_call(object=object)
cat("-----------------------------------------------------------------------------\n")
cat("Root Mean Square Deviation (RMSD) \n\n")
res0 <- IRT_RMSD_summary_print_statistics( stat_summary=object$RMSD_summary,
stat=object$RMSD, digits=digits)
cat("-----------------------------------------------------------------------------\n")
cat("Bias Corrected Root Mean Square Deviation (RMSD) \n\n")
res0 <- IRT_RMSD_summary_print_statistics( stat_summary=object$RMSD_bc_summary,
stat=object$RMSD_bc, digits=digits)
cat("-----------------------------------------------------------------------------\n")
cat("Mean Absolute Deviation (MAD) \n\n")
res0 <- IRT_RMSD_summary_print_statistics( stat_summary=object$MAD_summary,
stat=object$MAD, digits=digits)
cat("-----------------------------------------------------------------------------\n")
cat("Mean Deviation (MD) \n\n")
res0 <- IRT_RMSD_summary_print_statistics( stat_summary=object$MD_summary,
stat=object$MD, digits=digits)
csink( file=file )
}
#*******************************************************
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/summary.IRT.RMSD.R
|
## File Name: summary.cdi.kli.R
## File Version: 0.05
#################################################################################
# summary S3 method
summary.cdi.kli <- function( object, digits=2, ...)
{
obji <- object$summary
V <- ncol(obji)
for (vv in 2:V){
obji[,vv] <- round( obji[,vv], digits)
}
rownames(obji) <- NULL
print(obji)
}
#####################################################################################
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/summary.cdi.kli.R
|
## File Name: summary.din.R
## File Version: 1.28
################################################################################
# summary method for objects of class "din" #
################################################################################
summary.din <- function(object, top.n.skill.classes=6, overwrite=FALSE, ...)
{
# Call: generic
# Input:
# object of class din
# top.n.skill.classes: a numeric, specifying the number of skill classes,
# starting with the most frequent, to be returned. Default value is 6.
# log.file: an optional character vector, specifying the directory and/or filename
# for an extensive log file (fixed to NULL in the code below).
# overwrite: an optional boolean, specifying whether or not the method is supposed to
# overwrite an existing log.file. If the log.file exists and overwrite is FALSE,
# the user is asked to confirm the overwriting.
# Output: a named list, of the class summary.din (to be passed to print.summary.din),
# consisting of the following five components
# CALL: a character specifying the model rule, the number of items and the
# number of attributes underlying the items.
# IDI: a vector giving the item discrimination index. (see help file)
# SKILL.CLASSES: a vector giving the top.n.skill.classes most frequent skill
# classes and the corresponding class probability.
# AIC: a numeric giving the AIC of the specified model object.
# BIC: a numeric giving the BIC of the specified model object.
################################################################################
# extract output from din object #
################################################################################
log.file <- NULL
CALL <- paste(object$display,"on", ncol(object$data), "items for", nrow(object$skill.patt),"attributes")
AIC <- round(object$AIC, 3)
BIC <- round(object$BIC, 3)
IDI <- t(matrix(round(object$IDI, 4)))
rownames(IDI) <- ""
colnames(IDI) <- rownames(object$item)
# item parameters
item <- data.frame( "item"=colnames(object$data), "guess"=object$guess[,1],
"slip"=object$slip[,1], "IDI"=object$IDI, "rmsea"=object$itemfit.rmsea )
for (vv in 2:5){
item[,vv] <- round( item[,vv], 3 )
}
# SKILL.CLASSES <- object$attribute.patt[order(object$attribute.patt[,1], decreasing=TRUE),][
# 1:min(top.n.skill.classes, 2^length(object$skill.patt)),]
# SKILL.CLASSES <- round(t(SKILL.CLASSES)[1, ], 4)
# if(top.n.skill.classes > nrow(object$attribute.patt))
# warning("There are at most ", 2^length(object$skill.patt),
# " different skill classes. Returning all skill classes.\n")
################################################################################
# catch log file writing errors #
################################################################################
if(!is.null(log.file)){
if(file.exists(log.file)){
if(!overwrite){
cat("Press 'y' to overwrite existing file: ")
conf <- readLines(con=stdin(), n=1)
if(conf %in% c("y", "Y")){
wrn <- getOption("warn"); options(warn=-1)
err <- try({ff <- file(log.file); open(ff, "w"); close(ff)}, silent=TRUE)
options("warn"=wrn)
if(!is.null(err)){
warning("'log.file' argument ignored due to ", err)
log.file <- NULL
}
}
}
} else {
wrn <- getOption("warn"); options(warn=-1)
err <- try({ff <- file(log.file); open(ff, "w"); close(ff)}, silent=TRUE)
options("warn"=wrn)
if(!is.null(err)){
warning("'log.file' argument ignored due to ", err)
log.file <- NULL
}
}
}
################################################################################
# return list #
################################################################################
out <- list(CALL=CALL,IDI=IDI,
call=deparse(object$call),
deviance=-2*object$loglike,
AIC=AIC, BIC=BIC, item=item,
Npars=object$Npars,
log.file=log.file, din.object=object,
start.analysis=object$start.analysis,
end.analysis=object$end.analysis
)
class(out) <- "summary.din"
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/CDM/R/summary.din.R
|
## File Name: summary.din_identifiability.R
## File Version: 0.04
summary.din_identifiability <- function(object, ...)
{
cat("Necessary and sufficient conditions for identifiability (Gu & Xu, 2019)\n\n")
cat("Q-Matrix with\n")
cat("Number of items", "=", object$I,"\n")
cat("Number of skills", "=", object$K,"\n")
cat("Average number of skills per item", "=", round(object$qmat_stat$item_M,2),"\n")
cat("Number of items per skill: \n ")
print(object$qmat_stat$skills_items)
cat("\n** Check identifiability conditions\n")
cat("\nC1: Every skill has at least an item with single loading\n ")
print(object$is_single)
cat("\nC2: Every skill has been measured at least three items\n ")
print(object$is_three_items)
cat("\nC3: Q^\\ast submatrix has distinct columns\n ")
print(object$submat_distinct)
if (object$dina_identified){
sent <- "DINA model is identified."
} else {
sent <- "DINA model is not identified."
}
cat("\n==> ", sent, "\n")
}
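## Usage sketch (illustrative, not part of the package): a minimal object
## carrying exactly the fields accessed above; all values are hypothetical.
# obj <- list(I = 9, K = 3,
#             qmat_stat = list(item_M = 1.33,
#                              skills_items = c(A1 = 3, A2 = 3, A3 = 3)),
#             is_single = TRUE, is_three_items = TRUE,
#             submat_distinct = TRUE, dina_identified = TRUE)
# class(obj) <- "din_identifiability"
# summary(obj)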
## File Name: summary.discrim.index.R
## File Version: 0.08
summary.discrim.index <- function( object, file=NULL, digits=3, ... )
{
osink( file=file, suffix=paste0( "__SUMMARY.Rout") )
cat("-----------------------------------------------------------------------------\n")
d1 <- utils::packageDescription("CDM")
cat( paste( d1$Package, " ", d1$Version, " (", d1$Date, ")", sep=""), "\n\n" )
cat("-----------------------------------------------------------------------------\n")
cat("Test-level discrimination index \n\n")
obji <- object$discrim_test
cdm_print_summary_data_frame(obji, from=1, digits=digits)
cat("-----------------------------------------------------------------------------\n")
cat("Item discrimination index (IDI) \n\n")
obji <- object$idi
cdm_print_summary_data_frame(obji, digits=digits)
cat("-----------------------------------------------------------------------------\n")
cat("Item-attribute discrimination index \n\n")
obji <- object$discrim_item_attribute
cdm_print_summary_data_frame(obji, from=2, digits=digits)
csink( file=file )
}
#*******************************************************
## File Name: summary.entropy.lca.R
## File Version: 0.04
# summary S3 method
summary.entropy.lca <- function( object, digits=2, ...)
{
obji <- object$entropy
cdm_print_summary_data_frame(obji, from=2, digits=digits, rownames_null=TRUE)
}
## File Name: summary.gdina.R
## File Version: 1.792
# Summary of the GDINA model
summary.gdina <- function( object, digits=4, file=NULL, ... )
{
osink( file=file, suffix=paste0( "__SUMMARY.Rout") )
rdigits <- digits
# Parameter summary
display <- cdm_summary_display()
cat(display)
#-- print package
cdm_print_summary_package(pack="CDM")
cat("\n")
#-- summary call
cdm_print_summary_call(object=object)
#-- print computation time
cdm_print_summary_computation_time(object=object)
    if (object$HOGDINA==-1){
        cat("Generalized DINA Model \n")
    } else {
        cat("Higher Order Generalized DINA Model \n")
    }
if ( object$G > 1 ){
cat(" Multiple Group Estmation with",object$G, "Groups \n")
# group statistics
cat("\nGroup statistics\n")
print( object$group.stat )
cat("\n")
}
cat( "\nNumber of iterations","=", object$iter )
if ( ! object$converged ){ cat("\nMaximum number of iterations was reached.") }
cat( "\nIteration with minimal deviance","=", object$iter.min, "\n\n" )
#-- information about algorithm
cat( paste0("Estimation method: ", object$method, "\n") )
cat( paste0("Optimizer: ", object$optimizer, "\n") )
cat( paste0("Monotonicity constraints: ", object$mono.constr, "\n") )
cat( paste0("Number of items at boundary monotonicity constraint: ", object$numb_bound_mono, "\n") )
    if ( ! is.na(object$numb_bound_mono) && object$numb_bound_mono > 0 ){
v1 <- paste0( paste0("Items at boundary constraint: "), paste0( object$item_bound_mono, collapse=" " ) )
cat(v1,"\n")
}
cat("\n")
cat( paste0("Parameter regularization: ", object$regularization, "\n") )
if (object$regularization){
cat( paste0("Regularization type: ", object$regular_type, "\n" ) )
cat( paste0("Regularization parameter lambda: ", object$regular_lam, "\n" ) )
cat( paste0("Regularization parameter alpha: ", object$regular_alpha, " (SCAD-L2, elastic net)\n" ) )
cat( paste0("Regularization parameter tau: ", object$regular_tau, " (truncated L1 penalty)\n" ) )
cat( paste0("Number of regularized item parameters: ", object$numb_regular_pars, "\n" ) )
}
cat("\n")
cat( "Deviance","=", round( object$deviance, 2 ) )
cat( " | Log likelihood","=", round( - object$deviance / 2, 2 ), "\n" )
if ( object$regularization | object$use_prior ){
if ( object$regularization ){
cat( "Penalty value","=", round( object$penalty, 2 ) )
}
if ( object$use_prior ){
cat( "Log prior value","=", round( object$logprior_value, 2 ) )
}
cat( " | Optimization function","=", round( object$opt_fct, 2 ), "\n" )
}
cat("\n")
cat( "Number of persons","=", object$N, "\n" )
cat( "Number of groups","=", object$G, "\n" )
cat( "Number of items","=", ncol(object$data), "\n" )
cat( "Number of estimated parameters","=", object$Npars, "\n" )
cat( "Number of estimated item parameters","=", object$Nipar, "\n" )
cat( "Number of estimated skill class parameters","=", object$Nskillpar )
cat( " (", object$Nskillclasses, "latent skill classes)\n\n")
#*** information criteria
cdm_print_summary_information_criteria(object=object)
cat(display)
cat("Used Q-matrix \n\n")
print(object$q.matrix)
cat("\n")
cat(display)
cat("\nItem Parameter Estimates \n\n")
ds <- object$coef
selvars <- intersect( c("est", "se" ), colnames(ds) )
ind <- which( colnames(ds) %in% selvars )
for (ii in ind){
ds[,ii] <- round( ds[,ii], rdigits )
}
r1 <- options()
options(scipen=999)
print(ds)
options(scipen=r1$scipen)
if ( ! is.null( object$delta.designmatrix ) ){
cat("\nNote: Standard errors are not (yet) correctly implemented!\n")
}
cat("\nRMSD (RMSEA) Item Fit Statistics\n")
print( round( object$itemfit.rmsea,3) )
cat("\nMean of RMSEA item fit:",
round( object$mean.rmsea,3 ), "\n")
# RRUM model
if (object$rrum.model){
cat("\n****\nRRUM Parametrization\n")
print( round( object$rrum.params,3), na="")
cat("\n")
}
cat(display)
cat("Model Implied Conditional Item Probabilities \n\n")
obji <- object$probitem
obji[,"prob"] <- round( obji$prob, rdigits )
print(obji)
cat(display)
cat("\nSkill Probabilities \n\n")
print(round(object$skill.patt,rdigits) )
#**** output tetrachoric or polychoric correlations
cat(display)
cat("\nPolychoric Correlations \n")
G <- object$G
for (gg in 1:G){
cat( paste0( "\nGroup ", gg, "\n") )
obji <- object$polychor[[gg]]
print( round( obji, 3 ))
}
cat("\n", display)
cat("\nSkill Pattern Probabilities \n\n")
if ( object$G==1 ){
xt <- round( object$attribute.patt[,1], rdigits )
names(xt) <- rownames( object$attribute.patt )
} else {
xt <- round( object$attribute.patt, rdigits )
rownames(xt) <- rownames( object$attribute.patt )
}
print(xt)
if (object$HOGDINA>=0){
cat("\n", display)
cat("Higher Order GDINA Model ")
cat("\n Attribute Response Function Parameters \n\n")
print( round( object$attr.rf,3) )
}
csink( file=file )
}
## File Name: summary.gdina.dif.R
## File Version: 0.02
summary.gdina.dif <- function(object,...)
{
stats <- object$difstats
for (vv in 2:ncol(stats) ){
stats[,vv] <- round(stats[,vv], digits=4)
}
print(stats)
}
## File Name: summary.gdina.wald.R
## File Version: 0.07
##############################################################
# summary method
summary.gdina.wald <- function(object, digits=3,
vars=c("X2", "p", "sig", "RMSEA", "wgtdist"), ...){
stats <- object$stats
cn <- colnames(stats)
cn <- cn[-1]
sels <- c("NAttr")
for ( rule in object$cdm_rules){
sels <- c( sels, paste0( rule, "_", vars ) )
}
stats <- stats[, sels ]
cn <- colnames(stats)
cn <- cn[ - c( 1, grep("_sig", cn) ) ]
for (vv in cn){
stats[,vv] <- round(stats[,vv],digits)
}
rownames(stats) <- paste(object$stats$item)
print(stats)
}
###############################################################
## File Name: summary.gdm.R
## File Version: 1.37
#*******************************************************
# Summary for gdm object
summary.gdm <- function( object, file=NULL, ... )
{
osink( file=file, suffix=paste0( "__SUMMARY.Rout") )
cat("-----------------------------------------------------------------------------\n")
d1 <- utils::packageDescription("CDM")
cat( paste( d1$Package, " ", d1$Version, " (", d1$Date, ")", sep=""), "\n\n" )
cat( "Date of Analysis:", paste( object$time$s2 ), "\n" )
cat("Computation Time:", print(object$time$s2 - object$time$s1), "\n\n")
cat("General Diagnostic Model \n\n")
modeltype <- object$irtmodel
#-- summary call
cdm_print_summary_call(object=object)
cat( " ", object$N, "Cases, ", object$I, "Items, ", object$G, "Group(s)", ",",
object$D, "Dimension(s)\n")
if (object$skillspace=="normal" ){
cat(" Normal distribution assumption\n" )
}
if (object$skillspace=="loglinear" ){
cat(" Log-linear Smoothing (up to 3 Moments)\n" )
}
if (object$skillspace=="full" ){
cat(" Saturated skill space\n" )
}
if (object$skillspace=="est" ){
cat(" Saturated skill space with estimated trait grid\n" )
}
if (object$G > 1 ){
cat("\nGroup statistics\n")
print( object$group.stat )
}
cat("\n-----------------------------------------------------------------------------\n")
cat( "Number of iterations=", object$iter )
if ( ! object$converged ){ cat("\nMaximum number of iterations was reached.") }
cat_paste( "\n\nDeviance", xx(), round( object$deviance, 2 ), " | " )
cat_paste( "Log Likelihood", xx(), round( -object$deviance/2, 2 ), "\n" )
cat_paste( "Number of persons", xx(), object$ic$n, "\n" )
cat_paste( "Number of estimated parameters", xx(), object$ic$np, "\n" )
cat_paste( " Number of estimated item parameters", xx(), object$ic$itempars, "\n" )
cat_paste( " ", object$ic$itempars.b, " Intercepts and ", object$ic$itempars.a, " Slopes \n")
cat_paste( " ", object$ic$centeredintercepts, " centered intercepts and ",
object$ic$centeredslopes, " centered slopes \n")
cat_paste( " Number of estimated distribution parameters", xx(),
object$ic$traitpars, "\n\n" )
#** print information criteria
cdm_print_summary_information_criteria(object=object)
cat("-----------------------------------------------------------------------------\n")
cat("Trait Distribution\n")
obji <- object$pi.k
cat( "\nM Trait:\n" )
print( round( t(object$mean.trait ), 3 ) )
cat( "\nSD Trait:\n" )
print( round( t(object$sd.trait ), 3 ) )
cat( "\nSkewness Trait:\n" )
print( round( t(object$skewness.trait ), 3 ) )
cat( "\nCorrelations Trait: \n" )
for (gg in 1:object$G){
cat("Group", gg, "\n")
print( round( object$correlation.trait[[gg]], 3 ) )
}
if ( object$skillspace=="est" ){
cat("\n\nEstimated Skill Distribution\n")
dfr <- data.frame( "theta.k"=object$theta.k, "pi.k"=object$pi.k )
NV <- ncol(dfr)
for (vv in 1:(NV-1) ){ dfr[,vv] <- round( dfr[,vv], 3 ) }
vv <- NV ; dfr[,vv] <- round( dfr[,vv], 5 )
print(dfr)
cat("")
}
cat( "\nEAP Reliability:\n" )
print( round( t(object$EAP.rel ), 3 ) )
cat("-----------------------------------------------------------------------------\n")
cat("Item Parameters \n")
obji <- object$item
obji[,-1] <- round( obji[,-1], 3)
print( obji )
cat("\nMean of RMSEA item fit:",
round( object$mean.rmsea,3 ), "\n")
csink( file=file )
}
#*******************************************************
## File Name: summary.mcdina.R
## File Version: 0.35
##################################################################
# Summary of the GDINA model
summary.mcdina <- function( object, digits=4, file=NULL, ... )
{
#-------------------------------------------------------
# INPUT:
# object ... result from GDINA analysis
# rdigits ... number of digits for rounding parameter estimates
#-------------------------------------------------------
rdigits <- digits
osink( file=file, suffix=paste0( "__SUMMARY.Rout") )
# Parameter summary
cat("----------------------------------------------------------------------------\n")
d1 <- utils::packageDescription("CDM")
cat( paste( d1$Package, " ", d1$Version, " (", d1$Date, ")", sep=""), "\n" )
cat( "Date of Analysis:", paste( object$time$s2 ), "\n" )
cat("Computation Time:", print(object$time$s2 - object$time$s1), "\n\n")
cat("Multiple Choice DINA Model (MC-DINA)\n")
cat( "\nNumber of iterations","=", object$iter )
if ( ! object$converged ){
cat("\nMaximum number of iterations was reached.")
}
cat( "\n\nDeviance","=", round( object$ic$dev, 2 ) )
cat( " | Loglikelihood","=", round( - object$ic$dev / 2, 2 ), "\n" )
cat( "Number of persons","=", object$ic$n, "\n" )
cat( "Number of groups","=", object$G, "\n" )
cat( "Number of items","=", ncol(object$dat), "\n" )
cat( "Number of estimated parameters","=", object$ic$np, "\n" )
cat( "Number of estimated item parameters","=", object$ic$itempars, "\n" )
cat( "Number of estimated skill class parameters","=", object$ic$traitpars )
cat( " (", object$ic$Nskillclasses, "latent skill classes)\n")
cat( "\nAIC ","=", round( object$ic$AIC, 2 ), " ; penalty=",
round( object$ic$AIC - object$ic$deviance,2 ), "\n" )
cat( "BIC ","=", round( object$ic$BIC, 2 ), " ; penalty=",
round( object$ic$BIC - object$ic$deviance,2 ), "\n" )
cat( "CAIC","=", round( object$ic$CAIC, 2 )," ; penalty=",
round( object$ic$CAIC - object$ic$deviance,2 ), "\n\n" )
###########################################################
ds <- object$item
cds <- colnames(ds)
inds <- grep( "se", cds )
ds <- ds[, - inds ]
ind <- grep( "Cat", colnames(ds) )
ds[,ind] <- round( ds[,ind], rdigits )
cat("----------------------------------------------------------------------------\n")
cat("\nItem Parameter Estimates \n\n")
r1 <- options()
options(scipen=999)
print(ds)
options(scipen=r1$scipen)
cat("----------------------------------------------------------------------------\n")
cat("\nSkill Probabilities \n\n")
print(round(object$skill.patt,rdigits) )
# cat("not yet implemented!\n")
# if ( ( object$G==1 ) & (ncol(object$q.matrix ) > 1 ) &
# max(object$NAttr==1 ) ){
# cat("----------------------------------------------------------------------------\n")
# QM <- max(object$q.matrix)
# if (QM==1){
# cat("\nTetrachoric Correlations \n\n")
# gt1 <- skill.cor( object )
# } else {
# cat("\nPolychoric Correlations \n\n")
# gt1 <- skill.polychor( object )
# }
# print(round(gt1$cor.skills,3))
# }
cat("\n----------------------------------------------------------------------------\n")
cat("\nSkill Pattern Probabilities \n\n")
if ( object$G==1 ){
xt <- round( object$attribute.patt[,1], rdigits )
names(xt) <- rownames( object$attribute.patt )
} else {
xt <- round( object$attribute.patt, rdigits )
rownames(xt) <- rownames( object$attribute.patt )
}
print(xt)
csink( file=file )
}
##########################################################################
## File Name: summary.reglca.R
## File Version: 0.284
summary.reglca <- function( object, digits=4, file=NULL, ... )
{
osink( file=file, suffix=paste0( "__SUMMARY.Rout") )
rdigits <- digits
# Parameter summary
display <- cdm_summary_display()
cat(display)
#-- print package
cdm_print_summary_package(pack="CDM")
cat("\n")
#-- summary call
cdm_print_summary_call(object=object)
#-- print computation time
cdm_print_summary_computation_time(object=object)
cat("Regularized Latent Class Model \n")
cat( "\nNumber of iterations","=", object$iter, "\n")
if ( ! object$converged ){
cat("\nMaximum number of iterations was reached.\n")
}
regtype <- object$regular_type
if (object$regular_lam==0 ){
regtype <- "none"
}
cat( paste0("Regularization type: ", regtype, "\n" ) )
cat( paste0("Regularization parameter lambda: ", object$regular_lam, "\n" ) )
cat( paste0("Number of regularized item parameters: ", object$n_reg, "\n" ) )
cat("\n")
cat( "Deviance","=", round( object$deviance, 2 ) )
cat( " | Log likelihood","=", round( - object$deviance / 2, 2 ), "\n" )
cat( "Penalty value","=", round( object$penalty, 2 ) )
cat( " | Optimization function","=", round( object$opt_fct, 2 ), "\n" )
cat("\n")
cat( "Number of persons","=", object$N, "\n" )
cat( "Number of groups","=", object$G, "\n" )
cat( "Number of items","=", object$I, "\n" )
cat( "Number of estimated parameters","=", object$Npars, "\n" )
cat( "Number of estimated item parameters","=", object$Nipar,
"(out of", object$I * object$nclasses, "estimable parameters)", "\n" )
cat( "Number of estimated class parameters","=", object$Nskillpar )
grlab <- if (object$G==1){ "group" } else { "groups" }
cat( " (", object$nclasses, "latent classes,", object$G, grlab, ")\n")
cat( "\n")
#* information criteria
cdm_print_summary_information_criteria(object=object)
cat(display)
cat("Model Implied Conditional Item Probabilities \n\n")
res <- cdm_print_summary_data_frame(obji=object$item, from=2, digits=rdigits,
rownames_null=TRUE)
cat(display)
cat("Latent Class Probabilities \n\n")
print(round(object$class_probs,rdigits) )
csink( file=file )
}
## File Name: summary.slca.R
## File Version: 1.443
#**** Summary for slca object
summary.slca <- function( object, file=NULL, ... )
{
osink( file=file, suffix=paste0( "__SUMMARY.Rout") )
# Parameter summary
display <- cdm_summary_display()
cat(display)
#-- print package
cdm_print_summary_package(pack="CDM")
cat("\n")
#-- print call
cdm_print_summary_call(object=object)
#-- print computation time
cdm_print_summary_computation_time(object=object)
cat("Structured Latent Class Analysis - Function 'slca' \n")
modeltype <- object$irtmodel
cat( " ", object$N, "Cases, ", object$I, "Items, ", object$G, "Group(s)", ",",
object$TP, "Skill classes\n")
cat("\n **** Check carefully the number of parameters and identifiability
of the model. ***\n")
#-- group statistics
if (object$G > 1 ){
cat("\nGroup statistics\n")
print( object$group.stat )
}
cat(display)
cat( "Number of iterations=", object$iter, "\n" )
if ( ! object$converged ){
cat("Maximum number of iterations was reached.\n")
}
cat( "Iteration with minimal deviance","=", object$iter.min, "\n")
cat( "\nDeviance","=", round( object$deviance, 2 ), " | ")
cat( "Log Likelihood","=", round( -object$deviance/2, 2 ), "\n")
cat( "Penalty","=", round( object$regular_penalty, 2 ), "\n")
cat( "Number of persons","=", object$ic$n, "\n" )
cat( "Number of estimated parameters","=", object$ic$np, "\n")
cat( " Number of estimated lambda parameters","=", object$ic$itempars, "\n")
cat( " Number of non-active lambda parameters","=", object$ic$nonactive, "\n")
cat( " Number of estimated distribution parameters","=", object$ic$traitpars, "\n\n")
cat( "Regularization","=", object$regularization, "\n" )
cat( " Regularization method","=", object$regular_type, "\n" )
cat( " Regularization parameter lambda","=", object$regular_lam, "\n\n" )
#-- information criteria
cdm_print_summary_information_criteria(object=object)
cat(display)
cat("Xlambda Parameters \n")
obji <- object$Xlambda
cdm_print_summary_data_frame(obji, digits=3)
cat(display)
cat("Conditional Item Probabilities \n")
obji <- object$item
cdm_print_summary_data_frame(obji, from=3, digits=3)
cat(display)
cat("Skill Class Parameters \n")
obji <- object$delta
cdm_print_summary_data_frame(obji, digits=3)
cat(display)
cat("Skill Class Probabilities \n")
obji <- object$pi.k
cdm_print_summary_data_frame(obji, digits=4)
csink( file=file )
}
## File Name: summary_sink.R
## File Version: 0.08
summary_sink <- function( object, file, append=FALSE, ...)
{
osink( file=file, suffix=".Rout", append=append )
summary(object=object, ... )
csink(file=file)
}
## File Name: univar_table_statistics.R
## File Version: 0.02
univar_table_statistics <- function(freq, values=NULL)
{
K <- length(freq)
if (is.null(values)){
values <- seq(0, K-1)
}
N <- sum(freq)
probs <- freq / N
M <- sum( probs * values)
SD <- sqrt( sum( probs * values^2) - M^2 )
#--- output
res <- list(N=N, M=M, SD=SD)
return(res)
}
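## Example (illustrative): for freq = c(10, 20, 40, 30) with default values
## 0:3, N = 100, M = 0*0.1 + 1*0.2 + 2*0.4 + 3*0.3 = 1.9 and
## SD = sqrt(4.5 - 1.9^2) = sqrt(0.89) ~ 0.943.
# univar_table_statistics(freq = c(10, 20, 40, 30))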
## File Name: vcov.din.R
## File Version: 1.62
##########################################################
# vcov din object
vcov.din <- function( object, extended=FALSE, infomat=FALSE,
ind.item.skillprobs=TRUE, ind.item=FALSE, diagcov=FALSE, h=.001, ... )
{
infomat.ind <- infomat
latresp <- object$control$latresp
guess <- object$guess$est
slip <- object$slip$est
item.patt.split <- object$item.patt.split
item.patt.freq <- object$item.patt.freq
attribute.patt <- object$attribute.patt$class.prob
# calculate log-likelihood for every case
weights <- item.patt.freq
resp.ind.list <- object$control$resp.ind.list
partable <- object$partable
NPars <- max(partable$parindex)
IP <- length(item.patt.freq)
parnames <- unique( partable$parnames[ partable$parindex > 0 ] )
ll.derivM <- matrix( NA, nrow=IP, ncol=NPars )
colnames(ll.derivM) <- parnames
J <- length(guess)
L <- length(attribute.patt)
#*** LL evaluated at theta
guess0 <- guess
slip0 <- slip
skillprobs0 <- attribute.patt
res <- vcov.loglike.din( weights, skillprobs0, slip0, guess0,
latresp, item.patt.split, resp.ind.list, return.p.xi.aj=TRUE)
ll1 <- res$ll
p.xi.aj <- res$p.xi.aj
#-----------------------------------------
# compute first derivative
for (pp in 1:NPars){
#*** LL evaluated at theta
guess0 <- guess
slip0 <- slip
skillprobs0 <- attribute.patt
#*** LL evaluated at theta + h
partable.pp <- partable[partable$parindex==pp,]
guess0 <- guess
slip0 <- slip
skillprobs0 <- attribute.patt
recalc.ll <- TRUE
if ( paste(partable.pp$partype[1])=="guess" ){
ind <- partable.pp$varyindex
vecadd <- rep(0,J)
vecadd[ind] <- h
guess0 <- guess0 + vecadd
}
if ( paste(partable.pp$partype[1])=="slip" ){
ind <- partable.pp$varyindex
vecadd <- rep(0,J)
vecadd[ind] <- h
slip0 <- slip0 + vecadd
}
if ( paste(partable.pp$partype[1])=="probs" ){
ind <- partable.pp$varyindex
vecadd <- rep(0,L)
vecadd[ind] <- h
skillprobs0 <- skillprobs0 + vecadd
recalc.ll <- FALSE
}
if ( recalc.ll){
ll2 <- vcov.loglike.din( weights, skillprobs0, slip0, guess0,
latresp, item.patt.split, resp.ind.list)
} else {
skillprobsM <- matrix( skillprobs0, nrow=IP, ncol=L, byrow=TRUE )
ll2 <- log( rowSums( p.xi.aj * skillprobsM ) )
}
ll.deriv1 <- ( ll2 - ll1 ) / h
ll.derivM[,pp] <- ll.deriv1
}
#cat("compute first derivative") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
#----------------------------------------------------------------
# compute information matrix
# infomat <- matrix( 0, nrow=NPars, ncol=NPars )
# rownames(infomat) <- colnames(infomat) <- parnames
# cat("compute second derivative") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
#*************************
infomat2 <- crossprod( ll.derivM, weights * ll.derivM )
if (ind.item.skillprobs){
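        # assume independence between item parameters (guess/slip) and skill
        # class probabilities: zero the corresponding cross blocks of the
        # information matrix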
pt1 <- partable[ partable$partype %in% c("guess","slip"), ]
pt2 <- partable[ partable$partype %in% c("probs"), ]
pt1 <- unique(setdiff( pt1$parindex, 0 ))
pt2 <- unique(setdiff( pt2$parindex, 0 ))
infomat2[ pt1, pt2 ] <- infomat2[ pt2, pt1 ] <- 0
}
if (ind.item){
pt1 <- partable[ partable$partype %in% c("guess","slip"), ]
h1 <- expand.grid( pt1$parindex, pt1$parindex )
h1 <- merge( x=h1, y=pt1[, c("parindex", "item") ], by=1 )
h1 <- merge( x=h1, y=pt1[, c("parindex", "item") ], by.x=2, by.y=1)
h1 <- h1[ h1[,3] !=h1[,4], ]
infomat2[ as.matrix( h1[,1:2] ) ] <- 0
}
infomat <- infomat2
# inverse of information matrix
if (infomat.ind ){
covmat <- infomat
cat("The information matrix is computed.\n")
} else {
covmat <- solve( infomat )
attr(covmat, "coef") <- coef(object)
# extended set of coefficients
if (extended){
A <- object$vcov.derived$A
covmat <- A %*% covmat %*% t(A)
x1 <- object$partable$value
names(x1) <- object$partable$parnames
attr(covmat,"coef") <- x1
}
}
return(covmat)
}
#########################################################################
## File Name: vcov.loglike.din.R
## File Version: 0.10
#########################################################################
# compute log-likelihood for din objects
vcov.loglike.din <- function( weights, skillprobs0, slip0, guess0,
latresp, item.patt.split, resp.ind.list,
return.p.xi.aj=FALSE )
{
########################
IP <- N <- length(weights)
L <- length(skillprobs0)
J <- length(guess0)
# calculate probabilities
slipM <- matrix( slip0, nrow=nrow(latresp), ncol=ncol(latresp))
guessM <- matrix( guess0, nrow=nrow(latresp), ncol=ncol(latresp))
pj <- (1 - slipM )*latresp + guessM * ( 1 - latresp )
pjM <- array( NA, dim=c(J,2,L) )
pjM[,1,] <- 1 - pj
pjM[,2,] <- pj
skillprobsM <- matrix( skillprobs0, nrow=IP, ncol=L, byrow=TRUE )
# calculate log-likelihood
h1 <- matrix( 1, nrow=IP, ncol=L )
res.hwt <- cdm_calc_posterior(rprobs=pjM, gwt=h1, resp=item.patt.split,
nitems=J, resp.ind.list=resp.ind.list, normalization=FALSE,
thetasamp.density=NULL, snodes=0 )
p.xi.aj <- res.hwt$hwt
# Log-Likelihood (casewise)
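    # i.e. ll_i = log( sum_l P(x_i | alpha_l) * P(alpha_l) ), marginalizing the
    # conditional pattern probabilities over the L skill classes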
ll2 <- log( rowSums( p.xi.aj * skillprobsM ) )
if (return.p.xi.aj){
res <- list( "ll"=ll2, "p.xi.aj"=p.xi.aj )
} else {
res <- ll2
}
return(res)
}
#########################################################################
## File Name: zzz.R
## File Version: 2.12
# zzz.R
#
# This function is simply copied from mice package.
#------------------------------.onLoad-------------------------------
#.onLoad <- function(...){
# d <- packageDescription("CDM")
# cat("\n#############################\n")
# packageStartupMessage(paste(d$Package," ", d$Version," (",d$Date,")",sep=""))
# cat("#############################\n")
# return()
#}
version <- function(pkg="CDM")
{
lib <- dirname(system.file(package=pkg))
d <- utils::packageDescription(pkg)
return(paste(d$Package,d$Version,d$Date,lib))
}
# on attach CDM
.onAttach <- function(libname,pkgname)
{
d <- utils::packageDescription("CDM")
dc <- nchar(d$Version)
m1 <- paste(rep( " ", 12-dc ), collapse="")
packageStartupMessage("**********************************\n",
paste("** ", d$Package," ", d$Version," (",d$Date,")",
m1, "\n",sep=""),
paste("** Cognitive Diagnostic Models **",sep=""),
"\n**********************************\n" )
}
xx <- function(f1=1, f2=1)
{
v1 <- paste0( rep(" ",f1), collapse="" )
v2 <- paste0( rep(" ",f2), collapse="" )
res <- paste0( v1, "=", v2)
return(res)
}
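## Illustration: xx() builds a padded equals sign for aligned summary output,
## e.g. paste0("Deviance", xx(), 123.45) gives "Deviance = 123.45".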
## File Name: CDMConnector/R/CDMConnector-package.R
#' @keywords internal
#' @import omopgenerics
#' @import DBI
#' @importMethodsFrom DBI dbConnect
#' @importFrom dbplyr in_schema
#' @importFrom dplyr all_of matches starts_with contains ends_with
#' @importFrom utils head
#' @importFrom rlang :=
#' @importFrom purrr %||%
#' @importFrom generics compile
#' @importFrom methods is
"_PACKAGE"
NULL
utils::globalVariables(".") # so we can use `.` in dplyr pipelines.
## File Name: CDMConnector/R/Eunomia.R
#' Download Eunomia data files
#'
#' Download the Eunomia data files from https://github.com/darwin-eu/EunomiaDatasets
#'
#' @param dataset_name,datasetName The data set name as found on https://github.com/darwin-eu/EunomiaDatasets. The
#' data set name corresponds to the folder with the data set ZIP files
#' @param cdm_version,cdmVersion The OMOP CDM version. This version will appear in the suffix of the data file,
#' for example: synpuf_5.3.zip. Default: '5.3'
#' @param path_to_data,pathToData The path where the Eunomia data is stored on the file system., By default the
#' value of the environment variable "EUNOMIA_DATA_FOLDER" is used.
#' @param overwrite Control whether the existing archive file will be overwritten should it already exist.
#' @return
#' Invisibly returns the destination if the download was successful.
#'
#' @importFrom utils download.file
#'
#' @examples
#' \dontrun{
#' downloadEunomiaData("GiBleed")
#' }
#' @export
downloadEunomiaData <- function(datasetName = "GiBleed",
cdmVersion = "5.3",
pathToData = Sys.getenv("EUNOMIA_DATA_FOLDER"),
overwrite = FALSE) {
checkmate::assertChoice(datasetName, choices = exampleDatasets())
if (cdmVersion != "5.3") {
rlang::abort("Only CDM v5.3 is supported currently!")
}
if (is.null(pathToData) || is.na(pathToData) || pathToData == "") {
stop("The pathToData argument must be specified. Consider setting the EUNOMIA_DATA_FOLDER environment variable, for example in the .Renviron file.")
}
if (!dir.exists(pathToData)) { dir.create(pathToData, recursive = TRUE) }
zipName <- glue::glue("{datasetName}_{cdmVersion}.zip")
if (file.exists(file.path(pathToData, zipName)) && !overwrite) {
rlang::inform(glue::glue(
"Dataset already exists ({file.path(pathToData, zipName)})
Specify `overwrite = TRUE` to overwrite existing zip archive"
))
return(invisible(pathToData))
}
  # Note: utils::download.file() has no progress-callback interface; the
  # `extra` argument only passes flags to external download methods. Rely on
  # the built-in progress output (quiet = FALSE) instead.
  withr::with_options(list(timeout = 5000), {
    utils::download.file(
      url = glue::glue("https://example-data.ohdsi.dev/{datasetName}.zip"),
      destfile = file.path(pathToData, zipName),
      mode = "wb",
      method = "auto",
      quiet = FALSE
    )
  })
cat("\nDownload completed!\n")
return(invisible(pathToData))
}
#' List the available example CDM datasets
#'
#' @return A character vector with example CDM dataset identifiers
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' exampleDatasets()[1]
#' #> [1] "GiBleed"
#'
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomiaDir("GiBleed"))
#' cdm <- cdm_from_con(con)
#' }
exampleDatasets <- function() {
c("GiBleed",
"synthea-allergies-10k",
"synthea-anemia-10k",
"synthea-breast_cancer-10k",
"synthea-contraceptives-10k",
"synthea-covid19-10k",
"synthea-covid19-200k",
"synthea-dermatitis-10k",
"synthea-heart-10k",
"synthea-hiv-10k",
"synthea-lung_cancer-10k",
"synthea-medications-10k",
"synthea-metabolic_syndrome-10k",
"synthea-opioid_addiction-10k",
"synthea-rheumatoid_arthritis-10k",
"synthea-snf-10k",
"synthea-surgery-10k",
"synthea-total_joint_replacement-10k",
"synthea-veteran_prostate_cancer-10k",
"synthea-veterans-10k",
"synthea-weight_loss-10k",
"empty_cdm")
}
#' @export
#' @rdname exampleDatasets
example_datasets <- exampleDatasets
#' @rdname downloadEunomiaData
#' @export
download_eunomia_data <- function(dataset_name = "GiBleed",
cdm_version = "5.3",
path_to_data = Sys.getenv("EUNOMIA_DATA_FOLDER"),
overwrite = FALSE) {
downloadEunomiaData(datasetName = dataset_name,
cdmVersion = cdm_version,
pathToData = path_to_data,
overwrite = overwrite)
}
#' Create a copy of an example OMOP CDM dataset
#'
#' @description
#' Creates a copy of a Eunomia database, and returns the path to the new database file.
#' If the dataset does not yet exist on the user's computer it will attempt to download the source data
#' to the path defined by the EUNOMIA_DATA_FOLDER environment variable.
#'
#' @param datasetName,dataset_name One of "GiBleed" (default),
#' "synthea-allergies-10k",
#' "synthea-anemia-10k",
#' "synthea-breast_cancer-10k",
#' "synthea-contraceptives-10k",
#' "synthea-covid19-10k",
#' "synthea-covid19-200k",
#' "synthea-dermatitis-10k",
#' "synthea-heart-10k",
#' "synthea-hiv-10k",
#' "synthea-lung_cancer-10k",
#' "synthea-medications-10k",
#' "synthea-metabolic_syndrome-10k",
#' "synthea-opioid_addiction-10k",
#' "synthea-rheumatoid_arthritis-10k",
#' "synthea-snf-10k",
#' "synthea-surgery-10k",
#' "synthea-total_joint_replacement-10k",
#' "synthea-veteran_prostate_cancer-10k",
#' "synthea-veterans-10k",
#' "synthea-weight_loss-10k"
#'
#' @param cdmVersion,cdm_version The OMOP CDM version. Currently only "5.3" is supported.
#' @param databaseFile,database_file The full path to the new copy of the example CDM dataset.
#'
#' @return The file path to the new Eunomia dataset copy
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomiaDir("GiBleed"))
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
#' @export
eunomiaDir <- function(datasetName = "GiBleed",
cdmVersion = "5.3",
databaseFile = tempfile(fileext = ".duckdb")) {
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") {
rlang::abort("Set the `EUNOMIA_DATA_FOLDER` environment variable in your .Renviron file.")
}
checkmate::assertChoice(cdmVersion, c("5.3"))
rlang::check_installed("duckdb")
checkmate::assertChoice(datasetName, choices = exampleDatasets())
  # duckdb databases are tied to a specific version of duckdb until duckdb reaches v1.0
duckdbVersion <- substr(utils::packageVersion("duckdb"), 1, 3)
datasetLocation <- file.path(Sys.getenv("EUNOMIA_DATA_FOLDER"),
glue::glue("{datasetName}_{cdmVersion}_{duckdbVersion}.duckdb"))
datasetAvailable <- file.exists(datasetLocation)
archiveLocation <- file.path(Sys.getenv("EUNOMIA_DATA_FOLDER"),
glue::glue("{datasetName}_{cdmVersion}.zip"))
archiveAvailable <- file.exists(archiveLocation)
if (!datasetAvailable && !archiveAvailable) {
rlang::inform(paste("Downloading", datasetName))
downloadEunomiaData(datasetName = datasetName, cdmVersion = cdmVersion)
archiveAvailable <- file.exists(archiveLocation)
if (isFALSE(archiveAvailable)) rlang::abort("Dataset download failed!")
}
if (!datasetAvailable && archiveAvailable) {
rlang::inform(paste("Creating CDM database", archiveLocation))
tempFileLocation <- tempfile()
utils::unzip(zipfile = archiveLocation, exdir = tempFileLocation)
unzipLocation <- file.path(tempFileLocation, glue::glue("{datasetName}"))
dataFiles <- sort(list.files(path = unzipLocation, pattern = "*.parquet"))
if (isFALSE(length(dataFiles) > 0)) {
rlang::abort(glue::glue("Data file does not contain any .parquet files to load into the database!\nTry removing the file {archiveLocation}."))
}
con <- DBI::dbConnect(duckdb::duckdb(datasetLocation))
# If the function is successful dbDisconnect will be called twice generating a warning.
# If this function is unsuccessful, still close connection on exit.
on.exit(suppressWarnings(DBI::dbDisconnect(con, shutdown = TRUE)), add = TRUE)
on.exit(unlink(tempFileLocation), add = TRUE)
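    # Build per-table field specifications: adapt datatypes to the target dbms
    # and nest into one row per table holding a named vector of column types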
specs <- spec_cdm_field[[cdmVersion]] %>%
dplyr::mutate(cdmDatatype = dplyr::if_else(.data$cdmDatatype == "varchar(max)", "varchar(2000)", .data$cdmDatatype)) %>%
dplyr::mutate(cdmFieldName = dplyr::if_else(.data$cdmFieldName == '"offset"', "offset", .data$cdmFieldName)) %>%
dplyr::mutate(cdmDatatype = dplyr::case_when(
dbms(con) == "postgresql" & .data$cdmDatatype == "datetime" ~ "timestamp",
dbms(con) == "redshift" & .data$cdmDatatype == "datetime" ~ "timestamp",
TRUE ~ cdmDatatype)) %>%
tidyr::nest(col = -"cdmTableName") %>%
dplyr::mutate(col = purrr::map(col, ~setNames(as.character(.$cdmDatatype), .$cdmFieldName)))
files <- tools::file_path_sans_ext(basename(list.files(unzipLocation)))
tables <- specs$cdmTableName
for (i in cli::cli_progress_along(tables)) {
if (isFALSE(tables[i] %in% files)) next
fields <- specs %>%
dplyr::filter(.data$cdmTableName == specs$cdmTableName[i]) %>%
dplyr::pull(.data$col) %>%
unlist()
DBI::dbCreateTable(con,
inSchema("main", specs$cdmTableName[i], dbms = dbms(con)),
fields = fields)
cols <- paste(glue::glue('"{names(fields)}"'), collapse = ", ")
table_path <- file.path(unzipLocation, paste0(specs$cdmTableName[i], ".parquet"))
sql <- glue::glue("INSERT INTO main.{tables[i]}({cols})
SELECT {cols} FROM '{table_path}'")
DBI::dbExecute(con, sql)
}
DBI::dbDisconnect(con, shutdown = TRUE)
}
rc <- file.copy(from = datasetLocation, to = databaseFile)
if (isFALSE(rc)) {
rlang::abort(glue::glue("File copy from {datasetLocation} to {databaseFile} failed!"))
}
return(databaseFile)
}
#' @export
#' @rdname eunomiaDir
eunomia_dir <- function(dataset_name = "GiBleed",
cdm_version = "5.3",
database_file = tempfile(fileext = ".duckdb")) {
eunomiaDir(datasetName = dataset_name,
cdmVersion = cdm_version,
databaseFile = database_file)
}
#' Has the Eunomia dataset been cached?
#'
#' @param dataset_name,datasetName Name of the Eunomia dataset to check. Defaults to "GiBleed".
#' @param cdm_version,cdmVersion Version of the Eunomia dataset to check. Must be "5.3" or "5.4".
#'
#' @return TRUE if the Eunomia example dataset is available and FALSE otherwise
#' @export
eunomia_is_available <- function(dataset_name = "GiBleed",
cdm_version = "5.3") {
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") {
rlang::abort("Set the environment variable EUNOMIA_DATA_FOLDER to the eunomia cache location")
}
stopifnot(is.character(cdm_version), length(cdm_version) == 1, cdm_version %in% c("5.3", "5.4"))
# check for zip archive of csv source files
archiveName <- paste0(dataset_name, "_", cdm_version, ".zip")
archiveLocation <- file.path(Sys.getenv("EUNOMIA_DATA_FOLDER"), archiveName)
return(file.exists(archiveLocation))
}
#' @rdname eunomia_is_available
#' @export
eunomiaIsAvailable <- function(datasetName = "GiBleed",
cdmVersion = "5.3") {
eunomia_is_available(dataset_name = datasetName,
cdm_version = cdmVersion)
}
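# Usage sketch (assumes the EUNOMIA_DATA_FOLDER environment variable is set):
# Sys.setenv(EUNOMIA_DATA_FOLDER = file.path(tempdir(), "eunomia"))
# eunomia_is_available("GiBleed") # FALSE until downloadEunomiaData() has cached the zip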
## File Name: CDMConnector/R/cdm.R
#' Create a CDM reference object from a database connection
#'
#' @param con A DBI database connection to a database where an OMOP CDM v5.4 or
#' v5.3 instance is located.
#' @param cdm_schema,cdmSchema The schema where the OMOP CDM tables are located. Defaults
#' to NULL.
#' @param write_schema,writeSchema An optional schema in the CDM database that the user has
#' write access to.
#' @param cohort_tables,cohortTables A character vector listing the cohort table names to be
#' included in the CDM object.
#' @param cdm_version,cdmVersion The version of the OMOP CDM: "5.3" (default), "5.4",
#' "auto". "auto" attempts to automatically determine the cdm version using
#' heuristics. Cohort tables must be in the write_schema.
#' @param cdm_name,cdmName The name of the CDM. If NULL (default) the cdm_source_name
#'   field in the CDM_SOURCE table will be used.
#' @param achilles_schema,achillesSchema An optional schema in the CDM database
#' that contains achilles tables.
#'
#' @return A list of dplyr database table references pointing to CDM tables
#' @importFrom dplyr all_of matches starts_with ends_with contains
#' @export
cdm_from_con <- function(con,
cdm_schema,
write_schema,
cohort_tables = NULL,
cdm_version = "5.3",
cdm_name = NULL,
achilles_schema = NULL) {
if (!DBI::dbIsValid(con)) {
cli::cli_abort("The connection is not valid. Is the database connection open?")
}
if (missing(write_schema)) {
cli::cli_abort("{.arg write_schema} is now required to create a cdm object with a database backend.
Please make sure you have a schema in your database where you can create new tables and provide it in the `write_schema` argument.
                   If your schema has multiple parts please provide a length 2 character vector: `write_schema = c('my_db', 'my_schema')`")
}
checkmate::assert_character(cdm_name, any.missing = FALSE, len = 1, null.ok = TRUE)
  checkmate::assert_character(cdm_schema, min.len = 1, max.len = 3, any.missing = FALSE)
  checkmate::assert_character(write_schema, min.len = 1, max.len = 3, any.missing = FALSE)
  checkmate::assert_character(cohort_tables, null.ok = TRUE, min.len = 1)
  checkmate::assert_character(achilles_schema, min.len = 1, max.len = 3, any.missing = FALSE, null.ok = TRUE)
checkmate::assert_choice(cdm_version, choices = c("5.3", "5.4", "auto"), null.ok = TRUE)
  # create source object and validate connection
src <- dbSource(con = con, writeSchema = write_schema)
con <- attr(src, "dbcon")
# read omop tables
dbTables <- listTables(con, schema = cdm_schema)
omop_tables <- omopgenerics::omopTables()
omop_tables <- omop_tables[which(omop_tables %in% tolower(dbTables))]
if (length(omop_tables) == 0) {
rlang::abort("There were no cdm tables found in the cdm_schema!")
}
cdm_tables_in_db <- dbTables[which(tolower(dbTables) %in% omop_tables)]
if (all(cdm_tables_in_db == toupper(cdm_tables_in_db))) {
omop_tables <- toupper(omop_tables)
} else if (!all(cdm_tables_in_db == tolower(cdm_tables_in_db))) {
rlang::abort("CDM database tables should be either all upppercase or all lowercase!")
}
cdmTables <- purrr::map(
omop_tables, ~ dplyr::tbl(src = src, schema = cdm_schema, name = .)
) %>%
rlang::set_names(tolower(omop_tables))
if(is.null(cdm_name)){
if("cdm_source" %in% names(cdmTables)){
cdm_name <- cdmTables$cdm_source %>%
utils::head(1) %>%
dplyr::pull("cdm_source_name")
}
}
if(is.null(cdm_name) ||
length(cdm_name) != 1 ||
is.na(cdm_name)) {
cli::cli_alert_warning("cdm name not specified and could not be inferred from the cdm source table")
cdm_name <- "An OMOP CDM database"
}
if (!is.null(achilles_schema)) {
achillesReqTables <- omopgenerics::achillesTables()
acTables <- listTables(con, schema = achilles_schema)
achilles_tables <- achillesReqTables[which(achillesReqTables %in% tolower(acTables))]
    if (length(achilles_tables) != length(achillesReqTables)) {
      cli::cli_abort("Achilles tables not found in {achilles_schema}!")
    }
achillesTables <- purrr::map(
achilles_tables,
~ dplyr::tbl(src = src, schema = achilles_schema, .)
) %>%
rlang::set_names(tolower(achilles_tables))
} else {
achillesTables <- list()
}
cdm <- omopgenerics::newCdmReference(
tables = c(cdmTables, achillesTables),
cdmName = cdm_name,
cdmVersion = cdm_version
)
write_schema_tables <- listTables(con, schema = write_schema)
for (cohort_table in cohort_tables) {
nms <- paste0(cohort_table, c("", "_set", "_attrition"))
x <- purrr::map(nms, function(nm) {
if (nm %in% write_schema_tables) {
dplyr::tbl(src = src, schema = write_schema, name = nm)
} else if (nm %in% toupper(write_schema_tables)) {
dplyr::tbl(src = src, schema = write_schema, name = toupper(nm))
} else {
NULL
}
})
cdm[[cohort_table]] <- x[[1]]
if(is.null(cdm[[cohort_table]])) {
rlang::abort(glue::glue("cohort table `{cohort_table}` not found!"))
}
cdm[[cohort_table]] <- cdm[[cohort_table]] |>
omopgenerics::newCohortTable(
cohortSetRef = x[[2]],
cohortAttritionRef = x[[3]]
)
}
if (dbms(con) == "snowflake") {
s <- write_schema %||% cdm_schema
# Assign temp table schema
if ("prefix" %in% names(s)) {
s <- s[names(s) != "prefix"]
}
if ("catalog" %in% names(s)) {
stopifnot("schema" %in% names(s))
s <- c(unname(s["catalog"]), unname(s["schema"]))
}
    if (length(s) == 2) {
      s2 <- glue::glue_sql("{DBI::dbQuoteIdentifier(con, s[1])}.{DBI::dbQuoteIdentifier(con, s[2])}", .con = con)
    } else {
      s2 <- DBI::dbQuoteIdentifier(con, s[1])
    }
    DBI::dbExecute(con, glue::glue_sql("USE SCHEMA {s2}", .con = con))
}
# TO BE REMOVED WHEN CIRCER WORKS WITH CDM OBJECT
attr(cdm, "cdm_schema") <- cdm_schema
# TO BE REMOVED WHEN DOWNSTREAM PACKAGES NO LONGER USE THESE ATTRIBUTES
attr(cdm, "write_schema") <- write_schema
attr(cdm, "dbcon") <- attr(attr(cdm, "cdm_source"), "dbcon")
return(cdm)
}
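# Usage sketch (duckdb example database, mirroring the examples above):
# con <- DBI::dbConnect(duckdb::duckdb(), eunomiaDir("GiBleed"))
# cdm <- cdm_from_con(con, cdm_schema = "main", write_schema = "main")
# cdm$person
# DBI::dbDisconnect(con, shutdown = TRUE)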
#' @export
#' @importFrom dplyr tbl
tbl.db_cdm <- function(src, schema, name, ...) {
con <- attr(src, "dbcon")
fullName <- inSchema(schema = schema, table = name, dbms = dbms(con))
x <- dplyr::tbl(src = con, fullName) |>
dplyr::rename_all(tolower) |>
omopgenerics::newCdmTable(src = src, name = tolower(name))
return(x)
}
#' @rdname cdm_from_con
#' @export
cdmFromCon <- function(con,
cdmSchema,
writeSchema,
cohortTables = NULL,
cdmVersion = "5.3",
cdmName = NULL,
achillesSchema = NULL) {
cdm_from_con(
con = con,
cdm_schema = cdmSchema,
write_schema = writeSchema,
cohort_tables = cohortTables,
cdm_version = cdmVersion,
cdm_name = cdmName,
achilles_schema = achillesSchema
)
}
detect_cdm_version <- function(con, cdm_schema = NULL) {
cdm_tables <- c("visit_occurrence", "cdm_source", "procedure_occurrence")
if (!all(cdm_tables %in% listTables(con, schema = cdm_schema))) {
rlang::abort(paste0(
"The ",
paste(cdm_tables, collapse = ", "),
" tables are required for auto-detection of cdm version."
))
}
cdm <- purrr::map(
cdm_tables, ~dplyr::tbl(con, inSchema(cdm_schema, ., dbms(con))) %>%
dplyr::rename_all(tolower)) %>%
rlang::set_names(tolower(cdm_tables))
# Try a few different things to figure out what the cdm version is
visit_occurrence_names <- cdm$visit_occurrence %>%
head() %>%
dplyr::collect() %>%
colnames() %>%
tolower()
if ("admitting_source_concept_id" %in% visit_occurrence_names) {
return("5.3")
}
if ("admitted_from_concept_id" %in% visit_occurrence_names) {
return("5.4")
}
procedure_occurrence_names <- cdm$procedure_occurrence %>%
head() %>%
dplyr::collect() %>%
colnames() %>%
tolower()
if ("procedure_end_date" %in% procedure_occurrence_names) {
return("5.4")
}
cdm_version <- cdm$cdm_source %>% dplyr::pull(.data$cdm_version)
if (isTRUE(grepl("5\\.4", cdm_version))) return("5.4")
if (isTRUE(grepl("5\\.3", cdm_version))) return("5.3")
if ("episode" %in% listTables(con, schema = cdm_schema)) {
return("5.4")
} else {
return("5.3")
}
}
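# Heuristic order used above: visit_occurrence columns
# (admitting_source_concept_id => 5.3, admitted_from_concept_id => 5.4), then
# procedure_occurrence$procedure_end_date (=> 5.4), then the cdm_version field
# of cdm_source, and finally the presence of the episode table (=> 5.4).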
#' Get the CDM version
#'
#' Extract the CDM version attribute from a cdm_reference object
#'
#' @param cdm A cdm object
#'
#' @return "5.3" or "5.4"
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#' cdm <- cdm_from_con(con, "eunomia", "main")
#' version(cdm)
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
version <- function(cdm) {
checkmate::assert_class(cdm, "cdm_reference")
versionNumber <- attr(cdm, "cdm_version")
if (!(versionNumber %in% c("5.3", "5.4"))) {
rlang::abort("cdm object version attribute is not 5.3 or 5.4.
Contact the maintainer.")
}
return(versionNumber)
}
#' Get the CDM name
#'
#' Extract the CDM name attribute from a cdm_reference object
#'
#' @param cdm A cdm object
#'
#' @return The name of the CDM as a character string
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#' cdm <- cdm_from_con(con, "eunomia", "main")
#' cdmName(cdm)
#' #> [1] "eunomia"
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
cdmName <- function(cdm) {
checkmate::assert_class(cdm, "cdm_reference")
return(attr(cdm, "cdm_name"))
}
#' @rdname cdmName
#' @export
cdm_name <- cdmName
# con = database connection
# write_schema = schema with write access
# add = checkmate collection
verify_write_access <- function(con, write_schema, add = NULL) {
checkmate::assert_character(
write_schema,
min.len = 1,
max.len = 3,
min.chars = 1,
any.missing = FALSE
)
checkmate::assert_class(add, "AssertCollection", null.ok = TRUE)
checkmate::assert_true(.dbIsValid(con))
tablename <- paste(c(sample(letters, 5, replace = TRUE), "_test_table"), collapse = "")
df1 <- data.frame(chr_col = "a", numeric_col = 1, stringsAsFactors = FALSE)
# Note: ROracle does not support integer round trip
suppressMessages(
DBI::dbWriteTable(con,
name = inSchema(schema = write_schema, table = tablename, dbms = dbms(con)),
value = df1,
overwrite = TRUE)
)
withr::with_options(list(databaseConnectorIntegerAsNumeric = FALSE), {
df2 <- dplyr::tbl(con, inSchema(write_schema, tablename, dbms = dbms(con))) %>%
dplyr::collect() %>%
as.data.frame() %>%
dplyr::rename_all(tolower) %>% # dbWriteTable can create uppercase column names on snowflake
dplyr::select("chr_col", "numeric_col") # bigquery can reorder columns
})
DBI::dbRemoveTable(con, inSchema(write_schema, tablename, dbms = dbms(con)))
if (!isTRUE(all.equal(df1, df2))) {
msg <- paste("Write access to schema", write_schema, "could not be verified.")
if (is.null(add)) {
rlang::abort(msg)
} else {
add$push(msg)
}
}
invisible(NULL)
}
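# Usage sketch (duckdb in-memory database; "main" is its default schema):
# con <- DBI::dbConnect(duckdb::duckdb())
# verify_write_access(con, write_schema = "main") # returns invisible(NULL) on success
# DBI::dbDisconnect(con, shutdown = TRUE)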
#' CDM table selection helper
#'
#' The OMOP CDM tables are grouped together and the `tbl_group` function allows
#' users to easily create a CDM reference including one or more table groups.
#'
#' {\figure{cdm54.png}{options: width="100\%" alt="CDM 5.4"}}
#'
#' The "default" table group is meant to capture the most commonly used set
#' of CDM tables. Currently the "default" group is: person,
#' observation_period, visit_occurrence,
#' visit_detail, condition_occurrence, drug_exposure, procedure_occurrence,
#' device_exposure, measurement, observation, death, note, note_nlp, specimen,
#' fact_relationship, location, care_site, provider, payer_plan_period,
#' cost, drug_era, dose_era, condition_era, concept, vocabulary,
#' concept_relationship, concept_ancestor, concept_synonym, drug_strength
#'
#' @param group A character vector of CDM table groups: "vocab", "clinical",
#' "all", "default", "derived".
#'
#' @return A character vector of CDM tables names in the groups
#' @export
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(RPostgres::Postgres(),
#' dbname = "cdm",
#' host = "localhost",
#' user = "postgres",
#' password = Sys.getenv("PASSWORD"))
#'
#' cdm <- cdm_from_con(con, cdm_name = "test", cdm_schema = "public") %>%
#' cdm_select_tbl(tbl_group("vocab"))
#' }
tbl_group <- function(group) {
# groups are defined in the internal package dataframe called spec_cdm_table
# created by a script in the extras folder
checkmate::assert_subset(group, c("vocab", "clinical", "all", "default", "derived"))
# use v5.3 here. The set of table groups between 5.3 and 5.4 are the same.
spec <- spec_cdm_table[["5.3"]]
purrr::map(group, ~ spec[spec[[paste0("group_", .)]], ]$cdmTableName) %>%
unlist() %>%
unique()
}
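# For example (exact contents depend on the internal spec tables),
# tbl_group("vocab") returns the vocabulary table names, e.g. concept,
# vocabulary, concept_relationship, concept_ancestor, concept_synonym,
# drug_strength.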
#' @export
#' @rdname tbl_group
tblGroup <- tbl_group
#' Get the database management system (dbms) from a cdm_reference or DBI
#' connection
#'
#' @param con A DBI connection or cdm_reference
#'
#' @return A character string representing the dbms that can be used with
#' SqlRender
#' @export
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
#' cdm <- cdm_from_con(con)
#' dbms(cdm)
#' dbms(con)
#' }
dbms <- function(con) {
if (methods::is(con, "cdm_reference")) {
con <- attr(con, "dbcon")
} else if (methods::is(con, "Pool")) {
if (!rlang::is_installed("pool")) {
rlang::abort("Please install the pool package.")
}
con <- pool::localCheckout(con)
}
checkmate::assertClass(con, "DBIConnection")
if (!is.null(attr(con, "dbms"))) {
return(attr(con, "dbms"))
}
result <- switch(
class(con),
"Microsoft SQL Server" = "sql server",
"PqConnection" = "postgresql",
"RedshiftConnection" = "redshift",
"BigQueryConnection" = "bigquery",
"SQLiteConnection" = "sqlite",
"duckdb_connection" = "duckdb",
"Spark SQL" = "spark",
"OraConnection" = "oracle",
"Oracle" = "oracle",
"Snowflake" = "snowflake"
# add mappings from various connection classes to dbms here
)
if (is.null(result)) {
rlang::abort(glue::glue("{class(con)} is not a supported connection type."))
}
return(result)
}
#' Collect a list of lazy queries and save the results as files
#'
#' @param cdm A cdm object
#' @param path A folder to save the cdm object to
#' @param format The file format to use: "parquet" (default), "csv", "feather" or "duckdb".
#'
#' @return Invisibly returns the cdm input
#' @export
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
#' vocab <- cdm_from_con(con, "main") %>%
#' cdm_select_tbl("concept", "concept_ancestor")
#' stow(vocab, here::here("vocab_tables"))
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
stow <- function(cdm, path, format = "parquet") {
checkmate::assert_class(cdm, "cdm_reference")
checkmate::assert_choice(format, c("parquet", "csv", "feather", "duckdb"))
path <- path.expand(path)
checkmate::assert_true(file.exists(path))
if (format %in% c("parquet", "feather")) {
rlang::check_installed("arrow")
}
switch(
format,
parquet = purrr::walk2(
cdm,
names(cdm),
~ arrow::write_parquet(dplyr::collect(.x), file.path(path, paste0(.y, ".parquet")))
),
csv = purrr::walk2(
cdm,
names(cdm),
~ readr::write_csv(dplyr::collect(.x), file.path(path, paste0(.y, ".csv")))
),
feather = purrr::walk2(
cdm,
names(cdm),
~ arrow::write_feather(dplyr::collect(.x), file.path(path, paste0(.y, ".feather")))
),
duckdb = {
rlang::check_installed("duckdb")
con <- DBI::dbConnect(duckdb::duckdb(file.path(path, "cdm.duckdb")))
purrr::walk(names(cdm), ~DBI::dbWriteTable(con, name = ., value = dplyr::collect(cdm[[.]])))
DBI::dbDisconnect(con, shutdown = TRUE)
}
)
invisible(cdm)
}
#' Create a CDM reference from a folder containing parquet, csv, or feather
#' files
#'
#' @param path A folder where an OMOP CDM v5.4 instance is located.
#' @param format What is the file format to be read in? Must be "auto"
#' (default), "parquet", "csv", "feather".
#' @param cdm_version,cdmVersion The version of the cdm (5.3 or 5.4)
#' @param cdm_name,cdmName A name to use for the cdm.
#' @param as_data_frame,asDataFrame TRUE (default) will read files into R as dataframes.
#' FALSE will read files into R as Arrow Datasets.
#' @return A list of dplyr database table references pointing to CDM tables
#' @export
cdm_from_files <- function(path,
format = "auto",
cdm_version = "5.3",
cdm_name = NULL,
as_data_frame = TRUE) {
checkmate::assert_choice(format, c("auto", "parquet", "csv", "feather"))
checkmate::assert_logical(as_data_frame, len = 1, null.ok = FALSE)
checkmate::assert_true(file.exists(path))
checkmate::assert_choice(cdm_version, choices = c("5.3", "5.4"))
checkmate::assert_character(cdm_name, null.ok = TRUE)
rlang::check_installed("arrow")
path <- path.expand(path)
files <- list.files(path, full.names = TRUE)
if (format == "auto") {
format <- unique(tools::file_ext(files))
if (length(format) > 1) {
rlang::abort(paste("Multiple file formats detected:", paste(format, collapse = ", ")))
}
checkmate::assert_choice(format, c("parquet", "csv", "feather"))
}
cdm_tables <- tools::file_path_sans_ext(basename(list.files(path)))
cdm_table_files <- file.path(path, paste0(cdm_tables, ".", format))
purrr::walk(cdm_table_files, ~checkmate::assert_file_exists(., "r"))
cdm <- switch(
format,
parquet = purrr::map(cdm_table_files, function(.) {
arrow::read_parquet(., as_data_frame = as_data_frame)
}),
csv = purrr::map(cdm_table_files, function(.) {
arrow::read_csv_arrow(., as_data_frame = as_data_frame)
}),
feather = purrr::map(cdm_table_files, function(.) {
arrow::read_feather(., as_data_frame = as_data_frame)
})
)
  names(cdm) <- tolower(cdm_tables)

  # Try to get the cdm name from the cdm_source table if not supplied.
  # (This must happen after the table names are assigned above.)
  if (is.null(cdm_name) && ("cdm_source" %in% names(cdm))) {
    cdm_source <- cdm[["cdm_source"]] %>%
      head() %>%
      dplyr::collect() %>%
      dplyr::rename_all(tolower)

    cdm_name <- dplyr::coalesce(cdm_source$cdm_source_name[1],
                                cdm_source$cdm_source_abbreviation[1])
  }

  if (is.null(cdm_name)) {
    rlang::abort("cdm_name must be supplied!")
  }
class(cdm) <- "cdm_reference"
attr(cdm, "cdm_schema") <- NULL
attr(cdm, "write_schema") <- NULL
attr(cdm, "dbcon") <- NULL
attr(cdm, "cdm_version") <- cdm_version
attr(cdm, "cdm_name") <- cdm_name
return(cdm)
}
#' @rdname cdm_from_files
#' @export
cdmFromFiles <- function(path,
format = "auto",
cdmVersion = "5.3",
cdmName = NULL,
asDataFrame = TRUE) {
cdm_from_files(path = path,
format = format,
cdm_version = cdmVersion,
cdm_name = cdmName,
as_data_frame = asDataFrame)
}
#' Extract CDM metadata
#'
#' Extract the name, version, and selected record counts from a cdm.
#'
#' @param cdm A cdm object
#'
#' @return A named list of attributes about the cdm including selected fields
#' from the cdm_source table and record counts from the person and
#' observation_period tables
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#' cdm <- cdm_from_con(con, "main")
#' snapshot(cdm)
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
snapshot <- function(cdm) {
assert_tables(cdm, tables = c("cdm_source", "vocabulary"), empty.ok = TRUE)
assert_tables(cdm, tables = c("person", "observation_period"))
person_count <- dplyr::tally(cdm$person, name = "n") %>% dplyr::pull(.data$n)
observation_period_count <- dplyr::tally(cdm$observation_period, name = "n") %>%
dplyr::pull(.data$n)
observation_period_range <- cdm$observation_period %>%
dplyr::summarise(
max = max(.data$observation_period_end_date, na.rm = TRUE),
min = min(.data$observation_period_start_date, na.rm = TRUE)
) %>%
dplyr::collect()
snapshot_date <- as.character(format(Sys.Date(), "%Y-%m-%d"))
vocab_version <-
cdm$vocabulary %>%
dplyr::filter(.data$vocabulary_id == "None") %>%
dplyr::pull(.data$vocabulary_version)
if (length(vocab_version) == 0) {
vocab_version <- NA_character_
}
cdm_source_name <- cdm$cdm_source %>% dplyr::pull(.data$cdm_source_name)
cdm_source <- dplyr::collect(cdm$cdm_source)
if (nrow(cdm_source) == 0) {
cdm_source <- dplyr::tibble(
vocabulary_version = vocab_version,
cdm_source_name = "",
cdm_holder = "",
cdm_release_date = "",
cdm_version = attr(cdm, "cdm_version"),
source_description = "",
source_documentation_reference = ""
)
}
cdm_source %>%
dplyr::mutate(
cdm_name = dplyr::coalesce(attr(cdm, "cdm_name"), as.character(NA)),
vocabulary_version = dplyr::coalesce(
.env$vocab_version, .data$vocabulary_version
),
person_count = .env$person_count,
observation_period_count = .env$observation_period_count,
earliest_observation_period_start_date =
.env$observation_period_range$min,
latest_observation_period_end_date = .env$observation_period_range$max,
snapshot_date = .env$snapshot_date
) %>%
dplyr::select(
"cdm_name",
"cdm_source_name",
"cdm_description" = "source_description",
"cdm_documentation_reference" = "source_documentation_reference",
"cdm_version",
"cdm_holder",
"cdm_release_date",
"vocabulary_version",
"person_count",
"observation_period_count",
"earliest_observation_period_start_date",
"latest_observation_period_end_date",
"snapshot_date"
) %>%
dplyr::mutate_all(as.character)
}
#' Disconnect the connection of the cdm object
#'
#' @param cdm cdm reference
#'
#' @export
cdmDisconnect <- function(cdm) {
if (!("cdm_reference" %in% class(cdm))) {
cli::cli_abort("cdm should be a cdm_reference")
}
DBI::dbDisconnect(cdmCon(cdm), shutdown = TRUE)
}
#' @rdname cdmDisconnect
#' @export
cdm_disconnect <- cdmDisconnect
#' Select a subset of tables in a cdm reference object
#'
#' This function uses syntax similar to `dplyr::select` and can be used to
#' subset a cdm reference object to a specific set of tables
#'
#' @param cdm A cdm reference object created by `cdm_from_con`
#' @param ... One or more table names of the tables of the `cdm` object.
#' `tidyselect` is supported, see `dplyr::select()` for details on the semantics.
#'
#' @return A cdm reference object containing the selected tables
#' @export
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
#'
#' cdm <- cdm_from_con(con, "main")
#'
#' cdm_select_tbl(cdm, person)
#' cdm_select_tbl(cdm, person, observation_period)
#' cdm_select_tbl(cdm, tbl_group("vocab"))
#' cdm_select_tbl(cdm, "person")
#'
#' DBI::dbDisconnect(con)
#' }
cdm_select_tbl <- function(cdm, ...) {
tables <- names(cdm) %>% rlang::set_names(names(cdm))
selected <- names(tidyselect::eval_select(rlang::quo(c(...)), data = tables))
if (length(selected) == 0) {
rlang::abort("No tables selected!")
}
tables_to_drop <- dplyr::setdiff(tables, selected)
for (i in tables_to_drop) {
cdm[i] <- NULL
}
cdm
}
#' Get cdm write schema
#'
#' @param cdm A cdm reference object created by `cdm_from_con`
#'
#' @return The database write schema
#' @export
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
#'
#' cdm <- cdm_from_con(con = con, cdm_name = "Eunomia",
#' cdm_schema = "main", write_schema = "main")
#'
#' cdmWriteSchema(cdm)
#'
#' DBI::dbDisconnect(con)
#' }
cdmWriteSchema <- function(cdm) {
attr(attr(cdm, "cdm_source"), "write_schema")
}
#' Get underlying database connection
#'
#' @param cdm A cdm reference object created by `cdm_from_con`
#'
#' @return A reference to the database containing tables in the cdm reference
#' @export
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
#'
#' cdm <- cdm_from_con(con = con, cdm_name = "Eunomia",
#' cdm_schema = "main", write_schema = "main")
#'
#' cdmCon(cdm)
#'
#' DBI::dbDisconnect(con)
#' }
cdmCon <- function(cdm) {
attr(attr(cdm, "cdm_source"), "dbcon")
}
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/cdm.R
|
# Copyright 2022 DARWIN EU®
#
# This file is part of DrugUtilisation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# internal function to add a filter query to all tables in a cdm
# person_subset should be a tbl_sql reference to a
# table in the database with one column "person_id".
# There should be no duplicated rows in this table.
# The person_subset table should be a temporary table in the database.
# These requirements are not checked but assumed to be true.
cdm_sample_person <- function(cdm, person_subset) {
checkmate::assert_class(cdm, "cdm_reference")
checkmate::assert_class(person_subset, "tbl_sql")
for (nm in names(cdm)) {
if ("person_id" %in% colnames(cdm[[nm]])) {
cdm[[nm]] <- dplyr::inner_join(cdm[[nm]], person_subset, by = "person_id")
} else if ("subject_id" %in% colnames(cdm[[nm]])) {
cdm[[nm]] <- dplyr::inner_join(cdm[[nm]], person_subset, by = c("subject_id" = "person_id"))
}
}
return(cdm)
}
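# A minimal usage sketch (not run; assumes a duckdb cdm with write access).
# Callers build the deduplicated one-column person_subset table in the database
# and pass it in, as cdmSample() and cdmSubset() below do:
# person_subset <- cdm$person %>%
#   dplyr::filter(.data$person_id %in% c(1L, 2L)) %>%
#   dplyr::select("person_id") %>%
#   dplyr::distinct() %>%
#   dplyr::compute() # temporary table satisfying the requirements above
# cdm2 <- cdm_sample_person(cdm, person_subset)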
#' Subset a cdm to the individuals in one or more cohorts
#'
#' `cdmSubset` will return a new cdm object that contains lazy queries pointing
#' to each of the cdm tables but subset to individuals in a generated cohort.
#' Since the cdm tables are lazy queries, the subset operation will only be
#' done when the tables are used. `computeQuery` can be used to run the SQL
#' used to subset a cdm table and store it as a new table in the database.
#'
#' `r lifecycle::badge("experimental")`
#'
#' @param cdm A cdm_reference object
#' @param cohortTable,cohort_table The name of a cohort table in the cdm reference
#' @param cohortId,cohort_id IDs of the cohorts that we want to subset from the cohort
#' table. If NULL (default) all cohorts in cohort table are considered.
#' @param verbose Should subset messages be printed? TRUE or FALSE (default)
#'
#' @return A modified cdm_reference with all clinical tables subset
#' to just the persons in the selected cohorts.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' library(dplyr, warn.conflicts = FALSE)
#'
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#'
#' cdm <- cdm_from_con(con, cdm_schema = "main", write_schema = "main")
#'
#' # generate a cohort
#' path <- system.file("cohorts2", mustWork = TRUE, package = "CDMConnector")
#'
#' cohortSet <- readCohortSet(path) %>%
#' filter(cohort_name == "GIBleed_male")
#'
#' # subset cdm to persons in the generated cohort
#' cdm <- generateCohortSet(cdm, cohortSet = cohortSet, name = "gibleed")
#'
#' cdmGiBleed <- cdmSubsetCohort(cdm, cohortTable = "gibleed")
#'
#' cdmGiBleed$person %>%
#' tally()
#' #> # Source: SQL [1 x 1]
#' #> # Database: DuckDB 0.6.1
#' #> n
#' #> <dbl>
#' #> 1 237
#'
#' cdm$person %>%
#' tally()
#' #> # Source: SQL [1 x 1]
#' #> # Database: DuckDB 0.6.1
#' #> n
#' #> <dbl>
#' #> 1 2694
#'
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
cdmSubsetCohort <- function(cdm,
cohortTable = "cohort",
cohortId = NULL,
verbose = FALSE) {
checkmate::assertClass(cdm, "cdm_reference")
checkmate::assertCharacter(cohortTable, len = 1)
checkmate::assertTRUE(cohortTable %in% names(cdm))
checkmate::assertClass(cdm[[cohortTable]], "cohort_table")
checkmate::assertIntegerish(cohortId, min.len = 1, null.ok = TRUE)
checkmate::assertLogical(verbose, len = 1)
cohort_colnames <- colnames(cdm[[cohortTable]])
if (!("subject_id" %in% cohort_colnames)) {
rlang::abort(glue::glue("subject_id column is not in cdm[['{cohortTable}']] table!"))
}
if (!("cohort_definition_id" %in% cohort_colnames)) {
rlang::abort(glue::glue("cohort_definition_id column is not in cdm[['{cohortTable}']] table!"))
}
subjects <- cdm[[cohortTable]]
if (!is.null(cohortId)) {
subjects <- subjects %>%
dplyr::filter(.data$cohort_definition_id %in% .env$cohortId)
}
n_subjects <- subjects %>%
dplyr::distinct(.data$subject_id) %>%
dplyr::tally() %>%
dplyr::pull("n")
if (n_subjects == 0 && verbose) {
rlang::inform("Selected cohorts are empty. No subsetting will be done.")
return(cdm)
} else if (verbose) {
rlang::inform(glue::glue("Subsetting cdm to {n_subjects} persons"))
}
n_subjects_person_table <- subjects %>%
dplyr::inner_join(cdm$person, by = c("subject_id" = "person_id")) %>%
dplyr::distinct(.data$subject_id) %>%
dplyr::tally() %>%
dplyr::pull("n")
if (n_subjects != n_subjects_person_table) {
rlang::warn(glue::glue(
"Not all cohort subjects are present in person table.
- N cohort subjects: {n_subjects}
- N cohort subjects in cdm person table: {n_subjects_person_table}"))
}
prefix <- unique_prefix()
person_subset <- subjects %>%
dplyr::select(person_id = "subject_id") %>%
dplyr::distinct() %>%
dplyr::compute(name = glue::glue("person_sample{prefix}_"), temporary = FALSE)
cdm_sample_person(cdm, person_subset)
}
#' @rdname cdmSubsetCohort
#' @export
cdm_subset_cohort <- function(cdm,
cohort_table = "cohort",
cohort_id = NULL,
verbose = FALSE) {
cdmSubsetCohort(cdm = cdm,
cohortTable = cohort_table,
cohortId = cohort_id,
verbose = verbose)
}
#' Subset a cdm object to a random sample of individuals
#'
#' `cdmSample` takes a cdm object and returns a new cdm that includes only a
#' random sample of persons in the cdm. Only `person_id`s in both the person
#' table and observation_period table will be considered.
#'
#' `r lifecycle::badge("experimental")`
#'
#' @param cdm A cdm_reference object
#' @param n Number of persons to include in the cdm
#'
#' @return A modified cdm_reference object where all clinical tables are lazy
#' queries pointing to subset
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' library(dplyr, warn.conflicts = FALSE)
#'
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#'
#' cdm <- cdm_from_con(con, cdm_schema = "main")
#'
#' cdmSampled <- cdmSample(cdm, n = 2)
#'
#' cdmSampled$person %>%
#' select(person_id)
#' #> # Source: SQL [2 x 1]
#' #> # Database: DuckDB 0.6.1
#' #> person_id
#' #> <dbl>
#' #> 1 155
#' #> 2 3422
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
cdmSample <- function(cdm, n) {
checkmate::assertClass(cdm, "cdm_reference")
checkmate::assertIntegerish(n, len = 1, lower = 1, upper = 1e9, null.ok = FALSE)
assert_tables(cdm, "person")
# Note temporary = TRUE in dbWriteTable does not work on all dbms but we want a temp table here.
person_subset <- cdm[["person"]] %>%
dplyr::select("person_id") %>%
dplyr::distinct() %>%
dplyr::slice_sample(n = n) %>%
dplyr::rename_all(tolower) %>%
dplyr::compute()
cdm_sample_person(cdm, person_subset)
}
#' @rdname cdmSample
#' @export
cdm_sample <- cdmSample
#' Subset a cdm object to a set of persons
#'
#' `cdmSubset` takes a cdm object and a list of person IDs as input. It
#' returns a new cdm that includes data only for persons matching the provided
#' person IDs. Generated cohorts in the cdm will also be subset to
#' the IDs provided.
#'
#' `r lifecycle::badge("experimental")`
#'
#' @param cdm A cdm_reference object
#' @param person_id,personId A numeric vector of person IDs to include in the cdm
#'
#' @return A modified cdm_reference object where all clinical tables are lazy
#' queries pointing to subset
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' library(dplyr, warn.conflicts = FALSE)
#'
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#'
#' cdm <- cdm_from_con(con, cdm_schema = "main")
#'
#' cdm2 <- cdmSubset(cdm, personId = c(2, 18, 42))
#'
#' cdm2$person %>%
#' select(1:3)
#' #> # Source: SQL [3 x 3]
#' #> # Database: DuckDB 0.6.1
#' #> person_id gender_concept_id year_of_birth
#' #> <dbl> <dbl> <dbl>
#' #> 1 2 8532 1920
#' #> 2 18 8532 1965
#' #> 3 42 8532 1909
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
cdmSubset <- function(cdm, personId) {
checkmate::assertClass(cdm, "cdm_reference")
checkmate::assertIntegerish(personId,
min.len = 1,
max.len = 1e6,
null.ok = FALSE)
writeSchema <- cdmWriteSchema(cdm)
if (is.null(writeSchema)) rlang::abort("write_schema is required for subsetting a cdm!")
assertWriteSchema(cdm)
con <- cdmCon(cdm)
prefix <- unique_prefix()
DBI::dbWriteTable(con,
name = inSchema(writeSchema, glue::glue("temp{prefix}_"), dbms(con)),
value = data.frame(person_id = as.integer(personId)),
overwrite = TRUE)
# Note temporary = TRUE in dbWriteTable does not work on all dbms but we want a temp table here.
person_subset <- dplyr::tbl(con, inSchema(writeSchema, glue::glue("temp{prefix}_"), dbms(con))) %>%
dplyr::rename_all(tolower) %>% # just in case
compute(name = glue::glue("person_subset_{prefix}"), temporary = TRUE)
DBI::dbRemoveTable(con, inSchema(writeSchema, glue::glue("temp{prefix}_"), dbms(con)))
cdm_sample_person(cdm, person_subset)
}
#' @rdname cdmSubset
#' @export
cdm_subset <- function(cdm, person_id){
cdmSubset(cdm = cdm, personId = person_id)
}
#' Flatten a cdm into a single observation table
#'
#' This experimental function transforms the OMOP CDM into a single observation
#' table. This is only recommended for use with a filtered CDM or a cdm that is
#' small in size.
#'
#' `r lifecycle::badge("experimental")`
#'
#' @param cdm A cdm_reference object
#' @param domain Domains to include. Must be a subset of "condition", "drug",
#' "procedure", "measurement", "visit", "death", "observation".
#' @param include_concept_name,includeConceptName Should concept_name and type_concept_name be
#' included in the output table? TRUE (default) or FALSE
#'
#' @return A lazy query that when evaluated will result in a single cdm table
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' library(dplyr, warn.conflicts = FALSE)
#'
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#'
#' cdm <- cdm_from_con(con, cdm_schema = "main")
#'
#' all_observations <- cdmSubset(cdm, personId = c(2, 18, 42)) %>%
#' cdmFlatten() %>%
#' collect()
#'
#' all_observations
#' #> # A tibble: 213 × 8
#' #> person_id observation_. start_date end_date type_. domain obser. type_.
#' #> <dbl> <dbl> <date> <date> <dbl> <chr> <chr> <chr>
#' #> 1 2 40213201 1986-09-09 1986-09-09 5.81e5 drug pneumo <NA>
#' #> 2 18 4116491 1997-11-09 1998-01-09 3.20e4 condi Escher <NA>
#' #> 3 18 40213227 2017-01-04 2017-01-04 5.81e5 drug tetanu <NA>
#' #> 4 42 4156265 1974-06-13 1974-06-27 3.20e4 condi Facial <NA>
#' #> 5 18 40213160 1966-02-23 1966-02-23 5.81e5 drug poliov <NA>
#' #> 6 42 4198190 1933-10-29 1933-10-29 3.80e7 proce Append <NA>
#' #> 7 2 4109685 1952-07-13 1952-07-27 3.20e4 condi Lacera <NA>
#' #> 8 18 40213260 2017-01-04 2017-01-04 5.81e5 drug zoster <NA>
#' #> 9 42 4151422 1985-02-03 1985-02-03 3.80e7 proce Sputum <NA>
#' #> 10 2 4163872 1993-03-29 1993-03-29 3.80e7 proce Plain <NA>
#' #> # ... with 203 more rows, and abbreviated variable names observation_concept_id,
#' #> # type_concept_id, observation_concept_name, type_concept_name
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
cdmFlatten <- function(cdm,
domain = c("condition", "drug", "procedure"),
includeConceptName = TRUE) {
checkmate::assertClass(cdm, "cdm_reference")
checkmate::assertCharacter(domain, min.len = 1)
checkmate::assertSubset(domain, choices = c("condition",
"drug",
"procedure",
"measurement",
"visit",
"death",
"observation"))
checkmate::assertLogical(includeConceptName, len = 1)
queryList <- list()
if ("condition" %in% domain) {
assert_tables(cdm, "condition_occurrence")
queryList[["condition"]] <- cdm$condition_occurrence %>%
dplyr::transmute(
person_id = .data$person_id,
observation_concept_id = .data$condition_concept_id,
start_date = .data$condition_start_date,
end_date = .data$condition_end_date,
type_concept_id = .data$condition_type_concept_id) %>%
dplyr::distinct() %>%
dplyr::mutate(domain = "condition")
}
if ("drug" %in% domain) {
assert_tables(cdm, "drug_exposure")
queryList[["drug"]] <- cdm$drug_exposure %>%
dplyr::transmute(
person_id = .data$person_id,
observation_concept_id = .data$drug_concept_id,
start_date = .data$drug_exposure_start_date,
end_date = .data$drug_exposure_end_date,
type_concept_id = .data$drug_type_concept_id) %>%
dplyr::distinct() %>%
dplyr::mutate(domain = "drug")
}
if ("procedure" %in% domain) {
assert_tables(cdm, "procedure_occurrence")
queryList[["procedure"]] <- cdm$procedure_occurrence %>%
dplyr::transmute(
person_id = .data$person_id,
observation_concept_id = .data$procedure_concept_id,
start_date = .data$procedure_date,
end_date = .data$procedure_date,
type_concept_id = .data$procedure_type_concept_id) %>%
dplyr::distinct() %>%
dplyr::mutate(domain = "procedure")
}
if ("measurement" %in% domain) {
assert_tables(cdm, "measurement")
queryList[["measurement"]] <- cdm$measurement %>%
dplyr::transmute(
person_id = .data$person_id,
observation_concept_id = .data$measurement_concept_id,
start_date = .data$measurement_date,
end_date = .data$measurement_date,
type_concept_id = .data$measurement_type_concept_id) %>%
dplyr::distinct() %>%
dplyr::mutate(domain = "measurement")
}
if ("visit" %in% domain) {
assert_tables(cdm, "visit")
queryList[["visit"]] <- cdm$visit_occurrence %>%
dplyr::transmute(
person_id = .data$person_id,
observation_concept_id = .data$visit_concept_id,
start_date = .data$visit_start_date,
end_date = .data$visit_end_date,
type_concept_id = .data$visit_type_concept_id) %>%
dplyr::distinct() %>%
dplyr::mutate(domain = "visit")
}
if ("death" %in% domain) {
assert_tables(cdm, "death")
queryList[["death"]] <- cdm$death %>%
dplyr::transmute(
person_id = .data$person_id,
observation_concept_id = .data$cause_concept_id,
start_date = .data$death_date,
end_date = .data$death_date,
type_concept_id = .data$death_type_concept_id) %>%
dplyr::distinct() %>%
dplyr::mutate(domain = "death")
}
if ("observation" %in% domain) {
assert_tables(cdm, "observation")
queryList[["death"]] <- cdm$observation %>%
dplyr::transmute(
person_id = .data$person_id,
observation_concept_id = .data$observation_concept_id,
start_date = .data$observation_date,
end_date = .data$observation_date,
type_concept_id = .data$observation_type_concept_id) %>%
dplyr::distinct() %>%
dplyr::mutate(domain = "observation")
}
if (includeConceptName) {
assert_tables(cdm, "concept")
out <- queryList %>%
purrr::reduce(dplyr::union) %>%
dplyr::left_join(dplyr::transmute(cdm$concept,
observation_concept_id = .data$concept_id,
observation_concept_name = .data$concept_name),
by = "observation_concept_id") %>%
dplyr::left_join(dplyr::transmute(cdm$concept,
type_concept_id = .data$concept_id,
type_concept_name = .data$concept_name),
by = "type_concept_id") %>%
dplyr::ungroup() %>%
dplyr::distinct()
} else {
out <- purrr::reduce(queryList, dplyr::union) %>%
dplyr::ungroup() %>%
dplyr::distinct()
}
# collect?
return(out)
}
#' @rdname cdmFlatten
#' @export
cdm_flatten <- function(cdm,
domain = c("condition", "drug", "procedure"),
include_concept_name = TRUE){
cdmFlatten(cdm = cdm,
domain = domain,
includeConceptName = include_concept_name)
}
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/cdmSubset.R
|
#' Create a CDM object from a pre-defined set of environment variables
#'
#' This function is intended to be used with the Darwin execution engine. The execution engine
#' runs OHDSI studies in a pre-defined runtime environment and makes several environment
#' variables available for connecting to a CDM database. Programmers writing code to run
#' on the execution engine can simply use `cdm <- cdm_from_environment()` to create a cdm reference
#' object for their analysis; the database connection and cdm object are
#' created automatically. This obviates the need for site-specific code for connecting
#' to the database and creating the cdm reference object.
#'
#' The environment variables used by this function and provided by the execution engine are listed below.
#'
#' \itemize{
#' \item{DBMS_TYPE: one of "postgresql", "sql server", "redshift", "duckdb", "snowflake".}
#' \item{DATA_SOURCE_NAME: a free text name for the CDM given by the person running the study.}
#' \item{CDM_VERSION: one of "5.3", "5.4".}
#' \item{DBMS_CATALOG: The database catalog. Important primarily for compound schema names used in SQL Server and Snowflake.}
#' \item{DBMS_SERVER: The database server URL.}
#' \item{DBMS_NAME: The database name used for creating the connection.}
#' \item{DBMS_PORT: The database port number.}
#' \item{DBMS_USERNAME: The database username needed to authenticate.}
#' \item{DBMS_PASSWORD: The database password needed to authenticate.}
#' \item{CDM_SCHEMA: The schema name where the OMOP CDM is located in the database.}
#' \item{WRITE_SCHEMA: The schema where the user has write access and tables will be created during study execution.}
#' }
#'
#' @param write_prefix (string) An optional prefix to use for all tables written to the CDM.
#'
#' @return A cdm_reference object
#' @export
#'
#' @examples
#' \dontrun{
#'
#' library(CDMConnector)
#'
#' # This will only work in an environment where the proper variables are present.
#' cdm <- cdm_from_environment()
#'
#' # Proceed with analysis using the cdm object.
#'
#' # Close the database connection when done.
#' cdm_disconnect(cdm)
#' }
cdm_from_environment <- function(write_prefix = "") {
vars <- c("DBMS_TYPE",
"DATA_SOURCE_NAME",
"CDM_VERSION",
"DBMS_CATALOG",
"DBMS_SERVER",
"DBMS_NAME",
"DBMS_PORT",
"DBMS_USERNAME",
"DBMS_PASSWORD",
"CDM_SCHEMA",
"WRITE_SCHEMA")
supported_db <- c("postgresql", "sql server", "redshift", "duckdb", "snowflake")
if (!(Sys.getenv("DBMS_TYPE") %in% supported_db)) {
cli::cli_abort("The environment variable DBMS_TYPE must be on one of {paste(supported_db, collapse = ', ')} not `{Sys.getenv('DBMS_TYPE')}`.")
}
if (Sys.getenv("DBMS_TYPE") == "duckdb") {
db <- Sys.getenv("DBMS_NAME")
if (db == "") {
db <- "GiBleed"
}
checkmate::assert_choice(db, CDMConnector::example_datasets())
con <- DBI::dbConnect(duckdb::duckdb(), CDMConnector::eunomia_dir(db))
cdm <- CDMConnector::cdm_from_con(con, "main", "main", cdm_version = "5.3", cdm_name = db)
return(cdm)
}
# "DBMS_CATALOG" is not required
for (v in vars) {
if (Sys.getenv(v) == "" && v != "DBMS_CATALOG") {
cli::cli_abort("Environment variable {v} is required but not set!")
}
}
stringr::str_count(Sys.getenv("CDM_SCHEMA"), "\\.")
if (Sys.getenv("DBMS_TYPE") %in% c("postgresql", "redshift")) {
drv <- switch (Sys.getenv("DBMS_TYPE"),
"postgresql" = RPostgres::Postgres(),
"redshift" = RPostgres::Redshift()
)
con <- DBI::dbConnect(drv = drv,
dbname = Sys.getenv("DBMS_NAME"),
host = Sys.getenv("DBMS_SERVER"),
user = Sys.getenv("DBMS_USERNAME"),
password = Sys.getenv("DBMS_PASSWORD"),
port = Sys.getenv("DBMS_PORT"))
if (!DBI::dbIsValid(con)) {
cli::cli_abort("Database connection failed!")
}
} else if (Sys.getenv("DBMS_TYPE") == "sql server") {
con <- DBI::dbConnect(odbc::odbc(),
Driver = "ODBC Driver 17 for SQL Server",
Server = Sys.getenv("DBMS_SERVER"),
Database = Sys.getenv("DBMS_NAME"),
UID = Sys.getenv("DBMS_USERNAME"),
PWD = Sys.getenv("DBMS_PASSWORD"),
TrustServerCertificate="yes",
Port = Sys.getenv("DBMS_PORT"))
if (!DBI::dbIsValid(con)) {
cli::cli_abort("Database connection failed!")
}
} else if (Sys.getenv("DBMS_TYPE") == "snowflake") {
con <- DBI::dbConnect(odbc::odbc(),
DRIVER = "SnowflakeDSIIDriver",
SERVER = Sys.getenv("DBMS_SERVER"),
DATABASE = Sys.getenv("DBMS_NAME"),
UID = Sys.getenv("DBMS_USERNAME"),
PWD = Sys.getenv("DBMS_PASSWORD"),
WAREHOUSE = "COMPUTE_WH_XS")
if (!DBI::dbIsValid(con)) {
cli::cli_abort("Database connection failed!")
}
} else {
cli::cli_abort("{Sys.getenv('DBMS_TYPE')} is not a supported database type!")
}
# split schemas. If write schema has a dot we need to interpret it as catalog.schema
# cdm schema should not have a dot
if (stringr::str_detect(Sys.getenv("WRITE_SCHEMA"), "\\.")) {
write_schema <- stringr::str_split(Sys.getenv("WRITE_SCHEMA"), "\\.")[[1]]
if (length(write_schema) != 2) {
cli::cli_abort("write_schema can have at most one period (.)!")
}
stopifnot(nchar(write_schema[1]) > 0, nchar(write_schema[2]) > 0)
write_schema <- c(catalog = write_schema[1], schema = write_schema[2])
} else {
write_schema <- c(schema = Sys.getenv("WRITE_SCHEMA"))
}
if (write_prefix != "") {
if (Sys.getenv("DBMS_TYPE") != "snowflake") {
write_schema <- c(write_schema, prefix = write_prefix)
}
}
if (stringr::str_detect(Sys.getenv("CDM_SCHEMA"), "\\.")) {
cli::cli_abort("CDM_SCHEMA cannot contain a period (.)! Use DBMS_CATALOG to add a catalog.")
}
if (Sys.getenv("DBMS_CATALOG") != "") {
cdm_schema <- c(catalog = Sys.getenv("DBMS_CATALOG"), schema = Sys.getenv("CDM_SCHEMA"))
} else {
cdm_schema <- Sys.getenv("CDM_SCHEMA")
}
cdm <- CDMConnector::cdm_from_con(
con = con,
cdm_schema = cdm_schema,
write_schema = write_schema,
cdm_version = Sys.getenv("CDM_VERSION"),
cdm_name = Sys.getenv("DATA_SOURCE_NAME"))
if (length(names(cdm)) == 0) {
cli::cli_abort("CDM object creation failed!")
}
return(cdm)
}
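# A minimal sketch (an assumption for local testing, not execution-engine code):
# the duckdb branch above only needs DBMS_TYPE and, optionally, DBMS_NAME.
# Sys.setenv(DBMS_TYPE = "duckdb", DBMS_NAME = "GiBleed")
# cdm <- cdm_from_environment()
# cdm_disconnect(cdm)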
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/cdm_from_environment.R
|
# Internal function to remove overlapping periods in cohorts
# This is used as a helper inside other cohort manipulation functions
# @param x A cohort table (dataframe, tbl_dbi, arrow table...) that may have overlapping periods
# @return A dplyr query that collapses any overlapping periods. This is very similar to union.
cohort_collapse <- function(x) {
checkmate::assert_true(methods::is(x, "tbl_dbi"))
checkmate::assert_subset(c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date"), colnames(x))
checkmate::assertTRUE(DBI::dbIsValid(x$src$con))
# note this assumes all columns are fully populated and cohort_end_date >= cohort_start_date
# TODO do we need to confirm this assumption?
con <- x$src$con
min_start_sql <- dbplyr::sql(glue::glue('min({DBI::dbQuoteIdentifier(con, "cohort_start_date")})'))
max_end_sql <- dbplyr::sql(glue::glue('max({DBI::dbQuoteIdentifier(con, "cohort_end_date")})'))
x %>%
dplyr::group_by(.data$cohort_definition_id, .data$subject_id, .add = FALSE) %>%
dbplyr::window_order(.data$cohort_start_date, .data$cohort_end_date) %>%
dplyr::mutate(
prev_start = dplyr::coalesce(
dbplyr::win_over(
min_start_sql,
partition = c("cohort_definition_id", "subject_id"),
frame = c(-Inf, -1),
order = "cohort_start_date",
con = con),
.data$cohort_start_date),
prev_end = dplyr::coalesce(
dbplyr::win_over(
max_end_sql,
partition = c("cohort_definition_id", "subject_id"),
frame = c(-Inf, -1),
order = "cohort_start_date",
con = con),
.data$cohort_end_date)
) %>%
dplyr::mutate(groups = cumsum(
dplyr::case_when(
.data$prev_start <= .data$cohort_start_date && .data$cohort_start_date <= .data$prev_end ~ 0L,
TRUE ~ 1L)
)) %>%
dplyr::group_by(.data$cohort_definition_id, .data$subject_id, .data$groups, .add = FALSE) %>%
dplyr::summarize(cohort_start_date = min(.data$cohort_start_date, na.rm = TRUE),
cohort_end_date = max(.data$cohort_end_date, na.rm = TRUE),
.groups = "drop") %>%
dplyr::select("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date")
}
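# A minimal sketch (not run) of the intended behavior, assuming a duckdb backend:
# overlapping records for the same cohort and subject collapse to a single
# record spanning the union of the periods.
# con <- DBI::dbConnect(duckdb::duckdb())
# x <- dplyr::copy_to(con, data.frame(
#   cohort_definition_id = 1L,
#   subject_id = 1L,
#   cohort_start_date = as.Date(c("2020-01-01", "2020-01-10")),
#   cohort_end_date   = as.Date(c("2020-01-15", "2020-01-20"))),
#   name = "cohort_tmp")
# cohort_collapse(x) %>% dplyr::collect()
# #> one row: cohort_start_date 2020-01-01, cohort_end_date 2020-01-20
# DBI::dbDisconnect(con, shutdown = TRUE)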
#' Union all cohorts in a cohort set with cohorts in a second cohort set
#'
#' @param x A tbl reference to a cohort table with one or more generated cohorts
#' @param y A tbl reference to a cohort table with one generated cohort
#'
#' @return A lazy query that when executed will resolve to a new cohort table with
#' the same cohort_definition_ids as x, resulting from the union of each cohort
#' in x with the single cohort in the y cohort table
#' @export
cohort_union <- function(x, y) {
checkmate::assert_class(x, "tbl")
checkmate::assert_class(y, "tbl")
checkmate::assert_subset(c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date"), colnames(x))
checkmate::assert_subset(c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date"), colnames(y))
cohort_id <- y %>%
dplyr::distinct(.data$cohort_definition_id) %>%
dplyr::pull(1)
if (length(cohort_id) != 1) {
rlang::abort("cohort table y can only contain one cohort when performing an union!")
}
y %>%
dplyr::distinct(.data$subject_id, .data$cohort_start_date, .data$cohort_end_date) %>%
dplyr::cross_join(dplyr::distinct(x, .data$cohort_definition_id)) %>%
dplyr::select("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date") %>%
dplyr::union_all(x) %>%
cohort_collapse()
}
#' @rdname cohort_union
#' @export
cohortUnion <- cohort_union
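# A minimal sketch (not run) of cohort_union, assuming hypothetical cohort
# tables cdm$my_cohorts (cohorts 1 and 2) and cdm$exposure_cohort (one cohort):
# every interval in y is cross-joined onto each cohort_definition_id in x,
# appended, and collapsed, so each cohort in x gains the person-time of y.
# unioned <- cohort_union(cdm$my_cohorts, cdm$exposure_cohort)
# unioned %>% dplyr::count(cohort_definition_id) # same ids as cdm$my_cohorts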
# Intersect all cohorts in a single cohort table
#
# @param x A tbl reference to a cohort table with one or more cohorts
# @param y A tbl reference to a cohort table with one cohort
#
# @return A lazy query that when executed will resolve to a new cohort table with
# one cohort_definition_id resulting from the intersection of all cohorts x with the cohort in y
# @export
#
#
# TODO rewrite cohort_intersect and add tests
# cohort_intersect <- function(x, y) {
# checkmate::assert_class(x, "tbl")
# checkmate::assert_class(y, "tbl")
# checkmate::assert_subset(c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date"), colnames(x))
# checkmate::assert_subset(c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date"), colnames(y))
#
# # collapse cohorts just
# x <- cohort_collapse(x) %>%
# compute(temporary = TRUE)
#
# y <- y %>%
# dplyr::mutate(cohort_definition_id = -1) %>%
# cohort_collapse() %>%
# dplyr::select(-"cohort_definition_id") %>%
# computeQuery(temporary = TRUE)
#
#
# # collapse cohort table y into a single cohort
# # for each interval in y, create a record for each cohort id x
# x <- y %>%
# dplyr::cross_join(dplyr::distinct(x, .data$cohort_definition_id)) %>%
# dplyr::select("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date") %>%
# dplyr::union_all(x)
#
# # create every possible interval
# candidate_intervals <- x %>%
# dplyr::select("cohort_definition_id", "subject_id", cohort_date = "cohort_start_date") %>%
# dplyr::union_all(dplyr::select(x, "cohort_definition_id", "subject_id", cohort_date = "cohort_end_date")) %>%
# dplyr::group_by(.data$cohort_definition_id, .data$subject_id) %>%
# dplyr::mutate(cohort_date_seq = dplyr::row_number(.data$cohort_date)) %>%
# dplyr::mutate(candidate_start_date = .data$cohort_date,
# candidate_end_date = dplyr::lead(.data$cohort_date, order_by = c("cohort_date", "cohort_date_seq")))
#
# # get intervals that are contained within all of the cohorts
# x %>%
# dplyr::inner_join(candidate_intervals, by = "subject_id") %>%
# dplyr::filter(.data$candidate_start_date >= .data$cohort_start_date,
# .data$candidate_end_date <= .data$cohort_end_date) %>%
# dplyr::distinct(.data$cohort_definition_id,
# .data$subject_id,
# .data$candidate_start_date,
# .data$candidate_end_date) %>%
# dplyr::group_by(.data$subject_id,
# .data$candidate_start_date,
# .data$candidate_end_date) %>%
# dplyr::summarise(n_cohorts_interval_is_inside = dplyr::n(), .groups = "drop") %>%
# # only keep intervals that are inside all cohorts we want to intersect (i.e. all cohorts in the input cohort table)
# dplyr::filter(.data$n_cohorts_interval_is_inside == 2) %>%
# dplyr::mutate(cohort_definition_id = .env$id) %>%
# dplyr::select("cohort_definition_id",
# "subject_id",
# cohort_start_date = "candidate_start_date",
# cohort_end_date = "candidate_end_date") %>%
# cohort_collapse()
# }
# Keep only the earliest record for each person in a cohort
#
# @param x A generated cohort set
#
# @return A lazy query on a generated cohort set
cohort_first <- function(x) {
cols <- c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date")
checkmate::assert_subset(colnames(x), cols)
x %>%
dplyr::group_by(.data$subject_id, .data$cohort_definition_id, .add = FALSE) %>%
dplyr::slice_min(.data$cohort_start_date, n = 1, with_ties = FALSE) %>%
dplyr::ungroup()
}
# Keep only the latest record for each person in a cohort
#
# @param x A generated cohort set
#
# @return A lazy query on a generated cohort set
cohort_last <- function(x) {
cols <- c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date")
checkmate::assert_subset(colnames(x), cols)
x %>%
dplyr::group_by(.data$subject_id, .data$cohort_definition_id, .add = FALSE) %>%
dplyr::slice_max(.data$cohort_start_date, n = 1, with_ties = FALSE) %>%
dplyr::ungroup()
}
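# A minimal sketch (not run) contrasting the two helpers on a hypothetical
# generated cohort set with repeat records per subject:
# cohort_first(cohort) # one row per subject per cohort_definition_id: earliest entry
# cohort_last(cohort)  # one row per subject per cohort_definition_id: latest entry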
# Add or subtract days from the start or end of a cohort set
#
# @param x A generated cohort set table reference
# @param days The number of days to add. Can be any positive or negative integer
# @param from Reference date to add or subtract days to. "start" or "end" (default)
#
# @return A lazy tbl query on the cohort table
cohort_pad_end <- function(x, days = NULL, from = "end") {
cols <- c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date")
checkmate::assert_subset(colnames(x), cols)
checkmate::check_integerish(days, len = 1, null.ok = TRUE)
checkmate::check_choice(from, choices = c("start", "end"))
if (is.null(days)) {
return(x)
}
if (from == "start" && days < 0) {
rlang::abort("cohort_end_date cannot be before cohort_start_date!")
}
date_col <- paste0("cohort_", from, "_date")
x %>%
dplyr::ungroup() %>%
dplyr::mutate(cohort_end_date = CDMConnector::dateadd(date = date_col, number = days, interval = "day")) %>%
cohort_collapse() %>% # TODO what if end < start, remove row?
dplyr::filter(.data$cohort_start_date <= .data$cohort_end_date)
}
# Add or subtract days from the start or end of a cohort set
#
# @param x A generated cohort set table reference
# @param days The number of days to add. Can be any positive or negative integer
# @param from Reference date to add or subtract days to. "start" (default) or "end"
#
# @return A lazy tbl query on the cohort table
cohort_pad_start <- function(x, days = NULL, from = "start") {
cols <- c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date")
checkmate::assert_subset(colnames(x), cols)
checkmate::check_integerish(days, len = 1, null.ok = TRUE)
checkmate::check_choice(from, choices = c("start", "end"))
if (is.null(days)) {
return(x)
}
if (from == "end" && days > 0) {
rlang::abort("cohort_start_date cannot be after cohort_end_date!")
}
date_col <- paste0("cohort_", from, "_date")
x %>%
dplyr::mutate(cohort_start_date = CDMConnector::dateadd(date = date_col, number = days, interval = "day")) %>%
dplyr::ungroup() %>%
cohort_collapse() %>%
dplyr::filter(.data$cohort_start_date <= .data$cohort_end_date)
}
#' Collapse cohort records within a certain number of days
#'
#' @param x A generated cohort set
#' @param gap When two cohort records are 'gap' days apart or less the periods will be
#' collapsed into a single record
#'
#' @return A lazy query on a generated cohort set
#' @export
cohort_erafy <- function(x, gap) {
checkmate::assert_class(x, "tbl")
checkmate::assertIntegerish(gap, len = 1)
cols <- c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date")
checkmate::assert_subset(colnames(x), cols)
x %>%
cohort_pad_end(gap, from = "end") %>%
cohort_collapse() %>%
cohort_pad_end(-gap, from = "end")
}
#' @rdname cohort_erafy
#' @export
cohortErafy <- cohort_erafy
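# A worked sketch (not run) of cohort_erafy with gap = 30 on a hypothetical
# cohort table: records [2020-01-01, 2020-01-10] and [2020-01-25, 2020-02-05]
# are 15 days apart, so padding ends forward by 30 days, collapsing, and
# padding back by 30 merges them into one era [2020-01-01, 2020-02-05].
# cohort_eras <- cohort_erafy(cdm$my_cohort, gap = 30)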
# cohort_under_observation <- function(.data) {
# checkmate::assert_class(.data, "tbl")
# cols <- c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date")
#
# cdm <- attr(tbl, "cdm_reference")
# checkmate::assert_class(.data, "cdm_reference")
# assertTables(cdm, "observation_period", empty.ok = FALSE)
#
# .data %>%
# dplyr::left_join(cdm$observation_period, by = c("cohort_id" = "person_id")) %>%
# dplyr::filter((.data$observation_period_start_date <= .data$cohort_start_date && .data$cohort_start_date <= .data$observation_period_end_date) ||
# (.data$observation_period_start_date <= .data$cohort_end_date && .data$cohort_end_date <= .data$observation_period_end_date)) %>%
# dplyr::mutate(cohort_start_date = ifelse(cohort_start_date < observation_period_start_date, observation_period_start_date, cohort_start_date),
# cohort_end_date = ifelse(observation_period_end_date < cohort_end_date, observation_period_end_date, cohort_end_date)) %>%
# cohort_collapse()
# }
# #' @rdname cohort_under_observation
# #' @export
# cohortUnderObservation <- cohort_under_observation
# cohort_setdiff <- function(x, y) {
# checkmate::assert_class(x, "tbl")
# checkmate::assert_class(y, "tbl")
# cols <- c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date")
# checkmate::assert_true(all(cols %in% colnames(x)))
# checkmate::assert_true(all(cols %in% colnames(y)))
#
# # remove days in the second cohort table from the first cohort table
# x %>%
# dplyr::left_join(dplyr::distinct(dplyr::select(y, "subject_id", remove_start = "cohort_start_date", remove_end = "cohort_end_date")), by = "subject_id") %>%
# dplyr::mutate(
# cohort_start_date = dplyr::case_when(
# # cohort x is inside cohort y interval
# remove_start <= cohort_start_date && cohort_start_date <= remove_end &&
# remove_start <= cohort_end_date && cohort_end_date <= remove_end
# ~ NULL,
# # cohort x starts inside y and ends later than y
# remove_start <= cohort_start_date && cohort_start_date <= remove_end &&
# cohort_end_date > remove_end
# ~ !!dateadd("remove_end", 1L),
# # cohort x is entirely before cohort y
# cohort_start_date < remove_start && cohort_start_date <= remove_end && # start is inside remove interval
# remove_start <= cohort_end_date && cohort_end_date <= remove_end, # end is inside remove interval
# cohort_start_date <= observation_period_start_date && observation_period_start_date <= cohort_end_date ~ !!dateadd("cohort_end_date", 1)
# ),
# cohort_end_date = dplyr::case_when(
# observation_period_start_date < cohort_start_date || cohort_end_date < observation_period_start_date ~ observation_period_start_date,
# cohort_start_date <= observation_period_start_date && observation_period_start_date <= cohort_end_date ~ !!dateadd("cohort_end_date", 1)
# )
# ) %>%
# cohort_collapse()
# }
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/cohortTransformations.R
|
createCohortTables <- function(con, writeSchema, name, computeAttrition) {
checkmate::assertCharacter(writeSchema, min.len = 1, max.len = 3, any.missing = FALSE)
checkmate::assertCharacter(name, len = 1, any.missing = FALSE)
checkmate::assertTRUE(DBI::dbIsValid(con))
checkmate::assertLogical(computeAttrition, len = 1, any.missing = FALSE)
if (name != tolower(name)) {
rlang::abort("cohort table name must be lowercase!")
}
# oracle and snowflake use uppercase table names by default which causes
# issues when switching between ohdsi-sql (unquoted identifiers) and dbplyr sql (quoted identifiers)
# dbAppendTable does not work using bigrquery https://github.com/r-dbi/bigrquery/issues/539
# update v1.3 (Jan 2024) - oracle and bigquery not supported. Quote tables/columns on snowflake.
existingTables <- list_tables(con, writeSchema)
if (name %in% existingTables) {
DBI::dbRemoveTable(con, inSchema(writeSchema, name, dbms(con)))
}
DBI::dbCreateTable(con,
name = inSchema(writeSchema, name, dbms(con)),
fields = c(
cohort_definition_id = "INT",
subject_id = "BIGINT",
# subject_id = "INT",
cohort_start_date = "DATE",
cohort_end_date = "DATE"))
stopifnot(name %in% listTables(con, writeSchema))
if (computeAttrition) {
nm <- paste0(name, "_inclusion")
if (nm %in% existingTables) {
DBI::dbRemoveTable(con, inSchema(writeSchema, nm, dbms(con)))
}
DBI::dbCreateTable(con,
name = inSchema(writeSchema, nm, dbms(con)),
fields = c(
cohort_definition_id = "INT",
rule_sequence = "INT",
name = ifelse(dbms(con) == "bigquery", "STRING", "VARCHAR(255)"),
description = ifelse(dbms(con) == "bigquery", "STRING", "VARCHAR(1000)"))
)
nm <- paste0(name, "_inclusion_result") # used for attrition
if (nm %in% existingTables) {
DBI::dbRemoveTable(con, inSchema(writeSchema, nm, dbms(con)))
}
DBI::dbCreateTable(con,
name = inSchema(writeSchema, nm, dbms(con)),
fields = c(
cohort_definition_id = "INT",
inclusion_rule_mask = "INT",
person_count = "INT",
mode_id = "INT")
)
nm <- paste0(name, "_inclusion_stats")
if (nm %in% existingTables) {
DBI::dbRemoveTable(con, inSchema(writeSchema, nm, dbms(con)))
}
DBI::dbCreateTable(con,
name = inSchema(writeSchema, nm, dbms(con)),
fields = c(
cohort_definition_id = "INT",
rule_sequence = "INT",
person_count = "INT",
gain_count = "INT",
person_total = "INT",
mode_id = "INT")
)
nm <- paste0(name, "_summary_stats")
if (nm %in% existingTables) {
DBI::dbRemoveTable(con, inSchema(writeSchema, nm, dbms(con)))
}
DBI::dbCreateTable(con,
name = inSchema(writeSchema, nm, dbms(con)),
fields = c(
cohort_definition_id = "INT",
base_count = "INT",
final_count = "INT",
mode_id = "INT")
)
nm <- paste0(name, "_censor_stats")
if (nm %in% existingTables) {
DBI::dbRemoveTable(con, inSchema(writeSchema, nm, dbms(con)))
}
DBI::dbCreateTable(con,
name = inSchema(writeSchema, nm, dbms(con)),
fields = c(
cohort_definition_id = "INT",
lost_count = "INT")
)
}
}
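# A minimal usage sketch (not run; internal function), assuming a duckdb
# connection with a writable "main" schema:
# con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
# createCohortTables(con, writeSchema = "main", name = "cohort",
#                    computeAttrition = TRUE)
# list_tables(con, "main") # cohort, cohort_inclusion, ..., cohort_censor_stats
# DBI::dbDisconnect(con, shutdown = TRUE)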
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/cohort_ddl.R
|
# Run a dplyr query and store the result in a permanent table
#
# @param x A dplyr query
# @param name Name of the table to be created
# @param schema Schema to create the new table in
# Can be a length 1 or 2 vector.
# (e.g. schema = "my_schema", schema = c("my_schema", "dbo"))
# @param overwrite If the table already exists in the remote database
# should it be overwritten? (TRUE or FALSE)
#
# @return A dplyr reference to the newly created table
#
# internal function
.computePermanent <- function(x, name, schema = NULL, overwrite = TRUE) {
checkmate::assertCharacter(schema, min.len = 1, max.len = 2, null.ok = TRUE)
schema <- unname(schema)
checkmate::assertCharacter(name, len = 1)
checkmate::assertClass(x, "tbl_sql")
checkmate::assertLogical(overwrite, len = 1)
fullNameQuoted <- getFullTableNameQuoted(x, name, schema)
existingTables <- listTables(x$src$con, schema = schema)
if (name %in% existingTables) {
if (overwrite) {
# DBI::dbRemoveTable(x$src$con, DBI::SQL(fullNameQuoted))
DBI::dbRemoveTable(x$src$con, inSchema(schema, name, dbms = dbms(x$src$con)))
} else {
rlang::abort(paste(fullNameQuoted, "already exists.",
"Set overwrite = TRUE to recreate it."))
}
}
if (dbms(x$src$con) %in% c("duckdb", "oracle", "snowflake", "bigquery")) {
if (length(schema) == 2) {
sql <- dbplyr::build_sql("CREATE TABLE ",
dbplyr::ident(schema[1]), dbplyr::sql("."),
dbplyr::ident(schema[2]), dbplyr::sql("."), dbplyr::ident(name),
" AS ", dbplyr::sql_render(x), con = x$src$con)
} else {
sql <- dbplyr::build_sql("CREATE TABLE ",
if (!is.null(schema)) dbplyr::ident(schema),
if (!is.null(schema)) dbplyr::sql("."), dbplyr::ident(name),
" AS ", dbplyr::sql_render(x), con = x$src$con)
}
} else if (dbms(x$src$con) == "spark") {
sql <- dbplyr::build_sql("CREATE ",
if (overwrite) dbplyr::sql("OR REPLACE "), "TABLE ",
if (!is.null(schema)) dbplyr::ident(schema),
if (!is.null(schema)) dbplyr::sql("."), dbplyr::ident(name),
" AS ", dbplyr::sql_render(x), con = x$src$con)
} else {
sql <- glue::glue("SELECT * INTO {fullNameQuoted}
FROM ({dbplyr::sql_render(x)}) x")
}
DBI::dbExecute(x$src$con, sql)
dplyr::tbl(x$src$con, inSchema(schema = schema, table = name, dbms = dbms(x$src$con)))
}
#' Run a dplyr query and add the result set to an existing table
#'
#' @param x A dplyr query
#' @param name Name of the table to be appended. If it does not already exist it
#' will be created.
#' @param schema Schema where the table exists. Can be a length 1 or 2 vector.
#' (e.g. schema = "my_schema", schema = c("my_schema", "dbo"))
#'
#' @return A dplyr reference to the newly created table
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#'
#' con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
#' concept <- dplyr::tbl(con, "concept")
#'
#' # create a table
#' rxnorm_count <- concept %>%
#' dplyr::filter(domain_id == "Drug") %>%
#' dplyr::mutate(isRxnorm = (vocabulary_id == "RxNorm")) %>%
#' dplyr::count(domain_id, isRxnorm) %>%
#' compute("rxnorm_count")
#'
#' # append to an existing table
#' rxnorm_count <- concept %>%
#' dplyr::filter(domain_id == "Procedure") %>%
#' dplyr::mutate(isRxnorm = (vocabulary_id == "RxNorm")) %>%
#' dplyr::count(domain_id, isRxnorm) %>%
#' appendPermanent("rxnorm_count")
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#'
#' }
appendPermanent <- function(x, name, schema = NULL) {
checkmate::assertCharacter(schema, min.len = 1, max.len = 3, null.ok = TRUE)
checkmate::assertCharacter(name, len = 1)
checkmate::assertClass(x, "tbl_sql")
# TODO try dbAppendTable
if ("prefix" %in% names(schema)) {
name <- paste0(schema["prefix"], name)
schema <- schema[names(schema) != "prefix"]
}
fullNameQuoted <- getFullTableNameQuoted(x, name, schema)
existingTables <- listTables(x$src$con, schema = schema)
if (!(tolower(name) %in% tolower(existingTables))) {
return(.computePermanent(x = x,
name = name,
schema = schema,
overwrite = FALSE))
}
if (dbms(x$src$con) == "bigquery") {
insertStatment <- "insert into"
} else {
insertStatment <- "INSERT INTO"
}
sql <- glue::glue("{insertStatment} {fullNameQuoted} {dbplyr::sql_render(x)}")
DBI::dbExecute(x$src$con, sql)
dplyr::tbl(x$src$con, inSchema(schema, name, dbms = dbms(x$src$con)))
}
#' @rdname appendPermanent
#' @export
append_permanent <- appendPermanent
#' Create a unique table name for temp tables
#'
#' @return A string that can be used as a dbplyr temp table name
#' @export
uniqueTableName <- function() {
i <- getOption("dbplyr_table_name", 0) + 1
options(dbplyr_table_name = i)
sprintf("dbplyr_%03i", i)
}
#' @rdname uniqueTableName
#' @export
unique_table_name <- uniqueTableName
#' Execute dplyr query and save result in remote database
#'
#' This function is a wrapper around `dplyr::compute` that is tested on several
#' database systems. It is needed to handle edge cases where `dplyr::compute`
#' does not produce correct SQL.
#'
#' @param x A dplyr query
#' @param name The name of the table to create.
#' @param temporary Should the table be temporary: TRUE (default) or FALSE
#' @param schema The schema where the table should be created. Ignored if
#' temporary = TRUE.
#' @param overwrite Should the table be overwritten if it already exists: TRUE (default)
#' or FALSE. Ignored if temporary = TRUE.
#' @param ... Further arguments passed on to `dplyr::compute`
#'
#' @return A `dplyr::tbl()` reference to the newly created table.
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#'
#' con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
#' cdm <- cdm_from_con(con, "main")
#'
#' # create a temporary table in the remote database from a dplyr query
#' drugCount <- cdm$concept %>%
#' dplyr::count(domain_id == "Drug") %>%
#' computeQuery()
#'
#' # create a permanent table in the remote database from a dplyr query
#' drugCount <- cdm$concept %>%
#' dplyr::count(domain_id == "Drug") %>%
#' computeQuery("tmp_table", temporary = FALSE, schema = "main")
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
computeQuery <- function(x,
name = uniqueTableName(),
temporary = TRUE,
schema = NULL,
overwrite = TRUE,
...) {
lifecycle::deprecate_soft("1.3", "computeQuery()", with = "dplyr::compute()")
.computeQuery(x,
name = name,
temporary = temporary,
schema = schema,
overwrite = overwrite,
...)
}
.computeQuery <- function(x,
name = uniqueTableName(),
temporary = TRUE,
schema = NULL,
overwrite = TRUE,
...) {
if (is.data.frame(x) || (methods::is(x, "Table") && methods::is(x, "ArrowTabular"))) {
return(x)
}
if ("cdm_reference" %in% class(x)) {
rlang::abort("You passed a cdm object into computeQuery which only accepts single tables or lazy queries!")
}
checkmate::assertLogical(temporary, len = 1)
checkmate::assertLogical(overwrite, len = 1)
if (nchar(dbplyr::sql_render(x)) > 10000) {
rlang::warn("Your SQL query is over 10,000 characters which can cause issues on some database platforms!\nTry calling computeQuery earlier in your pipeline.")
}
if (isFALSE(temporary)) {
checkmate::assertCharacter(schema, min.len = 1, max.len = 3)
# handle prefixes
if ("prefix" %in% names(schema)) {
checkmate::assertCharacter(schema["prefix"], len = 1, min.chars = 1, pattern = "[a-zA-Z1-9_]+")
name <- paste0(schema["prefix"], name)
schema <- schema[names(schema) != "prefix"]
}
checkmate::assertCharacter(schema, min.len = 1, max.len = 2)
}
cdm_reference <- attr(x, "cdm_reference") # might be NULL
con <- x$src$con
if (temporary) {
# handle overwrite for temp tables
# TODO test overwrite of temp tables this across all dbms
if (name %in% list_tables(con)) {
if (isFALSE(overwrite)) {
rlang::abort(glue::glue("table {name} already exists and overwrite is FALSE!"))
}
if (dbms(con) %in% c("sql server")) {
DBI::dbExecute(con, glue::glue("DROP TABLE IF EXISTS #{name};"))
} else {
DBI::dbRemoveTable(con, name)
}
}
if (methods::is(con, "OraConnection") || methods::is(con, "Oracle")) {
# https://github.com/tidyverse/dbplyr/issues/621#issuecomment-1362229669
name <- paste0("ORA$PTT_", name)
sql <- dbplyr::build_sql(
"CREATE PRIVATE TEMPORARY TABLE \n",
dbplyr::ident(name),
dbplyr::sql(" ON COMMIT PRESERVE DEFINITION \n"),
" AS\n",
dbplyr::sql_render(x),
con = con
)
DBI::dbExecute(con, sql)
out <- dplyr::tbl(con, name)
} else if (methods::is(con, "Spark SQL")) {
sql <- dbplyr::build_sql(
"CREATE ", if (overwrite) dbplyr::sql("OR REPLACE "),
"TEMPORARY VIEW \n",
dbplyr::ident(name), " AS\n",
dbplyr::sql_render(x),
con = con
)
DBI::dbExecute(con, sql)
out <- dplyr::tbl(con, name)
} else if (dbms(con) == "bigquery" && methods::is(con, "BigQueryConnection")) {
sql <- dbplyr::build_sql(
"CREATE TABLE ", dbplyr::ident(name), " \n",
"OPTIONS(expiration_timestamp=TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL 1 DAY)) AS\n",
dbplyr::sql_render(x),
con = con
)
DBI::dbExecute(con, sql)
out <- dplyr::tbl(con, name)
} else if (dbms(con) == "sql server") {
suppressMessages({ # Suppress the "Created a temporary table named" message
out <- dplyr::compute(x, name = name, temporary = temporary, ...)
})
} else {
out <- dplyr::compute(x, name = name, temporary = temporary, ...)
}
} else {
# not temporary
out <- .computePermanent(x, name = name, schema = schema, overwrite = overwrite)
}
# retain attributes
for (n in names(attributes(x))) {
if (!(n %in% names(attributes(out)))) {
attr(out, n) <- attr(x, n)
}
}
class(out) <- class(x)
return(out)
}
#' @rdname computeQuery
#' @export
compute_query <- computeQuery
# Get the full table name consisting of the schema and table name.
#
# @param x A dplyr query
# @param name Name of the table to be created.
# @param schema Schema to create the new table in
# Can be a length 1 or 2 vector.
# (e.g. schema = "my_schema", schema = c("my_schema", "dbo"))
#
# @return the full table name
getFullTableNameQuoted <- function(x, name, schema) {
checkmate::assertClass(x, "tbl_sql")
checkmate::assertCharacter(schema, min.len = 1, max.len = 2, null.ok = TRUE)
checkmate::assertCharacter(name, len = 1)
connection <- x$src$con
if (length(schema) == 2) {
fullNameQuoted <- paste(DBI::dbQuoteIdentifier(connection, schema[[1]]),
DBI::dbQuoteIdentifier(connection, schema[[2]]),
DBI::dbQuoteIdentifier(connection, name),
sep = ".")
} else if (length(schema) == 1) {
fullNameQuoted <- paste(DBI::dbQuoteIdentifier(connection, schema),
DBI::dbQuoteIdentifier(connection, name),
sep = ".")
} else {
fullNameQuoted <- DBI::dbQuoteIdentifier(connection, name)
}
return(fullNameQuoted)
}
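# Worked examples (not run) of the quoting above, assuming x is a tbl_sql on a
# duckdb connection where identifiers are double-quoted:
# getFullTableNameQuoted(x, "cohort", c("catalog", "main")) # "catalog"."main"."cohort"
# getFullTableNameQuoted(x, "cohort", "main")               # "main"."cohort"
# getFullTableNameQuoted(x, "cohort", NULL)                 # "cohort"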
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/compute.R
|
#' Copy a cdm object from one database to another
#'
#' It may be helpful to be able to easily copy a small test cdm from a local
#' database to a remote for testing. copy_cdm_to takes a cdm object and a connection.
#' It copies the cdm to the remote database connection. CDM tables can be prefixed
#' in the new database allowing for multiple cdms in a single shared database
#' schema.
#'
#' `r lifecycle::badge("experimental")`
#'
#' @param con A DBI database connection created by `DBI::dbConnect`
#' @param cdm A cdm reference object created by `CDMConnector::cdmFromCon` or `CDMConnector::cdm_from_con`
#' @param schema schema name in the remote database where the user has write permission
#' @param overwrite Should the cohort table be overwritten if it already exists? TRUE or FALSE (default)
#'
#' @return A cdm reference object pointing to the newly created cdm in the remote database
#' @export
copy_cdm_to <- function(con, cdm, schema, overwrite = FALSE) {
checkmate::assertTRUE(DBI::dbIsValid(con))
checkmate::assertClass(cdm, "cdm_reference")
if (dbms(con) == "bigquery") rlang::abort("copy_cdm_to on BigQuery is not yet supported!")
checkmate::assertCharacter(schema, min.len = 1, max.len = 3, all.missing = FALSE)
checkmate::assertLogical(overwrite, len = 1)
# create a new source
newSource <- dbSource(con = con, writeSchema = schema)
# insert person and observation_period
cdmTables <- list()
for (tab in c("person", "observation_period")) {
cdmTables[[tab]] <- omopgenerics::insertTable(
cdm = newSource,
name = tab,
table = cdm[[tab]] |> dplyr::collect() |> dplyr::as_tibble(),
overwrite = overwrite
)
}
# create cdm object
newCdm <- omopgenerics::newCdmReference(
tables = cdmTables, cdmName = omopgenerics::cdmName(cdm)
)
# copy all other tables
tables_to_copy <- names(cdm)
tables_to_copy <- tables_to_copy[
!tables_to_copy %in% c("person", "observation_period")
]
for (i in cli::cli_progress_along(tables_to_copy)) {
table_name <- tables_to_copy[i]
cohort <- inherits(cdm[[table_name]], "cohort_table")
if (cohort) {
set <- omopgenerics::settings(cdm[[table_name]]) |> dplyr::as_tibble()
att <- omopgenerics::attrition(cdm[[table_name]]) |> dplyr::as_tibble()
newCdm <- omopgenerics::insertTable(
cdm = newCdm, name = paste0(table_name, "_set"), table = set,
overwrite = overwrite
)
newCdm <- omopgenerics::insertTable(
cdm = newCdm, paste0(table_name, "_attrition"), table = att,
overwrite = overwrite
)
}
newCdm <- omopgenerics::insertTable(
cdm = newCdm,
name = table_name,
table = cdm[[table_name]] |> dplyr::collect() |> dplyr::as_tibble(),
overwrite = overwrite
)
if (cohort) {
newCdm[[table_name]] <- omopgenerics::newCohortTable(
table = newCdm[[table_name]],
cohortSetRef = newCdm[[paste0(table_name, "_set")]],
cohortAttritionRef = newCdm[[paste0(table_name, "_attrition")]]
)
newCdm[[paste0(table_name, "_set")]] <- NULL
newCdm[[paste0(table_name, "_attrition")]] <- NULL
}
}
return(newCdm)
}
#' @rdname copy_cdm_to
#' @export
copyCdmTo <- copy_cdm_to
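# A minimal usage sketch (not run), assuming two duckdb connections where the
# target has a writable "main" schema:
# con_local  <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
# cdm_local  <- cdm_from_con(con_local, cdm_schema = "main", write_schema = "main")
# con_remote <- DBI::dbConnect(duckdb::duckdb())
# cdm_remote <- copy_cdm_to(con_remote, cdm_local, schema = "main")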
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/copy_cdm_to.R
|
#' Add days or years to a date in a dplyr query
#'
#' This function must be "unquoted" using the "bang bang" operator (!!). See example.
#'
#' @param date The name of a date column in the database table as a character string
#' @param number The number of units to add. Can be a positive or negative whole number.
#' @param interval The units to add. Must be either "day" (default) or "year"
#'
#' @return Platform specific SQL that can be used in a dplyr query.
#' @export
#' @importFrom rlang !!
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb())
#' date_tbl <- dplyr::copy_to(con, data.frame(date1 = as.Date("1999-01-01")),
#' name = "tmpdate", overwrite = TRUE, temporary = TRUE)
#'
#' df <- date_tbl %>%
#' dplyr::mutate(date2 = !!dateadd("date1", 1, interval = "year")) %>%
#' dplyr::collect()
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
dateadd <- function(date, number, interval = "day") {
checkmate::assertCharacter(interval, len = 1)
checkmate::assertSubset(interval, choices = c("day", "year"))
checkmate::assertCharacter(date, len = 1)
if (!(checkmate::testCharacter(number, len = 1) || checkmate::testIntegerish(number, len = 1))) {
rlang::abort("`number` must be a character string with a column name or a number.")
}
dot <- get(".", envir = parent.frame())
db <- CDMConnector::dbms(dot$src$con)
if (db %in% c("oracle", "snowflake")) {
date <- as.character(DBI::dbQuoteIdentifier(dot$src$con, date))
if (is.character(number)) {
number <- as.character(DBI::dbQuoteIdentifier(dot$src$con, number))
}
}
if (db %in% c("spark", "oracle") && interval == "year") {
# spark and oracle sql requires number of days in dateadd
if (is.numeric(number)) {
number <- floor(number*365.25)
} else {
number <- paste(number, "* 365.25")
}
}
sql <- switch (db,
"redshift" = glue::glue("DATEADD({interval}, {number}, {date})"),
"oracle" = glue::glue("({date} + NUMTODSINTERVAL({number}, 'day'))"),
"postgresql" = glue::glue("({date} + {number}*INTERVAL'1 {interval}')"),
"sql server" = glue::glue("DATEADD({interval}, {number}, {date})"),
"spark" = glue::glue("date_add({date}, {number})"),
"duckdb" = glue::glue("({date} + {number}*INTERVAL'1 {interval}')"),
"sqlite" = glue::glue("CAST(STRFTIME('%s', DATETIME({date}, 'unixepoch', ({number})||' {interval}s')) AS REAL)"),
"bigquery" = glue::glue("DATE_ADD({date}, INTERVAL {number} {toupper(interval)})"),
"snowflake" = glue::glue('DATEADD({interval}, {number}, {date})'),
rlang::abort(glue::glue("Connection type {paste(class(dot$src$con), collapse = ', ')} is not supported!"))
)
dbplyr::sql(as.character(sql))
}
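# For reference (read off the switch above), dateadd("date1", 1, "year")
# renders per dialect as, for example:
#   postgresql: (date1 + 1*INTERVAL'1 year')
#   sql server: DATEADD(year, 1, date1)
#   spark:      date_add(date1, 365) -- years are converted to days first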
#' Compute the difference between two days
#'
#' This function must be "unquoted" using the "bang bang" operator (!!). See example.
#'
#' @param start The name of the start date column in the database as a string.
#' @param end The name of the end date column in the database as a string.
#' @param interval The units to use for difference calculation. Must be either "day" (default) or "year".
#'
#' @return Platform specific SQL that can be used in a dplyr query.
#' @export
#' @importFrom rlang !!
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb())
#' date_tbl <- dplyr::copy_to(con, data.frame(date1 = as.Date("1999-01-01")),
#' name = "tmpdate", overwrite = TRUE, temporary = TRUE)
#'
#' df <- date_tbl %>%
#' dplyr::mutate(date2 = !!dateadd("date1", 1, interval = "year")) %>%
#' dplyr::mutate(dif_years = !!datediff("date1", "date2", interval = "year")) %>%
#' dplyr::collect()
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
datediff <- function(start, end, interval = "day") {
checkmate::assertCharacter(interval, len = 1)
checkmate::assertSubset(interval, choices = c("day", "month", "year"))
checkmate::assertCharacter(start, len = 1)
checkmate::assertCharacter(end, len = 1)
dot <- get(".", envir = parent.frame())
db <- CDMConnector::dbms(dot$src$con)
if (interval == "day") {
if (db == "oracle") {
start <- as.character(DBI::dbQuoteIdentifier(dot$src$con, start))
end <- as.character(DBI::dbQuoteIdentifier(dot$src$con, end))
}
    sql <- switch (
      db,
      "redshift" = glue::glue("DATEDIFF(day, {start}, {end})"),
      "oracle" = glue::glue("CEIL(CAST({end} AS DATE) - CAST({start} AS DATE))"),
      "postgresql" = glue::glue("(CAST({end} AS DATE) - CAST({start} AS DATE))"),
      "sql server" = glue::glue("DATEDIFF(day, {start}, {end})"),
      "spark" = glue::glue("datediff({end},{start})"),
      "duckdb" = glue::glue("datediff('day', {start}, {end})"),
      "sqlite" = glue::glue("(JULIANDAY({end}, 'unixepoch') - JULIANDAY({start}, 'unixepoch'))"),
      "bigquery" = glue::glue("DATE_DIFF({end}, {start}, DAY)"),
      "snowflake" = glue::glue('DATEDIFF(day, "{start}", "{end}")'),
      rlang::abort(glue::glue("Connection type {paste(class(dot$src$con), collapse = ', ')} is not supported!"))
    )
} else {
# datepart will quote oracle names
dayStart <- datepart(start, "day", db)
monthStart <- datepart(start, "month", db)
yearStart <- datepart(start, "year", db)
dayEnd <- datepart(end, "day", db)
monthEnd <- datepart(end, "month", db)
yearEnd <- datepart(end, "year", db)
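    # Encode each date as a single integer (year, month, and day scaled into
    # one number) so that FLOOR of the scaled difference counts the number of
    # completed months or years between the two dates.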
if (interval == "month") {
sql <- glue::glue(
"FLOOR(({yearEnd} * 1200 + {monthEnd} * 100 + {dayEnd} -
({yearStart} * 1200 + {monthStart} * 100 + {dayStart})) / 100)"
)
} else {
sql <- glue::glue(
"FLOOR(({yearEnd} * 10000 + {monthEnd} * 100 + {dayEnd} -
({yearStart} * 10000 + {monthStart} * 100 + {dayStart})) / 10000)"
)
}
}
dbplyr::sql(as.character(sql))
}
#' as.Date dbplyr translation wrapper
#'
#' This is a workaround for using as.Date inside dplyr verbs against a database
#' backend. This function should only be used inside dplyr verbs where the first
#' argument is a database table reference. `asDate` must be unquoted with !! inside
#' dplyr verbs (see example).
#'
#' @param x an R expression
#'
#' @export
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(odbc::odbc(), "Oracle")
#' date_tbl <- dplyr::copy_to(con,
#' data.frame(y = 2000L, m = 10L, d = 10L),
#' name = "tmp",
#' temporary = TRUE)
#'
#' df <- date_tbl %>%
#' dplyr::mutate(date_from_parts = !!asDate(paste0(
#' .data$y, "/",
#' .data$m, "/",
#' .data$d
#' ))) %>%
#' dplyr::collect()
#' }
asDate <- function(x) {
x_quo <- rlang::enquo(x)
.data <- get(".", envir = parent.frame())
dialect <- CDMConnector::dbms(.data$src$con)
if (dialect == "oracle") {
x <- dbplyr::partial_eval(x_quo, data = .data)
x <- dbplyr::translate_sql(!!x, con = .data$src$con)
x <- glue::glue("TO_DATE({x}, 'YYYY-MM-DD')")
return(dplyr::sql(x))
} else if (dialect == "spark") {
x <- dbplyr::partial_eval(x_quo, data = .data)
x <- dbplyr::translate_sql(!!x, con = .data$src$con)
x <- glue::glue("TO_DATE({x})")
return(dplyr::sql(x))
} else {
return(rlang::expr(as.Date(!!x_quo)))
}
}
#' @rdname asDate
#' @export
as_date <- asDate
#' Extract the day, month or year of a date in a dplyr pipeline
#'
#' @param date Character string with the name of a date column.
#' @param interval Interval to extract from a date. Valid options are "year", "month", or "day".
#' @param dbms Database system, if NULL it is auto detected.
#'
#' @export
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb(), ":memory:")
#' date_tbl <- dplyr::copy_to(con,
#' data.frame(birth_date = as.Date("1993-04-19")),
#' name = "tmp",
#' temporary = TRUE)
#' df <- date_tbl %>%
#' dplyr::mutate(year = !!datepart("birth_date", "year")) %>%
#' dplyr::mutate(month = !!datepart("birth_date", "month")) %>%
#' dplyr::mutate(day = !!datepart("birth_date", "day")) %>%
#' dplyr::collect()
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
datepart <- function(date, interval = "year", dbms = NULL) {
checkmate::assertCharacter(date, len = 1)
checkmate::assertChoice(interval, c("year", "month", "day"))
supported <- c("redshift", "oracle", "postgresql", "sql server", "spark", "duckdb", "sqlite", "bigquery", "snowflake")
checkmate::assertChoice(dbms, choices = supported, null.ok = TRUE)
if (is.null(dbms)) {
dot <- get(".", envir = parent.frame())
dbms <- CDMConnector::dbms(dot$src$con)
}
sql <- switch (dbms,
"redshift" = "DATE_PART({interval}, {date})",
"oracle" = 'EXTRACT({toupper(interval)} FROM "{date}")',
"postgresql" = "EXTRACT({toupper(interval)} FROM {date})", # TODO use a more dbplyr approach to build sql
"sql server" = "{toupper(interval)}({date})",
"spark" = "{toupper(interval)}({date})",
"duckdb" = "date_part('{interval}', {date})",
"sqlite" = ifelse(interval == "year",
"CAST(STRFTIME('%Y', {date}, 'unixepoch') AS INT)",
"CAST(STRFTIME('%{substr(interval, 1, 1)}', {date}, 'unixepoch') AS INT)"),
"bigquery" = "EXTRACT({toupper(interval)} from {date})",
"snowflake" = 'DATE_PART({interval}, "{date}")'
)
dbplyr::sql(as.character(glue::glue(sql)))
}
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/dateadd.R
|
#' Create a source for a cdm in a database.
#'
#' @param con Connection to a database.
#' @param writeSchema Schema where cohort tables are. You must have read and
#' write access to it.
#'
#' @export
#'
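#' @examples
#' \dontrun{
#' # A minimal sketch, assuming a writable "main" schema in a duckdb
#' # database built from the bundled Eunomia example data:
#' library(CDMConnector)
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#' src <- dbSource(con = con, writeSchema = "main")
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }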
dbSource <- function(con, writeSchema) {
# initial checks
if (methods::is(con, "Pool")) {
if (!rlang::is_installed("pool")) {
cli::cli_abort("Please install the pool package.")
}
con <- pool::localCheckout(con)
}
if (methods::is(con, "DatabaseConnectorConnection")) {
    cli::cli_warn(
      "Not all functionality is supported when using DatabaseConnector as
      your database driver! Some issues may occur."
    )
}
checkmate::assert_true(.dbIsValid(con))
if (dbms(con) %in% c("duckdb", "sqlite") && missing(writeSchema)) {
writeSchema <- c(schema = "main")
}
checkmate::assert_character(writeSchema, min.len = 1, max.len = 3)
source <- structure(
.Data = list(),
"dbcon" = con,
"write_schema" = writeSchema
)
class(source) <- "db_cdm"
source <- omopgenerics::newCdmSource(src = source, sourceType = dbms(con))
return(source)
}
#' @export
insertTable.db_cdm <- function(cdm,
name,
table,
overwrite = TRUE) {
src <- cdm
checkmate::assertCharacter(name, len = 1, any.missing = FALSE)
con <- attr(src, "dbcon")
writeSchema <- attr(src, "write_schema")
fullName <- inSchema(schema = writeSchema, table = name, dbms = dbms(con))
if (overwrite) {
omopgenerics::dropTable(cdm = src, name = name)
}
if (!inherits(table, "data.frame")) {
table <- table |> dplyr::collect()
}
DBI::dbWriteTable(conn = con, name = fullName, value = table)
x <- dplyr::tbl(src = con, fullName) |>
omopgenerics::newCdmTable(src = src, name = name)
return(x)
}
#' @export
#' @importFrom tidyselect starts_with ends_with matches
dropTable.db_cdm <- function(cdm, name) {
# initial checks
schema <- attr(cdm, "write_schema")
con <- attr(cdm, "dbcon")
checkmate::assertTRUE(DBI::dbIsValid(con))
# correct names
allTables <- listTables(con, schema = schema)
if(length(allTables) == 0) {
return(invisible(TRUE))
}
names(allTables) <- allTables
toDrop <- names(tidyselect::eval_select(
expr = dplyr::any_of(name), data = allTables
))
# drop tables
for (i in seq_along(toDrop)) {
DBI::dbRemoveTable(conn = con, name = inSchema(
schema = schema, table = toDrop[i], dbms = dbms(con)
))
}
return(invisible(TRUE))
}
#' @export
#' @importFrom dplyr compute
compute.db_cdm <- function(x, name, temporary = FALSE, overwrite = TRUE, ...) {
# check source and name
source <- attr(x, "tbl_source")
if (is.null(source)) cli::cli_abort("table source not found.")
oldName <- attr(x, "tbl_name")
if (is.null(oldName)) cli::cli_abort("table name not found.")
# whether an intermediate table will be needed
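  # (a query computed into the same table name it reads from must first be
  # written to an intermediate table, then copied to the target name)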
if (!temporary & !is.na(oldName)) {
if (oldName == name) {
intermediate <- TRUE
intername <- paste0(c(sample(letters, 5), "_test_table"), collapse = "")
} else {
intermediate <- FALSE
}
} else {
intermediate <- FALSE
}
# get schema
schema <- attr(source, "write_schema")
if (is.null(schema)) cli::cli_abort("write_schema can not be NULL.")
# remove db_con class
class(x) <- class(x)[!class(x) %in% "db_cdm"]
if (intermediate) {
x <- x |>
.computeQuery(
name = intername, temporary = FALSE, schema = schema, overwrite = FALSE
)
}
x <- x |>
.computeQuery(
name = name, temporary = temporary, schema = schema, overwrite = overwrite
)
if (intermediate) {
dropTable(cdm = source, name = intername)
}
return(x)
}
#' @export
#' @importFrom omopgenerics insertFromSource
insertFromSource.db_cdm <- function(cdm, value) {
if (inherits(value, "data.frame")) {
    cli::cli_abort(
      "To insert a local table into a cdm_reference object use the
      insertTable function."
    )
}
if (!inherits(value, "tbl_lazy")) {
    cli::cli_abort(
      "Can't assign an object of class: {paste0(class(value), collapse = ', ')}
      to a db_con cdm_reference object."
    )
}
con <- cdmCon(cdm)
schema <- cdmWriteSchema(cdm)
if (!identical(con, dbplyr::remote_con(value))) {
cli::cli_abort(
"The cdm object and the table have different connection sources."
)
}
remoteName <- dbplyr::remote_name(value)
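  # dbplyr assigns auto-generated names starting with "dbplyr" to temporary
  # tables; treat such tables as unnamed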
if ("dbplyr" == substr(remoteName, 1, 6)) {
remoteName <- NA_character_
} else if ("prefix" %in% names(schema)) {
prefix <- schema["prefix"] |> unname()
if (substr(remoteName, 1, nchar(prefix)) == prefix) {
remoteName <- substr(remoteName, nchar(prefix) + 1, nchar(remoteName))
}
}
value <- omopgenerics::newCdmTable(
table = value, src = attr(cdm, "cdm_source"), name = remoteName
)
return(value)
}
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/dbSource.R
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Read a set of cohort definitions into R
#'
#' A "cohort set" is a collection of cohort definitions. In R this is stored in
#' a dataframe with cohort_definition_id, cohort_name, and cohort columns.
#' On disk this is stored as a folder with a CohortsToCreate.csv file and
#' one or more json files.
#' If the CohortsToCreate.csv file is missing then all of the json files in the
#' folder will be used, cohort_definition_id will be automatically assigned
#' in alphabetical order, and cohort_name will match the file names.
#'
#' @param path The path to a folder containing Circe cohort definition
#' json files and optionally a csv file named CohortsToCreate.csv with columns
#' cohortId, cohortName, and jsonPath.
#' @importFrom jsonlite read_json
#' @importFrom dplyr tibble
#' @export
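#' @examples
#' \dontrun{
#' # A minimal sketch, reading the example Circe cohort definitions that
#' # ship with the package:
#' cohortSet <- read_cohort_set(system.file("cohorts2", package = "CDMConnector"))
#' cohortSet
#' }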
read_cohort_set <- function(path) {
checkmate::checkCharacter(path, len = 1, min.chars = 1)
  if (!dir.exists(path)) {
    rlang::abort(glue::glue("The directory {path} does not exist!"))
  }
  if (!fs::is_dir(path)) {
    rlang::abort(glue::glue("{path} is not a directory!"))
  }
if (file.exists(file.path(path, "CohortsToCreate.csv"))) {
cohortsToCreate <- readr::read_csv(file.path(path, "CohortsToCreate.csv"), show_col_types = FALSE) %>%
dplyr::mutate(jsonPath = file.path(path, .data$jsonPath)) %>%
dplyr::mutate(cohort = purrr::map(.data$jsonPath, jsonlite::read_json)) %>%
dplyr::mutate(json = purrr::map(.data$jsonPath, readr::read_file)) %>%
dplyr::mutate(cohort_definition_id = .data$cohortId, cohort_name = .data$cohortName)
} else {
jsonFiles <- sort(list.files(path, pattern = "\\.json$", full.names = TRUE))
cohortsToCreate <- dplyr::tibble(
cohort_definition_id = seq_along(jsonFiles),
cohort_name = tools::file_path_sans_ext(basename(jsonFiles)),
json_path = jsonFiles) %>%
dplyr::mutate(cohort = purrr::map(.data$json_path, jsonlite::read_json)) %>%
dplyr::mutate(json = purrr::map(.data$json_path, readr::read_file)) %>%
dplyr::mutate(cohort_name = stringr::str_replace_all(tolower(.data$cohort_name), "\\s", "_")) %>%
      dplyr::mutate(cohort_name = stringr::str_remove_all(.data$cohort_name, "[^a-z0-9_]"))
}
# snakecase name can be used for column names or filenames
cohortsToCreate <- cohortsToCreate %>%
dplyr::mutate(cohort_name_snakecase = snakecase::to_snake_case(.data$cohort_name)) %>%
dplyr::select("cohort_definition_id", "cohort_name", "cohort", "json", "cohort_name_snakecase")
class(cohortsToCreate) <- c("CohortSet", class(cohortsToCreate))
return(cohortsToCreate)
}
#' @export
#' @rdname read_cohort_set
readCohortSet <- read_cohort_set
#' Generate a cohort set on a cdm object
#'
#' @description
#' A "cohort_table" object consists of four components
#' \itemize{
#' \item{A remote table reference to an OHDSI cohort table with at least
#' the columns: cohort_definition_id, subject_id, cohort_start_date,
#' cohort_end_date. Additional columns are optional and some analytic
#' packages define additional columns specific to certain analytic
#' cohorts.}
#' \item{A **settings attribute** which points to a remote table containing
#' cohort settings including the names of the cohorts.}
#' \item{An **attrition attribute** which points to a remote table with
#' attrition information recorded during generation. This attribute is
#' optional. Since calculating attrition takes additional compute it
#' can be skipped resulting in a NULL attrition attribute.}
#' \item{A **cohortCounts attribute** which points to a remote table
#' containing cohort counts}
#' }
#'
#' Each of the three attributes is a tidy table. The implementation of this
#' object is experimental and user feedback is welcome.
#'
#' `r lifecycle::badge("experimental")`
#'
#' One key design principle is that cohort_table objects are created once
#' and can persist across analysis execution but should not be modified after
#' creation. While it is possible to modify a cohort_table object doing
#' so will invalidate it and its attributes may no longer be accurate.
#'
#' @param cdm A cdm reference created by CDMConnector. write_schema must be
#' specified.
#' @param name Name of the cohort table to be created. This will also be used
#' as a prefix for the cohort attribute tables.
#' @param cohort_set,cohortSet Can be a cohortSet object created with `readCohortSet()`,
#' a single Capr cohort definition,
#' or a named list of Capr cohort definitions.
#' @param compute_attrition,computeAttrition Should attrition be computed? TRUE (default) or FALSE
#' @param overwrite Should the cohort table be overwritten if it already
#' exists? TRUE (default) or FALSE
#' @export
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#' cdm <- cdm_from_con(con,
#' cdm_schema = "main",
#' write_schema = "main")
#'
#' cohortSet <- readCohortSet(system.file("cohorts2", package = "CDMConnector"))
#' cdm <- generateCohortSet(cdm, cohortSet, name = "cohort")
#'
#' print(cdm$cohort)
#'
#' attrition(cdm$cohort)
#' settings(cdm$cohort)
#' cohortCount(cdm$cohort)
#' }
generateCohortSet <- function(cdm,
cohortSet,
name,
computeAttrition = TRUE,
overwrite = TRUE) {
rlang::check_installed("CirceR")
rlang::check_installed("SqlRender")
if (!is.data.frame(cohortSet)) {
if (!is.list(cohortSet)) {
rlang::abort("cohortSet must be a dataframe or a named list of Capr cohort definitions")
}
checkmate::assertList(cohortSet,
types = "Cohort",
min.len = 1,
names = "strict",
any.missing = FALSE)
cohortSet <- dplyr::tibble(
cohort_definition_id = seq_along(cohortSet),
cohort_name = names(cohortSet),
cohort = purrr::map(cohortSet, ~jsonlite::fromJSON(generics::compile(.), simplifyVector = FALSE)),
json = purrr::map_chr(cohortSet, generics::compile)
)
class(cohortSet) <- c("CohortSet", class(cohortSet))
}
checkmate::assertDataFrame(cohortSet, min.rows = 1, col.names = "named")
cli::cli_alert_info("Generating {nrow(cohortSet)} cohort{?s}")
withr::local_options(list("cli.progress_show_after" = 0, "cli.progress_clear" = FALSE))
checkmate::assertClass(cdm, "cdm_reference")
con <- cdmCon(cdm)
checkmate::assertTRUE(DBI::dbIsValid(con))
checkmate::assert_character(name, len = 1, min.chars = 1, any.missing = FALSE, pattern = "[a-zA-Z0-9_]+")
assert_write_schema(cdm)
checkmate::assertLogical(computeAttrition, len = 1)
checkmate::assertLogical(overwrite, len = 1)
write_schema <- cdmWriteSchema(cdm)
checkmate::assert_character(write_schema,
min.chars = 1,
min.len = 1,
max.len = 3,
null.ok = FALSE)
if ("prefix" %in% names(write_schema)) {
prefix <- unname(write_schema["prefix"])
} else {
prefix <- ""
}
# Handle OHDSI cohort sets
if ("cohortId" %in% names(cohortSet) && !("cohort_definition_id" %in% names(cohortSet))) {
cohortSet$cohort_definition_id <- cohortSet$cohortId
}
if ("cohortName" %in% names(cohortSet) && !("cohort_name" %in% names(cohortSet))) {
cohortSet$cohort_name <- cohortSet$cohortName
}
if (!("cohort" %in% names(cohortSet)) && ("json" %in% names(cohortSet))) {
cohortColumn <- list()
for (i in seq_len(nrow(cohortSet))) {
x <- cohortSet$json[i]
if (!validUTF8(x)) { x <- stringi::stri_enc_toutf8(x, validate = TRUE) }
if (!validUTF8(x)) { rlang::abort("Failed to convert json UTF-8 encoding") }
cohortColumn[[i]] <- jsonlite::fromJSON(x, simplifyVector = FALSE)
}
cohortSet$cohort <- cohortColumn
}
checkmate::assertTRUE(all(c("cohort_definition_id", "cohort_name", "json") %in% colnames(cohortSet)))
# check name -----
checkmate::assertCharacter(name, len = 1, min.chars = 1, pattern = "[a-z_]+")
if (paste0(prefix, name) != tolower(paste0(prefix, name))) {
cli::cli_abort("Cohort table name {.code {paste0(prefix, name)}} must be lowercase!")
}
# Make sure tables do not already exist
existingTables <- listTables(con, write_schema)
for (x in paste0(name, c("", "_count", "_set", "_attrition"))) {
if (x %in% existingTables) {
if (overwrite) {
DBI::dbRemoveTable(con, inSchema(write_schema, x, dbms = dbms(con)))
} else {
cli::cli_abort("The cohort table {paste0(prefix, name)} already exists.\nSpecify overwrite = TRUE to overwrite it.")
}
}
}
# Create the OHDSI-SQL for each cohort ----
cohortSet$sql <- character(nrow(cohortSet))
for (i in seq_len(nrow(cohortSet))) {
cohortJson <- cohortSet$json[[i]]
cohortExpression <- CirceR::cohortExpressionFromJson(expressionJson = cohortJson)
cohortSql <- CirceR::buildCohortQuery(expression = cohortExpression,
options = CirceR::createGenerateOptions(
generateStats = computeAttrition))
cohortSet$sql[i] <- SqlRender::render(cohortSql, warnOnMissingParameters = FALSE)
}
createCohortTables(con, write_schema, name, computeAttrition)
# Run the OHDSI-SQL ----
cdm_schema <- attr(cdm, "cdm_schema")
checkmate::assertCharacter(cdm_schema, max.len = 3, min.len = 1, min.chars = 1)
if ("prefix" %in% names(cdm_schema)) {
cdm_schema_sql <- glue::glue_sql_collapse(DBI::dbQuoteIdentifier(con, cdm_schema[-which(names(cdm_schema) == "prefix")]), sep = ".")
} else {
cdm_schema_sql <- glue::glue_sql_collapse(DBI::dbQuoteIdentifier(con, cdm_schema), sep = ".")
}
if ("prefix" %in% names(write_schema)) {
write_schema_sql <- paste(DBI::dbQuoteIdentifier(con, write_schema[-which(names(write_schema) == "prefix")]), collapse = ".")
} else {
write_schema_sql <- paste(DBI::dbQuoteIdentifier(con, write_schema), collapse = ".")
}
# dropTempTableIfExists <- function(con, table) {
# # used for dropping temp emulation tables
# suppressMessages(
# DBI::dbExecute(
# con,
# SqlRender::translate(
# glue::glue("IF OBJECT_ID('#{table}', 'U') IS NOT NULL DROP TABLE #{table};"),
# targetDialect = dbms(con))
# )
# )
# }
generate <- function(i) {
pct <- ""
    cli::cli_progress_step("Generating cohort ({i}/{nrow(cohortSet)}{pct}) - {cohortSet$cohort_name[i]}", spinner = interactive())
sql <- cohortSet$sql[i] %>%
SqlRender::render(
cdm_database_schema = cdm_schema_sql,
vocabulary_database_schema = cdm_schema_sql,
target_database_schema = write_schema_sql,
results_database_schema.cohort_inclusion = paste0(write_schema_sql, ".", DBI::dbQuoteIdentifier(con, paste0(prefix, name, "_inclusion"))),
results_database_schema.cohort_inclusion_result = paste0(write_schema_sql, ".", DBI::dbQuoteIdentifier(con, paste0(prefix, name, "_inclusion_result"))),
results_database_schema.cohort_summary_stats = paste0(write_schema_sql, ".", DBI::dbQuoteIdentifier(con, paste0(prefix, name, "_summary_stats"))),
results_database_schema.cohort_censor_stats = paste0(write_schema_sql, ".", DBI::dbQuoteIdentifier(con, paste0(prefix, name, "_censor_stats"))),
target_cohort_table = DBI::dbQuoteIdentifier(con, paste0(prefix, name)),
target_cohort_id = cohortSet$cohort_definition_id[i],
warnOnMissingParameters = FALSE
)
if (dbms(con) == "snowflake") {
# we don't want to use temp emulation on snowflake. We want to use actual temp tables.
sql <- stringr::str_replace_all(sql, "CREATE TABLE #", "CREATE TEMPORARY TABLE ") %>%
stringr::str_replace_all("create table #", "create temporary table ") %>%
stringr::str_replace_all("#", "")
# temp tables created by circe that can be left dangling.
tempTablesToDrop <- c(
"Codesets",
"qualified_events",
"cohort_rows",
"Inclusion",
"strategy_ends",
"inclusion_events",
"included_events",
"final_cohort",
"inclusion_rules",
"BEST_EVENTS")
for (j in seq_along(tempTablesToDrop)) {
DBI::dbExecute(con, paste("drop table if exists", tempTablesToDrop[j]))
}
namesToQuote <- c("cohort_definition_id",
"subject_id",
"cohort_start_date",
"cohort_end_date",
"mode_id",
"inclusion_rule_mask",
"person_count",
"rule_sequence",
"gain_count",
"person_total",
"base_count", "final_count")
for (n in namesToQuote) {
sql <- stringr::str_replace_all(sql, n, DBI::dbQuoteIdentifier(con, n))
}
}
# total hack workaround for circe - temp23019_chrt0_inclusion"_stats
quoteSymbol <- substr(as.character(DBI::dbQuoteIdentifier(con, "a")), 1, 1)
sql <- stringr::str_replace_all(sql,
paste0("_inclusion", quoteSymbol, "_stats"),
paste0("_inclusion_stats", quoteSymbol))
# if parameters exist in the sql (starting with @), stop.
stopifnot(length(unique(stringr::str_extract_all(sql, "@\\w+"))[[1]]) == 0)
# remove comments from SQL which are causing an issue on spark
# --([^\n])*?\n => match strings starting with -- followed by anything except a newline
sql <- stringr::str_replace_all(sql, "--([^\n])*?\n", "\n")
sql <- SqlRender::translate(sql,
targetDialect = CDMConnector::dbms(con),
tempEmulationSchema = "SQL ERROR")
if (stringr::str_detect(sql, "SQL ERROR")) {
cli::cli_abort("sqlRenderTempEmulationSchema being used for cohort generation!
Please open a github issue at {.url https://github.com/darwin-eu/CDMConnector/issues} with your cohort definition.")
}
if (dbms(con) == "duckdb") {
# hotfix for duckdb sql translation https://github.com/OHDSI/SqlRender/issues/340
sql <- gsub("'-1 \\* (\\d+) day'", "'-\\1 day'", sql)
}
sql <- stringr::str_replace_all(sql, "\\s+", " ")
sql <- stringr::str_split(sql, ";")[[1]] %>%
stringr::str_trim() %>%
stringr::str_c(";") %>% # remove empty statements
stringr::str_subset("^;$", negate = TRUE)
for (k in seq_along(sql)) {
# cli::cat_rule(glue::glue("sql {k} with {nchar(sql[k])} characters."))
# cli::cat_line(sql[k])
DBI::dbExecute(con, sql[k], immediate = TRUE)
if (interactive()) {
pct <- ifelse(k == length(sql), "", glue::glue(" ~ {floor(100*k/length(sql))}%"))
cli::cli_progress_update()
}
}
}
# this loop makes cli updates look correct
for (i in seq_len(nrow(cohortSet))) {
generate(i)
}
cohort_ref <- dplyr::tbl(con, inSchema(write_schema, name, dbms = dbms(con)))
# Create attrition attribute ----
if (computeAttrition) {
cohort_attrition_ref <- computeAttritionTable(
cdm = cdm,
cohortStem = name,
cohortSet = cohortSet,
overwrite = overwrite
) |>
dplyr::collect()
} else {
cohort_attrition_ref <- NULL
}
# Create cohort_set attribute -----
# if (paste0(name, "_set") %in% existingTables) {
# DBI::dbRemoveTable(con, inSchema(write_schema, paste0(name, "_set"), dbms = dbms(con)))
# }
cdm[[name]] <- cohort_ref |>
omopgenerics::newCdmTable(src = attr(cdm, "cdm_source"), name = name)
# Create the object. Let the constructor handle getting the counts.----
cdm[[name]] <- omopgenerics::newCohortTable(
table = cdm[[name]],
cohortSetRef = cohortSet[,c("cohort_definition_id", "cohort_name")],
cohortAttritionRef = cohort_attrition_ref)
cli::cli_progress_done()
return(cdm)
}
#' @rdname generateCohortSet
#' @export
generate_cohort_set <- function(cdm,
cohort_set,
name = "cohort",
compute_attrition = TRUE,
overwrite = TRUE) {
generateCohortSet(cdm = cdm,
cohortSet = cohort_set,
name = name,
computeAttrition = compute_attrition,
overwrite = overwrite)
}
#' Constructor for cohort_table objects
#'
#' `r lifecycle::badge("superseded")`
#'
#' Please use `omopgenerics::newCohortTable()` instead.
#'
#' This constructor function is to be used by analytic package developers to
#' create `cohort_table` objects.
#'
#' @details
#' A cohort is a set of person-time from an OMOP CDM database and can be
#' represented by a table with three columns: subject_id, cohort_start_date,
#' cohort_end_date. Subject_id is the same as person_id in the OMOP CDM. A
#' `cohort_table` is a collection of one or more cohorts and can be
#' represented as a table with four columns: cohort_definition_id,
#' subject_id, cohort_start_date, cohort_end_date.
#'
#' This constructor function defines the `cohort_table` object in R.
#'
#' The object is an extension of a `tbl_sql` object defined in dplyr. This is
#' a lazy database query that points to a cohort table in the database with
#' at least the columns cohort_definition_id, subject_id, cohort_start_date,
#' cohort_end_date. The table could optionally have more columns as well.
#'
#' In addition the `cohort_table` object has three optional attributes.
#' These are: cohort_set, cohort_attrition, cohort_count.
#' Each of these attributes is also a lazy SQL query (`tbl_sql`) that points
#' to a table in a database and is described below.
#'
#' ## cohort_set
#'
#' cohort_set is a table with one row per cohort_definition_id. The first
#' two columns of the cohort_set table are: cohort_definition_id, and
#' cohort_name. Additional columns can be added. The cohort_set table is meant
#' to store metadata about the cohort definition. Since this table is required
#' it will be created if it is not supplied.
#'
#' ## cohort_attrition
#'
#' cohort_attrition is an optional table that stores attrition information
#' recorded during the cohort generation process such as how many persons were
#' dropped at each step of inclusion rule application. The first column of this
#' table should be `cohort_definition_id` but all other columns currently
#' have no constraints.
#'
#' ## cohort_count
#'
#' cohort_count is an optional attribute table that records the number of records
#' and the number of unique persons in each cohort in a `cohort_table`.
#' It is derived metadata that can be re-derived as long as cohort_set,
#' the complete list of cohorts in the set, is available. Column names of
#' cohort_count are: cohort_definition_id, number_records,
#' number_subjects. This table is required for cohort_table objects and
#' will be created if not supplied.
#'
#' @param cohort_ref,cohortRef A `tbl_sql` object that points to a remote cohort table
#' with the following first four columns: cohort_definition_id,
#' subject_id, cohort_start_date, cohort_end_date. Additional columns are
#' optional.
#' @param cohort_set_ref,cohortSetRef A `tbl_sql` object that points to a remote table
#' with the following first two columns: cohort_definition_id, cohort_name.
#' Additional columns are optional. cohort_definition_id should be a primary
#' key on this table and uniquely identify rows.
#' @param cohort_attrition_ref,cohortAttritionRef A `tbl_sql` object that points to an attrition
#' table in a remote database with the first column being cohort_definition_id.
#' @param cohort_count_ref,cohortCountRef A `tbl_sql` object that points to a cohort_count
#' table in a remote database with columns cohort_definition_id, number_records,
#' number_subjects.
#' @param overwrite Should tables be overwritten if they already exist? TRUE or FALSE (default)
#'
#' @return A `cohort_table` object that is a `tbl_sql` reference
#' to a cohort table in the write_schema of an OMOP CDM
#' @export
#'
#' @include reexports-omopgenerics.R
#'
#' @examples
#' \dontrun{
#' # This function is for developers who are creating cohort_table
#' # objects in their packages. The function should accept a cdm_reference
#' # object as the first argument and return a cdm_reference object with the
#' # cohort table added. The second argument should be `name` which will be
#' # the prefix for the database tables, the name of the cohort table in the
#' # database and the name of the cohort table in the cdm object.
#' # Other optional arguments can be added after the first two.
#'
#' generateCustomCohort <- function(cdm, name, ...) {
#'
#' # accept a cdm_reference object as input
#' checkmate::assertClass(cdm, "cdm_reference")
#' con <- attr(cdm, "dbcon")
#'
#' # Create the tables in the database however you like
#' # All the tables should be prefixed with `name`
#' # The cohort table should be called `name` in the database
#'
#' # Create the dplyr table references
#' cohort_ref <- dplyr::tbl(con, name)
#'   cohort_set_ref <- dplyr::tbl(con, paste0(name, "_set"))
#' cohort_attrition_ref <- dplyr::tbl(con, paste0(name, "_attrition"))
#' cohort_count_ref <- dplyr::tbl(con, paste0(name, "_count"))
#'
#' # add to the cdm
#' cdm[[name]] <- cohort_ref
#'
#' # create the generated cohort set object using the constructor
#' cdm[[name]] <- new_generated_cohort_set(
#' cdm[[name]],
#' cohort_set_ref = cohort_set_ref,
#' cohort_attrition_ref = cohort_attrition_ref,
#' cohort_count_ref = cohort_count_ref)
#'
#' return(cdm)
#' }
#' }
new_generated_cohort_set <- function(cohort_ref,
cohort_set_ref = NULL,
cohort_attrition_ref = NULL,
cohort_count_ref = NULL,
overwrite) {
lifecycle::deprecate_warn(
when = "1.3",
what = "new_generated_cohort_set()",
with = "newCohortTable()"
)
if (!is.null(cohort_count_ref)) {
cli::cli_warn("cohort_count_ref is no longer a required argument for new_generated_cohort_set")
}
if (!missing(overwrite)) {
cli::cli_warn("overwrite is no longer a required argument for new_generated_cohort_set")
}
omopgenerics::newCohortTable(
table = cohort_ref,
cohortSetRef = cohort_set_ref,
cohortAttritionRef = cohort_attrition_ref
)
}
#' @rdname new_generated_cohort_set
#' @export
newGeneratedCohortSet <- function(cohortRef,
cohortSetRef = NULL,
cohortAttritionRef = NULL,
cohortCountRef = NULL,
overwrite) {
if (!missing(overwrite)) {
cli::cli_warn("overwrite is no longer a required argument for new_generated_cohort_set")
}
new_generated_cohort_set(
cohort_ref = cohortRef,
cohort_set_ref = cohortSetRef,
cohort_attrition_ref = cohortAttritionRef,
cohort_count_ref = cohortCountRef
)
}
#' Get attrition table from a cohort_table object
#'
#' @param x A cohort_table object
#'
#' @export
cohortAttrition <- function(x) {
lifecycle::deprecate_warn("1.3", "cohortAttrition()", "attrition()")
omopgenerics::attrition(x)
}
#' @rdname cohortAttrition
#' @export
cohort_attrition <- function(x) {
lifecycle::deprecate_warn("1.3", "cohort_attrition()", "attrition()")
omopgenerics::attrition(x)
}
#' Get cohort settings from a cohort_table object
#'
#' @param x A cohort_table object
#'
#' @export
cohortSet <- function(x) {
lifecycle::deprecate_warn("1.3", "cohortSet()", "settings()")
omopgenerics::settings(x)
}
#' @rdname cohortSet
#' @export
cohort_set <- function(x) {
lifecycle::deprecate_warn("1.3", "cohort_set()", "settings()")
omopgenerics::settings(x)
}
#' Get cohort counts from a generated_cohort_set object.
#'
#' @param cohort A generated_cohort_set object.
#'
#' @return A table with the counts.
#' @rdname cohort_count
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' library(dplyr)
#'
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#' cdm <- cdm_from_con(con = con, cdm_schema = "main", write_schema = "main")
#' cdm <- generateConceptCohortSet(
#' cdm = cdm, conceptSet = list(pharyngitis = 4112343), name = "new_cohort"
#' )
#' cohort_count(cdm$new_cohort)
#' }
cohort_count <- omopgenerics::cohortCount
# Compute the attrition for a set of cohorts (internal function)
#
# @description This function computes the attrition for a set of cohorts. It
# uses the inclusion_result table so the cohort should be previously generated
# using stats = TRUE.
#
# @param cdm A cdm reference created by CDMConnector.
# @param cohortStem Stem for the cohort tables.
# @param cohortSet Cohort set of the generated tables.
# @param cohortId Cohort definition id of the cohorts that we want to generate
# the attrition. If NULL all cohorts from cohort set will be used.
# @param overwrite Should the attrition table be overwritten if it already exists? TRUE or FALSE
#
# @importFrom rlang :=
# @return the attrition as a data.frame
computeAttritionTable <- function(cdm,
cohortStem,
cohortSet,
cohortId = NULL,
overwrite = FALSE) {
checkmate::assertClass(cdm, "cdm_reference")
checkmate::assertCharacter(cohortStem, len = 1, min.chars = 1)
checkmate::assertLogical(overwrite, len = 1)
checkmate::assertDataFrame(cohortSet, min.rows = 0, col.names = "named")
checkmate::assertNames(colnames(cohortSet),
must.include = c("cohort_definition_id", "cohort")
)
if (is.null(cohortId)) {
cohortId <- cohortSet$cohort_definition_id
}
checkmate::assertNumeric(cohortId, any.missing = FALSE, min.len = 1)
checkmate::assertTRUE(all(cohortId %in% cohortSet$cohort_definition_id))
con <- cdmCon(cdm)
checkmate::assertTRUE(DBI::dbIsValid(con))
inclusionResultTableName <- paste0(cohortStem, "_inclusion_result")
# if (dbms(cdmCon(cdm)) %in% c("oracle", "snowflake")) {
# inclusionResultTableName <- toupper(inclusionResultTableName)
# }
schema <- cdmWriteSchema(cdm)
checkmate::assertCharacter(schema, min.len = 1, max.len = 3, min.chars = 1)
if (paste0(cohortStem, "_attrition") %in% listTables(con, schema = schema)) {
if (overwrite) {
DBI::dbRemoveTable(con, inSchema(schema, paste0(cohortStem, "_attrition"), dbms = dbms(con)))
} else {
rlang::abort(paste0(cohortStem, "_attrition already exists in the database. Set overwrite = TRUE."))
}
}
# Bring the inclusion result table to R memory
inclusionResult <- dplyr::tbl(con, inSchema(schema, inclusionResultTableName, dbms(con))) %>%
dplyr::collect() %>%
dplyr::rename_all(tolower) %>%
dplyr::mutate(inclusion_rule_mask = as.numeric(.data$inclusion_rule_mask))
attritionList <- list()
for (i in seq_along(cohortId)) {
id <- cohortId[i]
inclusionName <- NULL
for (k in seq_along(cohortSet$cohort[[i]]$InclusionRules)) {
if ("name" %in% names(cohortSet$cohort[[i]]$InclusionRules[[k]])) {
inclusionName <- c(
inclusionName, cohortSet$cohort[[i]]$InclusionRules[[k]]$name
)
} else {
inclusionName <- c(inclusionName, "Unnamed criteria")
}
}
numberInclusion <- length(inclusionName)
if (numberInclusion == 0) {
#cohortTableName <- paste0(cohortStem, "_cohort")
cohortTableName <- cohortStem
attrition <- dplyr::tibble(
cohort_definition_id = id,
number_records = dplyr::tbl(con, inSchema(schema, cohortTableName, dbms(con))) %>%
dplyr::rename_all(tolower) %>%
dplyr::filter(.data$cohort_definition_id == id) %>%
dplyr::tally() %>%
dplyr::pull("n") %>%
as.numeric(),
number_subjects = dplyr::tbl(con, inSchema(schema, cohortTableName, dbms(con))) %>%
dplyr::rename_all(tolower) %>%
dplyr::filter(.data$cohort_definition_id == id) %>%
dplyr::select("subject_id") %>%
dplyr::distinct() %>%
dplyr::tally() %>%
dplyr::pull("n") %>%
as.numeric(),
reason_id = 1,
reason = "Qualifying initial records",
excluded_records = 0,
excluded_subjects = 0
)
} else {
inclusionMaskId <- getInclusionMaskId(numberInclusion)
inclusionName <- c("Qualifying initial records", inclusionName)
attrition <- list()
for (k in 1:(numberInclusion + 1)) {
attrition[[k]] <- dplyr::tibble(
cohort_definition_id = id,
number_records = inclusionResult %>%
dplyr::filter(.data$cohort_definition_id == id) %>%
dplyr::filter(.data$mode_id == 0) %>%
dplyr::filter(.data$inclusion_rule_mask %in% inclusionMaskId[[k]]) %>%
dplyr::pull("person_count") %>%
base::sum() %>%
as.numeric(),
number_subjects = inclusionResult %>%
dplyr::filter(.data$cohort_definition_id == id) %>%
dplyr::filter(.data$mode_id == 1) %>%
dplyr::filter(.data$inclusion_rule_mask %in% inclusionMaskId[[k]]) %>%
dplyr::pull("person_count") %>%
base::sum() %>%
as.numeric(),
reason_id = k,
reason = inclusionName[k]
)
}
attrition <- attrition %>%
dplyr::bind_rows() %>%
dplyr::mutate(
excluded_records =
dplyr::lag(.data$number_records, 1, order_by = .data$reason_id) -
.data$number_records,
excluded_subjects =
dplyr::lag(.data$number_subjects, 1, order_by = .data$reason_id) -
.data$number_subjects
) %>%
dplyr::mutate(
excluded_records = dplyr::coalesce(.data$excluded_records, 0),
excluded_subjects = dplyr::coalesce(.data$excluded_subjects, 0)
)
}
attritionList[[i]] <- attrition
}
attrition <- attritionList %>%
dplyr::bind_rows() %>%
dplyr::rename_all(tolower)
# upload attrition table to database
DBI::dbWriteTable(con,
name = inSchema(schema, paste0(cohortStem, "_attrition"), dbms = dbms(con)),
value = attrition)
dplyr::tbl(con, inSchema(schema, paste0(cohortStem, "_attrition"), dbms(con))) %>%
dplyr::rename_all(tolower)
}
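# Enumerate the inclusion rule mask values that satisfy the first (x + 1)
# inclusion rules, for x in -1:(numberInclusion - 1). The mask is a bit field
# where bit k is 1 when inclusion rule k passed. For example, with two rules
# the masks 0:3 decode to: none, rule 0 only, rule 1 only, both; element 1 of
# the returned list holds all masks (qualifying initial records), element 2
# the masks where rule 0 passed (1 and 3), and element 3 only mask 3.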
getInclusionMaskId <- function(numberInclusion) {
inclusionMaskMatrix <- dplyr::tibble(
inclusion_rule_mask = 0:(2^numberInclusion - 1)
)
for (k in 0:(numberInclusion - 1)) {
inclusionMaskMatrix <- inclusionMaskMatrix %>%
dplyr::mutate(!!paste0("inclusion_", k) :=
rep(c(rep(0, 2^k), rep(1, 2^k)), 2^(numberInclusion - k - 1))
)
}
lapply(-1:(numberInclusion - 1), function(x) {
if (x == -1) {
return(inclusionMaskMatrix$inclusion_rule_mask)
} else {
inclusionMaskMatrix <- inclusionMaskMatrix
for (k in 0:x) {
inclusionMaskMatrix <- inclusionMaskMatrix %>%
dplyr::filter(.data[[paste0("inclusion_", k)]] == 1)
}
return(inclusionMaskMatrix$inclusion_rule_mask)
}
})
}
caprConceptToDataframe <- function(x) {
tibble::tibble(
conceptId = purrr::map_int(x@Expression, ~.@Concept@concept_id),
conceptCode = purrr::map_chr(x@Expression, ~.@Concept@concept_code),
conceptName = purrr::map_chr(x@Expression, ~.@Concept@concept_name),
domainId = purrr::map_chr(x@Expression, ~.@Concept@domain_id),
vocabularyId = purrr::map_chr(x@Expression, ~.@Concept@vocabulary_id),
standardConcept = purrr::map_chr(x@Expression, ~.@Concept@standard_concept),
includeDescendants = purrr::map_lgl(x@Expression, "includeDescendants"),
isExcluded = purrr::map_lgl(x@Expression, "isExcluded"),
includeMapped = purrr::map_lgl(x@Expression, "includeMapped")
)
}
#' Add attrition reason to a cohort_table object
#'
#' Update the cohort attrition table with new counts and a reason for attrition.
#'
#' @param cohort A generated cohort set
#' @param reason The reason for attrition as a character string
#' @param cohortId Cohort definition id of the cohort you want to update the
#' attrition
#'
#' @return The cohort object with the attributes created or updated.
#'
#' `r lifecycle::badge("experimental")`
#'
#' @export
#'
#' @examples
#' \dontrun{
#' library(CDMConnector)
#' library(dplyr)
#'
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#' cdm <- cdm_from_con(con = con, cdm_schema = "main", write_schema = "main")
#' cdm <- generateConceptCohortSet(
#' cdm = cdm, conceptSet = list(pharyngitis = 4112343), name = "new_cohort"
#' )
#'
#' settings(cdm$new_cohort)
#' cohortCount(cdm$new_cohort)
#' cohortAttrition(cdm$new_cohort)
#'
#' cdm$new_cohort <- cdm$new_cohort %>%
#' filter(cohort_start_date >= as.Date("2010-01-01"))
#'
#' cdm$new_cohort <- recordCohortAttrition(
#'   cohort = cdm$new_cohort, reason = "Only events after 2010"
#' )
#'
#' settings(cdm$new_cohort)
#' cohortCount(cdm$new_cohort)
#' cohortAttrition(cdm$new_cohort)
#' }
recordCohortAttrition <- function(cohort,
reason,
cohortId = NULL) {
omopgenerics::recordCohortAttrition(cohort = cohort,
reason = reason,
cohortId = cohortId)
}
#' @export
#' @rdname recordCohortAttrition
record_cohort_attrition <- recordCohortAttrition
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/generateCohortSet.R
|
table_refs <- function(domain_id) {
dplyr::tribble(
~domain_id, ~table_name, ~concept_id, ~start_date, ~end_date,
"condition", "condition_occurrence", "condition_concept_id", "condition_start_date", "condition_end_date",
"drug", "drug_exposure", "drug_concept_id", "drug_exposure_start_date", "drug_exposure_end_date",
"procedure", "procedure_occurrence", "procedure_concept_id", "procedure_date", "procedure_date",
"observation", "observation", "observation_concept_id", "observation_date", "observation_date",
"measurement", "measurement", "measurement_concept_id", "measurement_date", "measurement_date",
"visit", "visit_occurrence", "visit_concept_id", "visit_start_date", "visit_end_date",
"device", "device_exposure", "device_concept_id", "device_exposure_start_date", "device_exposure_end_date"
) %>% dplyr::filter(.data$domain_id %in% .env$domain_id)
}
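# For example, table_refs("drug") returns the drug_exposure row with the
# concept id and start/end date column names used to query that domain.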
#' Create a new generated cohort set from a list of concept sets
#'
#' @description
#'
#' Generate a new cohort set from one or more concept sets. Each
#' concept set will result in one cohort and represent the time during which
#' the concept was observed for each subject/person. Concept sets can be
#' passed to this function as:
#' \itemize{
#' \item{A named list of numeric vectors, one vector per concept set}
#' \item{A named list of Capr concept sets}
#' }
#'
#' Clinical observation records will be looked up in the respective domain tables
#' using the vocabulary in the CDM. If a required domain table does not exist in
#' the cdm object a warning will be given.
#' Concepts that are not in the vocabulary or in the data will be silently ignored.
#' If end dates are missing or do not exist, as in the case of the procedure and
#' observation domains, the start date will be used as the end date.
#'
#' @param cdm A cdm reference object created by `CDMConnector::cdmFromCon` or `CDMConnector::cdm_from_con`
#' @param conceptSet,concept_set A named list of numeric vectors or Capr concept sets
#' @param name The name of the new generated cohort table as a character string
#' @param limit Include "first" (default) or "all" occurrences of events in the cohort
#' \itemize{
#' \item{"first" will include only the first occurrence of any event in the concept set in the cohort.}
#' \item{"all" will include all occurrences of the events defined by the concept set in the cohort.}
#' }
#' @param requiredObservation,required_observation A numeric vector of length 2 that specifies the number of days of
#' required observation time prior to index and post index for an event to be included in the cohort.
#' @param end How should the `cohort_end_date` be defined?
#' \itemize{
#' \item{"observation_period_end_date" (default): The earliest observation_period_end_date after the event start date}
#' \item{numeric scalar: A fixed number of days from the event start date}
#'   \item{"event_end_date": The event end date. If the event end date is not populated then the event start date will be used}
#' }
#' @param subsetCohort,subset_cohort The name of a cohort table in the cdm containing the
#' individuals for whom to generate cohorts. Only individuals in this cohort
#' table will appear in the created generated cohort set.
#' @param subsetCohortId,subset_cohort_id A set of cohort IDs from the subset cohort table to
#' include. If none are provided, all cohorts in the subset cohort table will
#' be included.
#' @param overwrite Should the cohort table be overwritten if it already exists? TRUE (default) or FALSE.
#'
#' @return A cdm reference object with the new generated cohort set table added
#' @export
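#' @examples
#' \dontrun{
#' # A minimal sketch using the duckdb Eunomia example database:
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#' cdm <- cdm_from_con(con, cdm_schema = "main", write_schema = "main")
#' cdm <- generateConceptCohortSet(
#'   cdm = cdm, conceptSet = list(pharyngitis = 4112343), name = "new_cohort"
#' )
#' cohortCount(cdm$new_cohort)
#' }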
generateConceptCohortSet <- function(cdm,
conceptSet = NULL,
name,
limit = "first",
requiredObservation = c(0,0),
end = "observation_period_end_date",
subsetCohort = NULL,
subsetCohortId = NULL,
overwrite = TRUE) {
# check cdm ----
checkmate::assertClass(cdm, "cdm_reference")
con <- cdmCon(cdm)
checkmate::assertTRUE(DBI::dbIsValid(cdmCon(cdm)))
checkmate::assert_character(name, len = 1, min.chars = 1, any.missing = FALSE, pattern = "[a-zA-Z0-9_]+")
assertTables(cdm, "observation_period", empty.ok = FALSE)
assertWriteSchema(cdm)
# check name ----
checkmate::assertLogical(overwrite, len = 1, any.missing = FALSE)
checkmate::assertCharacter(name, len = 1, any.missing = FALSE, min.chars = 1, pattern = "[a-z1-9_]+")
existingTables <- listTables(con, cdmWriteSchema(cdm))
if (name %in% existingTables && !overwrite) {
rlang::abort(glue::glue("{name} already exists in the CDM write_schema and overwrite is FALSE!"))
}
# check limit ----
checkmate::assertChoice(limit, c("first", "all"))
# check requiredObservation ----
checkmate::assertIntegerish(requiredObservation, lower = 0, any.missing = FALSE, len = 2)
# check end ----
if (is.numeric(end)) {
checkmate::assertIntegerish(end, lower = 0L, len = 1)
} else if (is.character(end)) {
checkmate::assertCharacter(end, len = 1)
checkmate::assertChoice(end, choices = c("observation_period_end_date", "event_end_date"))
} else {
rlang::abort('`end` must be a natural number of days from start, "observation_period_end_date", or "event_end_date"')
}
# check ConceptSet ----
checkmate::assertList(conceptSet, min.len = 1, any.missing = FALSE, types = c("numeric", "ConceptSet"), names = "named")
CDMConnector::assert_tables(cdm, "concept")
if (methods::is(conceptSet[[1]], "ConceptSet")) {
purrr::walk(conceptSet, ~checkmate::assertClass(., "ConceptSet"))
df <- dplyr::tibble(cohort_definition_id = seq_along(conceptSet),
cohort_name = names(conceptSet),
df = purrr::map(conceptSet, caprConceptToDataframe)) %>%
tidyr::unnest(cols = df) %>%
dplyr::mutate(
"limit" = .env$limit,
"prior_observation" = .env$requiredObservation[1],
"future_observation" = .env$requiredObservation[2],
"end" = .env$end
) %>%
dplyr::select(
"cohort_definition_id", "cohort_name", "concept_id" = "conceptId",
"include_descendants" = "includeDescendants",
"is_excluded" = "isExcluded",
dplyr::any_of(c(
"limit", "prior_observation", "future_observation", "end"
))
)
} else {
# conceptSet must be a named list of integer-ish vectors
purrr::walk(conceptSet, ~checkmate::assert_integerish(., lower = 0, min.len = 1, any.missing = FALSE))
df <- dplyr::tibble(cohort_definition_id = seq_along(.env$conceptSet),
cohort_name = names(.env$conceptSet),
limit = .env$limit,
prior_observation = .env$requiredObservation[1],
future_observation = .env$requiredObservation[2],
end = .env$end,
concept_id = .env$conceptSet) %>%
tidyr::unnest(cols = "concept_id") %>%
dplyr::transmute(.data$cohort_definition_id,
.data$cohort_name,
.data$concept_id,
.data$limit,
.data$prior_observation,
.data$future_observation,
.data$end,
include_descendants = FALSE,
is_excluded = FALSE)
}
# check target cohort -----
  if (!is.null(subsetCohort)) {
    assertTables(cdm, subsetCohort)
  }
  if (!is.null(subsetCohort) && !is.null(subsetCohortId)) {
    if (nrow(omopgenerics::settings(cdm[[subsetCohort]]) %>% dplyr::filter(.data$cohort_definition_id %in% .env$subsetCohortId)) == 0) {
      cli::cli_abort("cohort_definition_id {subsetCohortId} not found in cohort set of {subsetCohort}")
    }
  }
# upload concept data to the database ----
tempName <- paste0("tmp", as.integer(Sys.time()), "_")
DBI::dbWriteTable(cdmCon(cdm),
name = inSchema(cdmWriteSchema(cdm), tempName, dbms = dbms(con)),
value = df,
overwrite = TRUE)
if (any(df$include_descendants)) {
CDMConnector::assert_tables(cdm, "concept_ancestor")
}
# realize full list of concepts ----
concepts <- dplyr::tbl(cdmCon(cdm), inSchema(cdmWriteSchema(cdm),
tempName,
dbms = dbms(con))) %>%
dplyr::rename_all(tolower) %>%
{ if (any(df$include_descendants)) {
dplyr::filter(., .data$include_descendants) %>%
dplyr::inner_join(cdm$concept_ancestor, by = c("concept_id" = "ancestor_concept_id")) %>%
dplyr::select(
"cohort_definition_id", "cohort_name",
"concept_id" = "descendant_concept_id", "is_excluded",
dplyr::any_of(c("limit", "prior_observation", "future_observation", "end"))
) %>%
dplyr::union_all(
dplyr::tbl(
cdmCon(cdm),
inSchema(cdmWriteSchema(cdm), tempName, dbms = dbms(con))
) %>%
dplyr::select(dplyr::any_of(c(
"cohort_definition_id", "cohort_name", "concept_id", "is_excluded",
"limit", "prior_observation", "future_observation", "end"
)))
)
} else . } %>%
dplyr::filter(.data$is_excluded == FALSE) %>%
# Note that concepts that are not in the vocab will be silently ignored
dplyr::inner_join(dplyr::select(cdm$concept, "concept_id", "domain_id"), by = "concept_id") %>%
dplyr::select(
"cohort_definition_id", "cohort_name", "concept_id", "domain_id",
dplyr::any_of(c("limit", "prior_observation", "future_observation", "end"))
) %>%
dplyr::distinct() %>%
dplyr::compute(temporary = TRUE, overwrite = overwrite)
DBI::dbRemoveTable(cdmCon(cdm), name = inSchema(cdmWriteSchema(cdm), tempName, dbms = dbms(con)))
domains <- concepts %>% dplyr::distinct(.data$domain_id) %>% dplyr::pull() %>% tolower()
domains <- domains[!is.na(domains)] # remove NAs
domains <- domains[domains %in% c("condition", "drug", "procedure", "observation", "measurement", "visit", "device")]
if (length(domains) == 0) cli::cli_abort("None of the input concept IDs are in the CDM concept table!")
# check we have references to all required tables ----
missing_tables <- dplyr::setdiff(table_refs(domain_id = domains) %>% dplyr::pull("table_name"), names(cdm))
if (length(missing_tables) > 0) {
s <- ifelse(length(missing_tables) > 1, "s", "")
is <- ifelse(length(missing_tables) > 1, "are", "is")
missing_tables <- paste(missing_tables, collapse = ", ")
cli::cli_warn("Concept set includes concepts from the {missing_tables} table{s} which {is} not found in the cdm reference and will be skipped.")
domains <- table_refs(domain_id = domains) %>%
dplyr::filter(!(.data$table_name %in% missing_tables)) %>%
dplyr::pull("domain_id")
}
# rowbind results from clinical data tables ----
get_domain <- function(domain, cdm, concepts) {
df <- table_refs(domain_id = domain)
if (isFALSE(df$table_name %in% names(cdm))) {
return(NULL)
}
by <- rlang::set_names("concept_id", df[["concept_id"]])
cdm[[df$table_name]] %>%
dplyr::inner_join(concepts, by = local(by)) %>%
dplyr::transmute(.data$cohort_definition_id,
subject_id = .data$person_id,
cohort_start_date = !!rlang::parse_expr(df$start_date),
cohort_end_date = dplyr::coalesce(!!rlang::parse_expr(df$end_date),
!!dateadd(df$start_date, 1)))
}
if (length(domains) == 0) {
cohort <- NULL
} else {
cohort <- purrr::map(domains, ~get_domain(., cdm = cdm, concepts = concepts)) %>%
purrr::reduce(dplyr::union_all)
}
if (is.null(cohort)) {
# no domains included. Create empty cohort.
cohort <- dplyr::tibble(
cohort_definition_id = integer(),
subject_id = integer(),
cohort_start_date = as.Date(x = integer(0), origin = "1970-01-01"),
cohort_end_date = as.Date(x = integer(0), origin = "1970-01-01")
)
cdm <- omopgenerics::insertTable(
cdm = cdm, name = name, table = cohort, overwrite = overwrite
)
cohortRef <- cdm[[name]]
} else {
# drop any outside of an observation period
obs_period <- cdm[["observation_period"]] %>%
dplyr::select("subject_id" = "person_id",
"observation_period_start_date",
"observation_period_end_date")
# subset to target cohort
if(!is.null(subsetCohort)){
if(is.null(subsetCohortId)){
obs_period <- obs_period %>%
dplyr::inner_join(cdm[[subsetCohort]] %>%
dplyr::select("subject_id") %>%
dplyr::distinct(),
by = "subject_id")
} else {
obs_period <- obs_period %>%
dplyr::inner_join(cdm[[subsetCohort]] %>%
dplyr::filter(.data$cohort_definition_id %in%
.env$subsetCohortId) %>%
dplyr::select("subject_id") %>%
dplyr::distinct(),
by = "subject_id")
}
}
# TODO remove this variable since it is confusing
cohort_start_date <- "cohort_start_date"
cohortRef <- cohort %>%
dplyr::inner_join(obs_period, by = "subject_id") %>%
# TODO fix dplyr::between sql translation, also pmin.
dplyr::filter(.data$observation_period_start_date <= .data$cohort_start_date &
.data$cohort_start_date <= .data$observation_period_end_date) %>%
{if (requiredObservation[1] > 0) dplyr::filter(., !!dateadd("observation_period_start_date",
requiredObservation[1]) <=.data$cohort_start_date) else .} %>%
{if (requiredObservation[2] > 0) dplyr::filter(., !!dateadd("cohort_start_date",
requiredObservation[2]) <= .data$observation_period_end_date) else .} %>%
{if (end == "observation_period_end_date") dplyr::mutate(., cohort_end_date = .data$observation_period_end_date) else .} %>%
{if (is.numeric(end)) dplyr::mutate(., cohort_end_date = !!dateadd("cohort_start_date", end)) else .} %>%
dplyr::mutate(cohort_end_date = dplyr::case_when(
.data$cohort_end_date > .data$observation_period_end_date ~ .data$observation_period_end_date,
TRUE ~ .data$cohort_end_date)) %>%
dplyr::select("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date") %>%
# TODO order_by = .data$cohort_start_date
{if (limit == "first") dplyr::slice_min(., n = 1, order_by = cohort_start_date, by = c("cohort_definition_id", "subject_id")) else .} %>%
cohort_collapse() %>%
dplyr::mutate(cohort_start_date = !!asDate(.data$cohort_start_date),
cohort_end_date = !!asDate(.data$cohort_end_date)) %>%
dplyr::compute(name = name, temporary = FALSE, overwrite = overwrite)
}
cohortSetRef <- concepts %>%
dplyr::select(dplyr::any_of(c(
"cohort_definition_id", "cohort_name", "limit", "prior_observation",
"future_observation", "end"
))) %>%
dplyr::distinct() %>%
dplyr::collect()
cohortCountRef <- cohortRef %>%
dplyr::group_by(.data$cohort_definition_id) %>%
dplyr::summarise(
number_records = dplyr::n(),
number_subjects = dplyr::n_distinct(.data$subject_id)) %>%
dplyr::collect()
cohortAttritionRef <- cohortSetRef %>%
dplyr::select("cohort_definition_id") %>%
dplyr::distinct() %>%
dplyr::left_join(cohortCountRef, by = "cohort_definition_id") %>%
dplyr::mutate(
number_records = dplyr::coalesce(.data$number_records, 0L),
number_subjects = dplyr::coalesce(.data$number_subjects, 0L),
reason_id = 1,
reason = "Initial qualifying events",
excluded_records = 0,
excluded_subjects = 0)
cdm[[name]] <- omopgenerics::newCohortTable(
table = cohortRef,
cohortSetRef = cohortSetRef,
cohortAttritionRef = cohortAttritionRef
)
return(cdm)
}
#' @rdname generateConceptCohortSet
#' @export
generate_concept_cohort_set <- function(cdm,
concept_set = NULL,
name = "cohort",
limit = "first",
required_observation = c(0,0),
end = "observation_period_end_date",
subset_cohort = NULL,
subset_cohort_id = NULL,
overwrite = TRUE) {
generateConceptCohortSet(cdm = cdm,
conceptSet = concept_set,
name = name,
limit = limit,
requiredObservation = required_observation,
end = end,
subsetCohort = subset_cohort,
subsetCohortId = subset_cohort_id,
overwrite = overwrite)
}
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/generateConceptCohortSet.R
|
#' @importFrom omopgenerics cohortCount
#' @export
omopgenerics::cohortCount
#' @importFrom omopgenerics settings
#' @export
omopgenerics::settings
#' @importFrom omopgenerics attrition
#' @export
omopgenerics::attrition
#' @importFrom omopgenerics newCohortTable
#' @export
omopgenerics::newCohortTable
#' @importFrom omopgenerics insertTable
#' @export
omopgenerics::insertTable
#' @importFrom omopgenerics dropTable
#' @export
omopgenerics::dropTable
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/reexports-omopgenerics.R
|
#' Quantile calculation using dbplyr
#'
#' @description
#' This function provides DBMS independent syntax for quantiles estimation.
#' Can be used by itself or in combination with `mutate()`
#' when calculating other aggregate metrics (min, max, mean).
#'
#' `summarise_quantile()`, `summarize_quantile()`, `summariseQuantile()` and `summarizeQuantile()` are synonyms.
#'
#' @details
#' Implemented quantiles estimation algorithm returns values analogous to
#' `quantile{stats}` with argument `type = 1`.
#' See discussion in Hyndman and Fan (1996).
#' Results differ from `PERCENTILE_CONT` natively implemented in various DBMS,
#' where returned values are equal to `quantile{stats}` with default argument `type = 7`
#'
#'
#' @param .data lazy data frame backed by a database query.
#' @param x column name whose sample quantiles are wanted.
#' @param probs numeric vector of probabilities with values in \[0,1\].
#' @param name_suffix,nameSuffix character; is appended to numerical quantile value as a column name part.
#' @return
#' An object of the same type as '.data'
#'
#' @importFrom rlang %||%
#' @export
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb())
#' mtcars_tbl <- dplyr::copy_to(con, mtcars, name = "tmp", overwrite = TRUE, temporary = TRUE)
#'
#' df <- mtcars_tbl %>%
#' dplyr::group_by(cyl) %>%
#' dplyr::mutate(mean = mean(mpg, na.rm = TRUE)) %>%
#' summarise_quantile(mpg, probs = c(0, 0.2, 0.4, 0.6, 0.8, 1),
#' name_suffix = "quant") %>%
#' dplyr::collect()
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }
summarise_quantile <- function(.data, x = NULL, probs, name_suffix = "value") {
checkmate::assertClass(.data, "tbl_sql")
checkmate::assert_double(probs, min.len = 1, lower = 0, upper = 1)
checkmate::assert_character(name_suffix, null.ok = TRUE)
selection_context <- .data$lazy_query$select_operation
if (!is.null(selection_context) && selection_context == 'summarise') {
rlang::abort("Cannot estimate quantiles in summarise context.
Try using `mutate()` function instead of `summarise()`")
}
vars_context <- NULL
x_context <- NULL
x_arg <- rlang::enexpr(x)
if (!is.null(selection_context)) {
vars_context <- .data$lazy_query$select %>%
dplyr::filter(unlist(purrr::map(.data$expr, rlang::is_quosure)))
if (nrow(vars_context) > 0) {
vars_context <- vars_context %>%
# dplyr::mutate(x_var = purrr::map(purrr::map(.data$expr, rlang::get_expr), ~ if (length(.x) >= 2) {.x[[2]]} else {NULL}))
dplyr::mutate(x_var = purrr::map(.data$expr, ~if(length(rlang::get_expr(.x)) >= 2) {rlang::get_expr(.x)[[2]]} else {NULL}))
x_context <- unique(vars_context$x_var)[[1]]
}
}
if (!is.null(x_context) && !is.null(x_arg) && x_context != x_arg) {
msg <- paste0("Confilicting quantile variables: `", x_context, "` (from context) and `", x_arg, "` (passed argument)")
rlang::abort(msg)
}
if (is.null(x_context) && is.null(x_arg)) {
msg <- "Quantile variable is not specified"
rlang::abort(msg)
}
x <- x_context %||% x_arg
group_by_vars <- .data$lazy_query$group_vars
group_1 <- rlang::syms(c(group_by_vars, x))
funs <- list()
if (!is.null(selection_context)) {
funs <- purrr::map(vars_context$name, ~ rlang::expr(max(!!rlang::sym(.x), na.rm = TRUE)))
names(funs) <- vars_context$name
}
group_2 <- rlang::syms(c(group_by_vars, names(funs)))
probs <- sort(unique(probs))
quant_expr <- purrr::map(probs, ~ rlang::expr(min(ifelse(accumulated >= !!.x * total, !!x, NA), na.rm = TRUE)))
names(quant_expr) <- paste0('p', as.character(probs * 100), '_', name_suffix)
query <- rlang::expr(
.data %>%
dplyr::group_by(!!!group_1) %>%
dplyr::summarise(..n = dplyr::n(), !!!funs, .groups = "drop") %>%
dplyr::group_by(!!!group_2) %>%
dbplyr::window_order(!!x) %>%
dplyr::mutate(accumulated = cumsum(.data$..n),
total = sum(.data$..n, na.rm = TRUE)) %>%
dplyr::summarize(!!!quant_expr, .groups = "drop")
)
eval(query)
}
#' @rdname summarise_quantile
#' @export
summarize_quantile <- summarise_quantile
#' @rdname summarise_quantile
#' @export
summariseQuantile <- function(.data,
x = NULL,
probs,
nameSuffix = "value") {
x <- rlang::enexpr(x)
summarise_quantile(.data = .data,
x = !!x,
probs = probs,
name_suffix = nameSuffix)
}
#' @rdname summarise_quantile
#' @export
summarizeQuantile <- summariseQuantile
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/summariseQuantile.R
|
#' Pipe operator
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom dplyr %>%
#' @usage lhs \%>\% rhs
#' @param lhs A value or the magrittr placeholder.
#' @param rhs A function call using the magrittr semantics.
#' @return The result of calling `rhs(lhs)`.
NULL
# Workaround for Oracle since ROracle does not define dbIsValid
.dbIsValid <- function(dbObj, ...) {
if (methods::is(dbObj, "OraConnection")) {
is.character(DBI::dbListTables(dbObj))
} else {
DBI::dbIsValid(dbObj, ...)
}
}
#' Helper for working with compound schemas
#'
#' This is similar to dbplyr::in_schema but has been tested across multiple
#' database platforms. It only exists to work around some of the limitations
#' of dbplyr::in_schema.
#'
#' @param schema A schema name as a character string
#' @param table A table name as character string
#' @param dbms The name of the database management system as returned
#' by `dbms(connection)`
#'
#' @return A DBI::Id that represents a qualified table and schema
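#'
#' @examples
#' \dontrun{
#' # a qualified identifier for the person table in the "main" schema of a
#' # duckdb database (a minimal sketch of typical usage)
#' inSchema(schema = "main", table = "person", dbms = "duckdb")
#' }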
#' @export
inSchema <- function(schema, table, dbms = NULL) {
checkmate::assertCharacter(schema, min.len = 1, max.len = 3, null.ok = TRUE)
checkmate::assertCharacter(table, len = 1)
checkmate::assertCharacter(dbms, len = 1, null.ok = TRUE)
if (is.null(schema)) {
# return temp table name
if (dbms == "sql server") {
return(paste0("#", table))
}
return(table)
}
if ("prefix" %in% names(schema)) {
checkmate::assertCharacter(schema['prefix'], len = 1, min.chars = 1, pattern = "[a-zA-Z1-9_]+")
# match the case of table name
if (toupper(table) == table) {
table <- paste0(toupper(schema['prefix']), table)
} else {
table <- paste0(schema['prefix'], table)
}
schema <- schema[!names(schema) %in% "prefix"]
checkmate::assertCharacter(schema, min.len = 1, max.len = 2)
}
if (isFALSE(dbms %in% c("snowflake", "sql server"))) {
# only a few dbms support three part names
checkmate::assertCharacter(schema, len = 1)
}
schema <- unname(schema)
if (isTRUE(dbms %in% c("bigquery"))) { #TODO bigrquery needs to fix this
checkmate::assertCharacter(schema, len = 1)
out <- paste(c(schema, table), collapse = ".")
} else {
out <- switch(length(schema),
DBI::Id(schema = schema, table = table),
DBI::Id(catalog = schema[1], schema = schema[2], table = table))
}
return(out)
}
#' @export
#' @rdname inSchema
in_schema <- inSchema
#' List tables in a schema
#'
#' DBI::dbListTables can be used to get all tables in a database but not always in a
#' specific schema. `listTables` will list tables in a schema.
#'
#' @param con A DBI connection to a database
#' @param schema The name of a schema in a database. If NULL, returns DBI::dbListTables(con).
#'
#' @return A character vector of table names
#' @export
#' @importFrom rlang .data
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
#' listTables(con, schema = "main")
#' }
list_tables <- function(con, schema = NULL) {
if (methods::is(con, "Pool")) {
if (!rlang::is_installed("pool")) {
rlang::abort("Please install the pool package.")
}
con <- pool::localCheckout(con)
}
checkmate::assertTRUE(DBI::dbIsValid(con))
if (methods::is(schema, "Id")) {
schema <- schema@name
}
if ("prefix" %in% names(schema)) {
prefix <- schema["prefix"]
checkmate::assert_character(prefix, min.chars = 1, len = 1)
schema <- schema[names(schema) != "prefix"]
process_prefix <- function(x) {
stringr::str_subset(x, paste0("^", prefix)) %>% stringr::str_remove(paste0("^", prefix))
}
} else {
process_prefix <- function(x) {x}
}
checkmate::assert_character(schema, null.ok = TRUE, min.len = 1, max.len = 2, min.chars = 1)
if (is.null(schema)) {
if (dbms(con) == "sql server") {
# return temp tables
# tempdb.sys.objects
temp_tables <- DBI::dbGetQuery(con, "select * from tempdb..sysobjects")[[1]] %>%
stringr::str_remove("_________________________.*$") %>%
stringr::str_remove("^#+")
return(temp_tables)
} else if (dbms(con) == "snowflake") {
# return all tables including temp tables
return(DBI::dbGetQuery(con, "show terse tables;")$name)
} else {
return(DBI::dbListTables(con))
}
}
withr::local_options(list(arrow.pull_as_vector = TRUE))
if (methods::is(con, "DatabaseConnectorJdbcConnection")) {
out <- DBI::dbListTables(con, databaseSchema = paste0(schema, collapse = "."))
return(process_prefix(out))
}
if (methods::is(con, "PqConnection") || methods::is(con, "RedshiftConnection")) {
sql <- glue::glue_sql("select table_name from information_schema.tables where table_schema = {unname(schema[1])};", .con = con)
out <- DBI::dbGetQuery(con, sql) %>% dplyr::pull(.data$table_name)
return(process_prefix(out))
}
if (methods::is(con, "duckdb_connection")) {
sql <- glue::glue_sql("select table_name from information_schema.tables where table_schema = {schema[[1]]};", .con = con)
out <- DBI::dbGetQuery(con, sql) %>% dplyr::pull(.data$table_name)
return(process_prefix(out))
}
if (methods::is(con, "Snowflake")) {
if (length(schema) == 2) {
sql <- glue::glue("select table_name from {schema[1]}.information_schema.tables where table_schema = '{schema[2]}';")
} else {
sql <- glue::glue("select table_name from information_schema.tables where table_schema = '{schema[1]}';")
}
out <- DBI::dbGetQuery(con, sql) %>% dplyr::pull(1)
return(process_prefix(out))
}
if (methods::is(con, "Spark SQL")) {
# spark odbc connection
sql <- paste("SHOW TABLES", if (!is.null(schema)) paste("IN", schema[[1]]))
out <- DBI::dbGetQuery(con, sql) %>%
dplyr::filter(.data$isTemporary == FALSE) %>%
dplyr::pull(.data$tableName)
return(process_prefix(out))
}
if (methods::is(con, "OdbcConnection")) {
if (length(schema) == 1) {
out <- DBI::dbListTables(con, schema_name = schema)
} else if (length(schema) == 2) {
out <- DBI::dbListTables(con, catalog_name = schema[[1]], schema_name = schema[[2]])
} else rlang::abort("schema missing!")
return(process_prefix(out))
}
if (methods::is(con, "OraConnection")) {
checkmate::assert_character(schema, null.ok = TRUE, len = 1, min.chars = 1)
out <- DBI::dbListTables(con, schema = schema)
return(process_prefix(out))
}
if (methods::is(con, "BigQueryConnection")) {
checkmate::assert_character(schema, null.ok = TRUE, len = 1, min.chars = 1)
out <- DBI::dbGetQuery(con,
glue::glue("SELECT table_name
FROM `{schema}`.INFORMATION_SCHEMA.TABLES
WHERE table_schema = '{schema}'"))[[1]]
return(process_prefix(out))
}
rlang::abort(paste(paste(class(con), collapse = ", "), "connection not supported"))
}
#' @rdname list_tables
#' @export
listTables <- list_tables
# To silence warning <BigQueryConnection> uses an old dbplyr interface
# https://github.com/r-dbi/bigrquery/issues/508
#' @importFrom dbplyr dbplyr_edition
#' @export
dbplyr_edition.BigQueryConnection<- function(con) 2L
# Create the cdm tables in a database
execute_ddl <- function(con, cdm_schema, cdm_version = "5.3", dbms = "duckdb", tables = tbl_group("all"), prefix = "") {
specs <- spec_cdm_field[[cdm_version]] %>%
dplyr::mutate(cdmDatatype = dplyr::if_else(.data$cdmDatatype == "varchar(max)", "varchar(2000)", .data$cdmDatatype)) %>%
dplyr::mutate(cdmFieldName = dplyr::if_else(.data$cdmFieldName == '"offset"', "offset", .data$cdmFieldName)) %>%
dplyr::mutate(cdmDatatype = dplyr::case_when(
dbms(con) == "postgresql" & .data$cdmDatatype == "datetime" ~ "timestamp",
dbms(con) == "redshift" & .data$cdmDatatype == "datetime" ~ "timestamp",
TRUE ~ cdmDatatype)) %>%
tidyr::nest(col = -"cdmTableName") %>%
dplyr::mutate(col = purrr::map(col, ~setNames(as.character(.$cdmDatatype), .$cdmFieldName)))
for (i in cli::cli_progress_along(tables)) {
fields <- specs %>%
dplyr::filter(.data$cdmTableName == tables[i]) %>%
dplyr::pull(.data$col) %>%
unlist()
DBI::dbCreateTable(con, inSchema(cdm_schema, paste0(prefix, tables[i]), dbms = dbms(con)), fields = fields)
}
}
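# A minimal usage sketch for execute_ddl (hypothetical, kept as comments since
# this helper is internal; assumes an empty duckdb database and the internal
# spec_cdm_field data):
# con <- DBI::dbConnect(duckdb::duckdb())
# execute_ddl(con, cdm_schema = "main", tables = c("person", "observation_period"))
# DBI::dbDisconnect(con, shutdown = TRUE)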
# get a unique prefix based on current time. internal function.
unique_prefix <- function() {
as.integer((as.numeric(Sys.time())*10) %% 1e6)
}
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/utils.R
|
#' Validation report for a CDM
#'
#' Print a short validation report for a cdm object. The validation includes
#' checking that column names are correct and that no tables are empty. A short
#' report is printed to the console. This function is meant for interactive use.
#'
#'
#' @param cdm A cdm reference object.
#'
#' @return Invisibly returns the cdm input
#' @export
#'
#' @examples
#' \dontrun{
#' con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
#' cdm <- cdm_from_con(con, cdm_schema = "main")
#' validate_cdm(cdm)
#' DBI::dbDisconnect(con)
#' }
validate_cdm <- function(cdm) {
checkmate::assert_class(cdm, "cdm_reference")
if (is.null(cdmCon(cdm))) {
rlang::abort("validate_cdm is not implement for local cdms")
}
cli::cat_rule(
glue::glue("CDM v{version(cdm)} validation (checking {length(cdm)} tables)")
)
validate_cdm_colnames(cdm)
validate_cdm_rowcounts(cdm)
}
#' @export
#' @rdname validate_cdm
validateCdm <- validate_cdm
validate_cdm_colnames <- function(cdm) {
# local option needed for pull with arrow
withr::local_options(list(arrow.pull_as_vector = TRUE))
any_dif <- FALSE
ver <- attr(cdm, "cdm_version")
for (nm in names(cdm)) {
# spec_cdm_field is a global internal package dataframe created in
# the file extras/package_maintenance.R
expected_columns <- spec_cdm_field[[ver]] %>%
dplyr::filter(.data$cdmTableName == nm) %>%
dplyr::pull(.data$cdmFieldName)
actual_columns <- cdm[[nm]] %>% head(1) %>% dplyr::collect() %>% colnames()
dif <- waldo::compare(expected_columns,
actual_columns,
x_arg = glue::glue("{nm} table expected columns"),
y_arg = glue::glue("{nm} table actual columns"),
ignore_attr = TRUE)
if (length(dif) > 0) {
print(dif, n = 100)
any_dif <- TRUE
}
}
if (!any_dif) {
cli::cat_bullet("cdm field names are correct",
bullet = "tick",
bullet_col = "green")
}
}
validate_cdm_rowcounts <- function(cdm) {
# arrow.pull_as_vector option needed for dplyr::pull with arrow
withr::local_options(list(arrow.pull_as_vector = TRUE))
nm <- names(cdm)
rowcounts <- purrr::map_dbl(nm, function(.) {
dplyr::tally(cdm[[.]], name = "n") %>%
dplyr::pull(.data$n)
}) %>%
rlang::set_names(nm)
empty_tables <- rowcounts[rowcounts == 0]
if (length(empty_tables) > 0) {
table_text <- cli::col_grey(paste(names(empty_tables), collapse = ", "))
s <- ifelse(length(empty_tables) > 1, "s", "")
cli::cat_bullet(
glue::glue("{length(empty_tables)} empty CDM table{s}: {table_text}"),
bullet_col = "red")
} else {
cli::cat_bullet("all row counts > 0", bullet = "tick", bullet_col = "green")
}
invisible(cdm)
}
#' Assert that tables exist in a cdm object
#'
#' A cdm object is a list of references to a subset of tables in the
#' OMOP Common Data Model.
#' If you write a function that accepts a cdm object as a parameter
#' `assert_tables`/`assertTables` will help you check that the tables you need
#' are in the cdm object, have the correct columns/fields,
#' and (optionally) are not empty.
#'
#' @param cdm A cdm object
#' @param tables A character vector of table names to check.
#' @param empty.ok Should an empty table (0 rows) be considered an error?
#' TRUE or FALSE (default)
#' @param add An optional AssertCollection created by
#' `checkmate::makeAssertCollection()` that errors should be added to.
#'
#' @return Invisibly returns the cdm object
#' @importFrom rlang .env .data
#' @export
#'
#' @examples
#' \dontrun{
#' # Use assertTables inside a function to check that tables exist
#' countDrugsByGender <- function(cdm) {
#' assertTables(cdm, tables = c("person", "drug_era"), empty.ok = FALSE)
#'
#' cdm$person %>%
#' dplyr::inner_join(cdm$drug_era, by = "person_id") %>%
#' dplyr::count(.data$gender_concept_id, .data$drug_concept_id) %>%
#' dplyr::collect()
#' }
#'
#' library(CDMConnector)
#' con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
#' cdm <- cdm_from_con(con)
#'
#' countDrugsByGender(cdm)
#'
#' DBI::dbDisconnect(con, shutdown = TRUE)
#'
#' }
assert_tables <- function(cdm, tables, empty.ok = FALSE, add = NULL) {
checkmate::assertClass(add, "AssertCollection", null.ok = TRUE)
checkmate::assertLogical(empty.ok, len = 1, null.ok = FALSE)
checkmate::assertCharacter(tables,
min.len = 1,
min.chars = 1,
null.ok = FALSE)
checkmate::assertClass(cdm, "cdm_reference")
withr::local_options(list(arrow.pull_as_vector = TRUE))
ver <- attr(cdm, "cdm_version")
missingTables <- tables[!(tables %in% names(cdm))]
existingTables <- tables[tables %in% names(cdm)]
if (length(missingTables) > 0) {
s <- ifelse(length(missingTables) > 1, "s", "")
misstbls <- paste(missingTables, collapse = ', ' )
msg <- glue::glue("- {misstbls} table{s} not found in cdm object")
if (is.null(add)) rlang::abort(msg) else add$push(msg)
}
# checking of column names will not throw an error if column names exist but
# are in the wrong order
for (nm in existingTables) {
# spec_cdm_field is global internal package data (list of dataframes)
# created in extras/package_maintenance.R
expectedColumns <- spec_cdm_field[[ver]] %>%
dplyr::filter(.data$cdmTableName == .env$nm) %>%
dplyr::pull(.data$cdmFieldName)
actualColumns <- cdm[[nm]] %>% head(1) %>% dplyr::collect() %>% colnames()
missingColumns <- dplyr::setdiff(expectedColumns, actualColumns)
if (length(missingColumns) > 0) {
s <- ifelse(length(missingColumns) > 1, "s", "")
misscols <- paste(missingColumns, collapse = ", ")
msg <- glue::glue("- {misscols} column{s} not found in cdm table {nm}")
if (is.null(add)) rlang::abort(msg) else add$push(msg)
}
}
if (!empty.ok) {
rowcounts <- purrr::map_dbl(existingTables, function(.) {
dplyr::tally(cdm[[.]], name = "n") %>%
dplyr::pull(.data$n)
}) %>%
rlang::set_names(existingTables)
empty_tables <- rowcounts[rowcounts == 0]
if (length(empty_tables) > 0) {
s <- ifelse(length(empty_tables) > 1, "s are", " is")
emptytbls <- paste(names(empty_tables), collapse = ", ")
msg <- glue::glue("- {emptytbls} cdm table{s} empty")
if (is.null(add)) rlang::abort(msg) else add$push(msg)
}
}
invisible(cdm)
}
#' @export
#' @rdname assert_tables
assertTables <- assert_tables
#' Assert that cdm has a writable schema
#'
#' A cdm object can optionally contain a single schema in a database with
#' write access. assert_write_schema checks that the cdm contains the
#' "write_schema" attribute and tests that local dataframes can be written
#' to tables in this schema.
#'
#' @param cdm A cdm object
#' @param add An optional AssertCollection created by
#' `checkmate::makeAssertCollection()` that errors should be added to.
#'
#' @return Invisibly returns the cdm object
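#'
#' @examples
#' \dontrun{
#' # a minimal sketch using the bundled example data
#' con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
#' cdm <- cdm_from_con(con, cdm_schema = "main", write_schema = "main")
#' assert_write_schema(cdm)
#' DBI::dbDisconnect(con, shutdown = TRUE)
#' }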
#' @export
assert_write_schema <- function(cdm, add = NULL) {
checkmate::assert_class(cdm, "cdm_reference")
if (is.null(cdmCon(cdm))) {
rlang::abort("Local cdm objects do not have a write schema.")
}
write_schema <- cdmWriteSchema(cdm)
checkmate::assert_character(write_schema, min.len = 1, max.len = 3, min.chars = 1, add = add)
verify_write_access(cdmCon(cdm),
write_schema = write_schema,
add = add)
invisible(cdm)
}
#' @rdname assert_write_schema
#' @export
assertWriteSchema <- assert_write_schema
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/validate.R
|
#' Union all cohorts in a single cohort table
#'
#' @param x A tbl reference to a cohort table
#' @param cohort_definition_id A number to use for the new cohort_definition_id
#'
#' `r lifecycle::badge("superseded")`
#'
#' @return A lazy query that when executed will resolve to a new cohort table with
#' one cohort_definition_id resulting from the union of all cohorts in the original
#' cohort table
#' @export
union_cohorts <- function(x, cohort_definition_id = 1L) {
lifecycle::deprecate_warn("1.1.0", "union_cohorts()", "cohort_union()")
checkmate::assert_class(x, "tbl")
checkmate::assert_integerish(cohort_definition_id, len = 1, lower = 0)
checkmate::assert_subset(c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date"), colnames(x))
cohort_definition_id <- as.integer(cohort_definition_id)
event_date <- event_type <- NA # initialize to NA to silence R CMD check notes about undefined variables
x %>%
dplyr::select("subject_id", event_date = "cohort_start_date") %>%
dplyr::group_by(.data$subject_id) %>%
dplyr::mutate(event_type = -1L, start_ordinal = dplyr::row_number(.data$event_date)) %>%
dplyr::union_all(dplyr::transmute(x, .data$subject_id, event_date = .data$cohort_end_date, event_type = 1L, start_ordinal = NULL)) %>%
{if ("tbl_sql" %in% class(.)) dbplyr::window_order(., event_date, event_type) else dplyr::arrange(., .data$event_date, .data$event_type)} %>%
dplyr::mutate(start_ordinal = cummax(.data$start_ordinal), overall_ordinal = dplyr::row_number()) %>%
dplyr::filter((2 * .data$start_ordinal) == .data$overall_ordinal) %>%
dplyr::transmute(.data$subject_id, end_date = .data$event_date) %>%
dplyr::distinct() %>%
dplyr::inner_join(x, by = "subject_id") %>%
dplyr::filter(.data$end_date >= .data$cohort_start_date) %>%
dplyr::group_by(.data$subject_id, .data$cohort_start_date) %>%
dplyr::summarise(cohort_end_date = min(.data$end_date, na.rm = TRUE), .groups = "drop") %>%
dplyr::group_by(.data$subject_id, .data$cohort_end_date) %>%
dplyr::summarise(cohort_start_date = min(.data$cohort_start_date, na.rm = TRUE), .groups = "drop") %>%
dplyr::mutate(cohort_definition_id = .env$cohort_definition_id) %>%
dplyr::select("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date")
}
#' Intersect all cohorts in a single cohort table
#'
#' @param x A tbl reference to a cohort table
#' @param cohort_definition_id A number to use for the new cohort_definition_id
#'
#' `r lifecycle::badge("superseded")`
#'
#' @return A lazy query that when executed will resolve to a new cohort table with
#' one cohort_definition_id resulting from the intersection of all cohorts in the original
#' cohort table
#' @export
intersect_cohorts <- function(x, cohort_definition_id = 1L) {
lifecycle::deprecate_warn("1.1.0", "intersect_cohorts()", "cohort_intersect()")
checkmate::assert_class(x, "tbl")
checkmate::assert_integerish(cohort_definition_id, len = 1, lower = 0)
checkmate::assert_subset(c("cohort_definition_id", "subject_id", "cohort_start_date", "cohort_end_date"), colnames(x))
cohort_definition_id <- as.integer(cohort_definition_id)
# get the total number of cohorts we are intersecting together
n_cohorts_to_intersect <- x %>%
dplyr::distinct(.data$cohort_definition_id) %>%
dplyr::tally(name = "n") %>%
dplyr::pull("n")
checkmate::checkIntegerish(n_cohorts_to_intersect, len = 1)
# create every possible interval
candidate_intervals <- x %>%
dplyr::select("subject_id", cohort_date = "cohort_start_date") %>%
dplyr::union_all(dplyr::select(x, "subject_id", cohort_date = "cohort_end_date")) %>%
dplyr::group_by(.data$subject_id) %>%
dplyr::mutate(cohort_date_seq = dplyr::row_number(.data$cohort_date)) %>%
dplyr::mutate(candidate_start_date = .data$cohort_date,
candidate_end_date = dplyr::lead(.data$cohort_date, order_by = c("cohort_date", "cohort_date_seq")))
# get intervals that are contained within all of the cohorts
x %>%
dplyr::inner_join(candidate_intervals, by = "subject_id") %>%
dplyr::filter(.data$candidate_start_date >= .data$cohort_start_date,
.data$candidate_end_date <= .data$cohort_end_date) %>%
dplyr::distinct(.data$cohort_definition_id,
.data$subject_id,
.data$candidate_start_date,
.data$candidate_end_date) %>%
dplyr::group_by(.data$subject_id,
.data$candidate_start_date,
.data$candidate_end_date) %>%
dplyr::summarise(n_cohorts_interval_is_inside = dplyr::n(), .groups = "drop") %>%
# only keep intervals that are inside all cohorts we want to intersect (i.e. all cohorts in the input cohort table)
dplyr::filter(.data$n_cohorts_interval_is_inside == .env$n_cohorts_to_intersect) %>%
dplyr::mutate(cohort_definition_id = .env$cohort_definition_id) %>%
dplyr::select("cohort_definition_id",
"subject_id",
cohort_start_date = "candidate_start_date",
cohort_end_date = "candidate_end_date") %>%
union_cohorts(cohort_definition_id = cohort_definition_id)
}
#' @rdname intersect_cohorts
#' @export
intersectCohorts <- intersect_cohorts
#' @rdname union_cohorts
#' @export
unionCohorts <- union_cohorts
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/R/zzz-deprecated.R
|
## ----setup, include = FALSE---------------------------------------------------
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = tempdir())
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## -----------------------------------------------------------------------------
library(CDMConnector)
example_datasets()
con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir("GiBleed"))
DBI::dbListTables(con)
## -----------------------------------------------------------------------------
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
cdm
cdm$observation_period
## -----------------------------------------------------------------------------
cdm$person %>%
dplyr::glimpse()
## ----warning=FALSE------------------------------------------------------------
library(dplyr)
library(ggplot2)
cdm$person %>%
group_by(year_of_birth, gender_concept_id) %>%
summarize(n = n(), .groups = "drop") %>%
collect() %>%
mutate(sex = case_when(
gender_concept_id == 8532 ~ "Female",
gender_concept_id == 8507 ~ "Male"
)) %>%
ggplot(aes(y = n, x = year_of_birth, fill = sex)) +
geom_histogram(stat = "identity", position = "dodge") +
labs(x = "Year of birth",
y = "Person count",
title = "Age Distribution",
subtitle = cdm_name(cdm),
fill = NULL) +
theme_bw()
## ----warning=FALSE------------------------------------------------------------
cdm$condition_occurrence %>%
count(condition_concept_id, sort = T) %>%
left_join(cdm$concept, by = c("condition_concept_id" = "concept_id")) %>%
collect() %>%
select("condition_concept_id", "concept_name", "n")
## ----warning=FALSE------------------------------------------------------------
cdm$condition_occurrence %>%
filter(condition_concept_id == 4112343) %>%
distinct(person_id) %>%
inner_join(cdm$drug_exposure, by = "person_id") %>%
count(drug_concept_id, sort = TRUE) %>%
left_join(cdm$concept, by = c("drug_concept_id" = "concept_id")) %>%
collect() %>%
select("concept_name", "n")
## ----warning=FALSE------------------------------------------------------------
cdm$condition_occurrence %>%
filter(condition_concept_id == 4112343) %>%
distinct(person_id) %>%
inner_join(cdm$drug_exposure, by = "person_id") %>%
count(drug_concept_id, sort = TRUE) %>%
left_join(cdm$concept, by = c("drug_concept_id" = "concept_id")) %>%
show_query()
## -----------------------------------------------------------------------------
DBI::dbExecute(con, "create schema scratch;")
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "scratch")
## ----warning=FALSE------------------------------------------------------------
drugs <- cdm$condition_occurrence %>%
filter(condition_concept_id == 4112343) %>%
distinct(person_id) %>%
inner_join(cdm$drug_exposure, by = "person_id") %>%
count(drug_concept_id, sort = TRUE) %>%
left_join(cdm$concept, by = c("drug_concept_id" = "concept_id")) %>%
compute(name = "test", temporary = FALSE, overwrite = TRUE)
drugs %>% show_query()
drugs
## -----------------------------------------------------------------------------
cdm %>% cdm_select_tbl("person", "observation_period") # quoted names
cdm %>% cdm_select_tbl(person, observation_period) # unquoted names
cdm %>% cdm_select_tbl(starts_with("concept")) # tables that start with 'concept'
cdm %>% cdm_select_tbl(contains("era")) # tables that contain the substring 'era'
cdm %>% cdm_select_tbl(matches("person|period")) # regular expression
## -----------------------------------------------------------------------------
# pre-defined groups
cdm %>% cdm_select_tbl(tbl_group("clinical"))
cdm %>% cdm_select_tbl(tbl_group("vocab"))
## -----------------------------------------------------------------------------
tbl_group("default")
## -----------------------------------------------------------------------------
person_ids <- cdm$condition_occurrence %>%
filter(condition_concept_id == 255848) %>%
distinct(person_id) %>%
pull(person_id)
length(person_ids)
cdm_pneumonia <- cdm %>%
cdm_subset(person_id = person_ids)
tally(cdm_pneumonia$person) %>%
pull(n)
cdm_pneumonia$condition_occurrence %>%
distinct(person_id) %>%
tally() %>%
pull(n)
## -----------------------------------------------------------------------------
cdm_100person <- cdm_sample(cdm, n = 100)
tally(cdm_100person$person) %>% pull("n")
## -----------------------------------------------------------------------------
cdm_flatten(cdm_pneumonia,
domain = c("condition", "drug", "measurement")) %>%
collect()
## -----------------------------------------------------------------------------
local_cdm <- cdm_100person %>%
collect()
# The cdm tables are now dataframes
local_cdm$person[1:4, 1:4]
## ----eval=FALSE---------------------------------------------------------------
# save_path <- file.path(tempdir(), "tmp")
# dir.create(save_path)
#
# cdm %>%
# stow(path = save_path, format = "parquet")
#
# list.files(save_path)
## ----eval=FALSE---------------------------------------------------------------
# cdm <- cdm_from_files(save_path, cdm_name = "GI Bleed example data")
## -----------------------------------------------------------------------------
DBI::dbDisconnect(con, shutdown = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a01_getting-started.R
|
---
title: "Getting Started"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Getting Started}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = tempdir())
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
The Observational Medical Outcomes Partnership (OMOP) Common Data Model (CDM) is a commonly used format for storing and analyzing observational health data derived from electronic health records, insurance claims, registries, and other sources. Source data is "mapped" into the OMOP CDM format providing researchers with a standardized interface for querying and analyzing observational health data. The CDMConnector package provides tools for working with OMOP Common Data Model (CDM) tables using familiar [dplyr](https://dplyr.tidyverse.org) syntax and using the [tidyverse design principles](https://design.tidyverse.org/) popular in the R ecosystem.
This vignette is for new users of CDMConnector who have access to data already mapped into the OMOP CDM format. However, CDMConnector does provide several example synthetic datasets in the OMOP CDM format. To learn more about the OMOP CDM or the mapping process check out these resources.
- <https://academy.ehden.eu/>
- <https://ohdsi.github.io/TheBookOfOhdsi/>
- <https://www.ohdsi.org/join-the-journey/>
- <https://ohdsi.github.io/CommonDataModel/>
## Creating a reference to the OMOP CDM
Typically OMOP CDM datasets are stored in a database and can range in size from hundreds of patients with thousands of records to hundreds of millions of patients with billions of records. The Observational Health Data Sciences and Informatics (OHDSI) community supports a selection of popular database platforms including Postgres, Microsoft SQL Server, and Oracle, as well as cloud data platforms such as Amazon Redshift, Google BigQuery, Databricks, and Snowflake. The first step in using CDMConnector is to create a connection to your database from R. This can take some effort the first time you set up drivers. See the "Database Connection Examples" vignette or check out [Posit's database documentation](https://solutions.posit.co/connections/db/getting-started/connect-to-database/).
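For example, a connection to a Postgres server might look like the sketch below (not run; the database name, host, and credential environment variable names are placeholders you would replace with your own).
```{r, eval=FALSE}
con <- DBI::dbConnect(RPostgres::Postgres(),
                      dbname = "cdm",
                      host = "localhost",
                      user = Sys.getenv("DB_USER"),
                      password = Sys.getenv("DB_PASSWORD"))
```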
In our examples we will use some synthetic data from the [Synthea project](https://synthetichealth.github.io/synthea/) that has been mapped to the OMOP CDM format. We'll use the [duckdb](https://duckdb.org/) database, which is a file-based database similar to SQLite but with better date type support. To see all the example datasets available run `example_datasets()`.
```{r}
library(CDMConnector)
example_datasets()
con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir("GiBleed"))
DBI::dbListTables(con)
```
If you're using CDMConnector for the first time you may get a message about adding an environment variable `EUNOMIA_DATA_FOLDER`. To do this, simply create a new text file in your home directory called .Renviron and add the line `EUNOMIA_DATA_FOLDER="path/to/folder/where/we/can/store/example/data"`. If you run `usethis::edit_r_environ()` this file will be created and opened for you in RStudio.
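If you just want to set the variable for the current R session, you can also do so directly from R (a minimal sketch; the folder path is a placeholder):
```{r, eval=FALSE}
Sys.setenv(EUNOMIA_DATA_FOLDER = file.path(tempdir(), "eunomia_data"))
```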
After connecting to a database containing data mapped to the OMOP CDM, use `cdm_from_con` to create a CDM reference. This CDM reference is a single object that contains dplyr table references to each CDM table along with metadata about the CDM instance.
The cdm_schema is the schema in the database that contains the OMOP CDM tables and is required. All other arguments are optional.
```{r}
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
cdm
cdm$observation_period
```
Individual CDM table references can be accessed using \`\$\`.
```{r}
cdm$person %>%
dplyr::glimpse()
```
You can then use dplyr to query the cdm tables just as you would an R dataframe. The difference is that the data stays in the database and SQL code is dynamically generated and sent to the database backend. The goal is to allow users not to think too much about the database or SQL and instead use familiar R syntax to work with these large tables. `collect` will bring the data from the database into R. Be careful not to request a gigantic result set! In general it is better to aggregate data in the database, if possible, before bringing data into R.
```{r, warning=FALSE}
library(dplyr)
library(ggplot2)
cdm$person %>%
group_by(year_of_birth, gender_concept_id) %>%
summarize(n = n(), .groups = "drop") %>%
collect() %>%
mutate(sex = case_when(
gender_concept_id == 8532 ~ "Female",
gender_concept_id == 8507 ~ "Male"
)) %>%
ggplot(aes(y = n, x = year_of_birth, fill = sex)) +
geom_histogram(stat = "identity", position = "dodge") +
labs(x = "Year of birth",
y = "Person count",
title = "Age Distribution",
subtitle = cdm_name(cdm),
fill = NULL) +
theme_bw()
```
## Joining tables
Since the OMOP CDM is a relational data model, joins are very common in analytic code. All of the events in the OMOP CDM are recorded using integers representing standard "concepts". To see the text description of a concept, researchers need to join clinical tables to the concept vocabulary table. Every OMOP CDM should have a copy of the vocabulary used to map the data to the OMOP CDM format.
Here is an example query looking at the most common conditions in the CDM.
```{r, warning=FALSE}
cdm$condition_occurrence %>%
count(condition_concept_id, sort = T) %>%
left_join(cdm$concept, by = c("condition_concept_id" = "concept_id")) %>%
collect() %>%
select("condition_concept_id", "concept_name", "n")
```
Let's look at the most common drugs used by patients with "Acute viral pharyngitis".
```{r, warning=FALSE}
cdm$condition_occurrence %>%
filter(condition_concept_id == 4112343) %>%
distinct(person_id) %>%
inner_join(cdm$drug_exposure, by = "person_id") %>%
count(drug_concept_id, sort = TRUE) %>%
left_join(cdm$concept, by = c("drug_concept_id" = "concept_id")) %>%
collect() %>%
select("concept_name", "n")
```
To inspect the generated SQL use `show_query` from dplyr.
```{r, warning=FALSE}
cdm$condition_occurrence %>%
filter(condition_concept_id == 4112343) %>%
distinct(person_id) %>%
inner_join(cdm$drug_exposure, by = "person_id") %>%
count(drug_concept_id, sort = TRUE) %>%
left_join(cdm$concept, by = c("drug_concept_id" = "concept_id")) %>%
show_query()
```
These are a few simple queries. More complex queries can be built by combining simple queries like the ones above and other analytic packages provide functions that implement common analytic use cases.
For example a "cohort definition" is a set of criteria that persons must satisfy that can be quite complex. The "Working with Cohorts" vignette describes creating and using cohorts with CDMConnector.
## Saving query results to the database
Sometimes it is helpful to save query results to the database instead of reading the result into R. dplyr provides the `compute` function, but due to differences between database systems CDMConnector exports its own method that handles these slight differences. Internally CDMConnector runs the `compute_query` function, which is tested across the OHDSI supported database platforms.
If we are writing data to the CDM database we need to add one more argument when creating our cdm reference object, the "write_schema". This is a schema in the database where you have write permissions. Typically this should be a separate schema from the "cdm_schema".
```{r}
DBI::dbExecute(con, "create schema scratch;")
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "scratch")
```
```{r, warning=FALSE}
drugs <- cdm$condition_occurrence %>%
filter(condition_concept_id == 4112343) %>%
distinct(person_id) %>%
inner_join(cdm$drug_exposure, by = "person_id") %>%
count(drug_concept_id, sort = TRUE) %>%
left_join(cdm$concept, by = c("drug_concept_id" = "concept_id")) %>%
compute(name = "test", temporary = FALSE, overwrite = TRUE)
drugs %>% show_query()
drugs
```
We can see that the query has been saved to a new table in the scratch schema. `compute` returns a dplyr reference to this table.
## Selecting a subset of CDM tables
If you do not need references to all tables you can easily select only a subset of tables to include in the CDM reference. The `cdm_select_tbl` function supports the [tidyselect selection language](https://tidyselect.r-lib.org/reference/language.html) and provides a new selection helper: `tbl_group`.
```{r}
cdm %>% cdm_select_tbl("person", "observation_period") # quoted names
cdm %>% cdm_select_tbl(person, observation_period) # unquoted names
cdm %>% cdm_select_tbl(starts_with("concept")) # tables that start with 'concept'
cdm %>% cdm_select_tbl(contains("era")) # tables that contain the substring 'era'
cdm %>% cdm_select_tbl(matches("person|period")) # regular expression
```
Predefined sets of tables can also be selected using `tbl_group` which supports several subsets of the CDM: "all", "clinical", "vocab", "derived", and "default".
```{r}
# pre-defined groups
cdm %>% cdm_select_tbl(tbl_group("clinical"))
cdm %>% cdm_select_tbl(tbl_group("vocab"))
```
The default set of CDM tables included in a CDM object is:
```{r}
tbl_group("default")
```
## Subsetting a CDM
Sometimes it is helpful to subset a CDM to a specific set of persons or simply downsample the data to a more reasonable size. Let's subset our cdm to just persons with Pneumonia (concept_id 255848). This works best when the number of persons in the subset is quite small and the database has indexes on the "person_id" columns of each table.
```{r}
person_ids <- cdm$condition_occurrence %>%
filter(condition_concept_id == 255848) %>%
distinct(person_id) %>%
pull(person_id)
length(person_ids)
cdm_pneumonia <- cdm %>%
cdm_subset(person_id = person_ids)
tally(cdm_pneumonia$person) %>%
pull(n)
cdm_pneumonia$condition_occurrence %>%
distinct(person_id) %>%
tally() %>%
pull(n)
```
Alternatively if we simply want a random sample of the entire CDM we can use `cdm_sample`.
```{r}
cdm_100person <- cdm_sample(cdm, n = 100)
tally(cdm_100person$person) %>% pull("n")
```
## Flatten a CDM
An OMOP CDM is a relational data model. Sometimes it is helpful to flatten this relational structure into a "tidy" dataframe with one row per observation. This transformation should only be done with a small number of persons and events.
```{r}
cdm_flatten(cdm_pneumonia,
domain = c("condition", "drug", "measurement")) %>%
collect()
```
## Saving a local copy of a CDM
We can use `collect` to bring the whole cdm object into R as dataframes. If you would like to save a subset of the CDM and then restore it in R as a local CDM object, CDMConnector provides the `stow` and `cdm_from_files` functions to do this.
```{r}
local_cdm <- cdm_100person %>%
collect()
# The cdm tables are now dataframes
local_cdm$person[1:4, 1:4]
```
```{r, eval=FALSE}
save_path <- file.path(tempdir(), "tmp")
dir.create(save_path)
cdm %>%
stow(path = save_path, format = "parquet")
list.files(save_path)
```
Restore a saved cdm object from files with `cdm_from_files`.
```{r, eval=FALSE}
cdm <- cdm_from_files(save_path, cdm_name = "GI Bleed example data")
```
## Closing connections
Close the database connection with `dbDisconnect`. After a connection is closed any cdm objects created with that connection can no longer be used.
```{r}
DBI::dbDisconnect(con, shutdown = TRUE)
```
## Summary
CDMConnector provides an interface for working with observational health data in the OMOP CDM format from R. Check out the other vignettes for more details about the package.
<div style="margin-bottom:3cm;"></div>
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a01_getting-started.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
eval = rlang::is_installed("CirceR") && rlang::is_installed("Capr"),
comment = "#>"
)
library(CDMConnector)
library(dplyr, warn.conflicts = FALSE)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
## -----------------------------------------------------------------------------
path_to_cohort_json_files <- system.file("cohorts1", package = "CDMConnector")
list.files(path_to_cohort_json_files)
readr::read_csv(file.path(path_to_cohort_json_files, "CohortsToCreate.csv"),
show_col_types = FALSE)
## -----------------------------------------------------------------------------
library(CDMConnector)
path_to_cohort_json_files <- system.file("example_cohorts",
package = "CDMConnector")
list.files(path_to_cohort_json_files)
con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir("GiBleed"))
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
cohort_details <- read_cohort_set(path_to_cohort_json_files) |>
mutate(cohort_name = snakecase::to_snake_case(cohort_name))
cohort_details
cdm <- generate_cohort_set(
cdm = cdm,
cohort_set = cohort_details,
name = "study_cohorts"
)
cdm$study_cohorts
## -----------------------------------------------------------------------------
cohort_count(cdm$study_cohorts)
cohort_set(cdm$study_cohorts)
attrition(cdm$study_cohorts)
## ----eval=FALSE---------------------------------------------------------------
# cdm_gibleed <- cdm %>%
# cdm_subset_cohort(cohort_table = "study_cohorts")
## -----------------------------------------------------------------------------
library(Capr)
gibleed_concept_set <- cs(192671, name = "gibleed")
gibleed_definition <- cohort(
entry = conditionOccurrence(gibleed_concept_set)
)
gibleed_male_definition <- cohort(
entry = conditionOccurrence(gibleed_concept_set, male())
)
# create a named list of Capr cohort definitions
cohort_details = list(gibleed = gibleed_definition,
gibleed_male = gibleed_male_definition)
# generate cohorts
cdm <- generate_cohort_set(
cdm,
cohort_set = cohort_details,
name = "gibleed" # name for the cohort table in the cdm
)
cdm$gibleed
## -----------------------------------------------------------------------------
DBI::dbDisconnect(con, shutdown = TRUE)
## -----------------------------------------------------------------------------
library(CDMConnector)
con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
cdm <- cdm_from_con(con, cdm_schema = "main", write_schema = "main")
cohort_set <- read_cohort_set(system.file("cohorts3", package = "CDMConnector"))
cdm <- generate_cohort_set(cdm, cohort_set, name = "cohort")
cdm$cohort
cohort_count(cdm$cohort)
## -----------------------------------------------------------------------------
library(dplyr)
cdm$cohort_subset <- cdm$cohort %>%
# only keep persons who are in the cohort at least 28 days
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 28) %>%
# optionally you can modify the cohort_id
mutate(cohort_definition_id = 100 + cohort_definition_id) %>%
compute(name = "cohort_subset", temporary = FALSE, overwrite = TRUE) %>%
new_generated_cohort_set()
cdm$cohort_subset
cohort_count(cdm$cohort_subset)
## -----------------------------------------------------------------------------
days_in_cohort <- cdm$cohort %>%
filter(cohort_definition_id %in% c(1,5)) %>%
mutate(days_in_cohort = !!datediff("cohort_start_date", "cohort_end_date")) %>%
count(cohort_definition_id, days_in_cohort) %>%
collect()
days_in_cohort
## -----------------------------------------------------------------------------
cdm$cohort_subset <- cdm$cohort %>%
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 14) %>%
mutate(cohort_definition_id = 10 + cohort_definition_id) %>%
union_all(
cdm$cohort %>%
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 21) %>%
mutate(cohort_definition_id = 100 + cohort_definition_id)
) %>%
union_all(
cdm$cohort %>%
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 28) %>%
mutate(cohort_definition_id = 1000 + cohort_definition_id)
) %>%
compute(name = "cohort_subset", temporary = FALSE, overwrite = TRUE) %>%
new_generated_cohort_set() # this function creates the cohort object and metadata
cdm$cohort_subset %>%
mutate(days_in_cohort = !!datediff("cohort_start_date", "cohort_end_date")) %>%
group_by(cohort_definition_id) %>%
summarize(mean_days_in_cohort = mean(days_in_cohort, na.rm = TRUE)) %>%
collect() %>%
arrange(mean_days_in_cohort)
## -----------------------------------------------------------------------------
library(dplyr, warn.conflicts = FALSE)
cdm <- generate_concept_cohort_set(
cdm,
concept_set = list(gibleed = 192671),
name = "gibleed2", # name of the cohort table
limit = "all", # use all occurrences of the concept instead of just the first
end = 10 # set explicit cohort end date 10 days after start
)
cdm$gibleed2 <- cdm$gibleed2 %>%
semi_join(
filter(cdm$person, gender_concept_id == 8507),
by = c("subject_id" = "person_id")
) %>%
record_cohort_attrition(reason = "Male")
attrition(cdm$gibleed2)
## ----fig.width= 7, fig.height=10----------------------------------------------
library(visR)
gibleed2_attrition <- CDMConnector::attrition(cdm$gibleed2) %>%
dplyr::select(Criteria = "reason", `Remaining N` = "number_subjects")
class(gibleed2_attrition) <- c("attrition", class(gibleed2_attrition))
visr(gibleed2_attrition)
## -----------------------------------------------------------------------------
cohort <- dplyr::tibble(
cohort_definition_id = 1L,
subject_id = 1L,
cohort_start_date = as.Date("1999-01-01"),
cohort_end_date = as.Date("2001-01-01")
)
cohort
## -----------------------------------------------------------------------------
library(omopgenerics)
cdm <- insertTable(cdm = cdm, name = "cohort", table = cohort, overwrite = TRUE)
cdm$cohort
## -----------------------------------------------------------------------------
cdm$cohort <- newCohortTable(cdm$cohort)
## -----------------------------------------------------------------------------
cohort_count(cdm$cohort)
cohort_set(cdm$cohort)
attrition(cdm$cohort)
## -----------------------------------------------------------------------------
cdm <- insertTable(cdm = cdm, name = "cohort2", table = cohort, overwrite = TRUE)
cdm$cohort2 <- newCohortTable(cdm$cohort2)
settings(cdm$cohort2)
cohort_set <- data.frame(cohort_definition_id = 1L,
cohort_name = "made_up_cohort")
cdm$cohort2 <- newCohortTable(cdm$cohort2, cohortSetRef = cohort_set)
settings(cdm$cohort2)
## -----------------------------------------------------------------------------
DBI::dbDisconnect(con, shutdown = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a02_cohorts.R
|
---
title: "Working with cohorts"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Working with cohorts}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
eval = rlang::is_installed("CirceR") && rlang::is_installed("Capr"),
comment = "#>"
)
library(CDMConnector)
library(dplyr, warn.conflicts = FALSE)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
```
Cohorts are a fundamental building block for observational health data analysis. A "cohort" is a set of persons satisfying one or more inclusion criteria for a duration of time. If you are familiar with the idea of sets in math then a cohort can be nicely represented as a set of person-days. In the OMOP Common Data Model we represent cohorts using a table with four columns.
| cohort_definition_id | subject_id | cohort_start_date | cohort_end_date |
|----------------------|------------|-------------------|-----------------|
| 1 | 1000 | 2020-01-01 | 2020-05-01 |
| 1                    | 1000       | 2021-06-01        | 2021-07-01      |
| 1 | 2000 | 2020-03-01 | 2020-09-01 |
| 2 | 1000 | 2020-02-01 | 2020-03-01 |
: An example cohort table
A cohort table can contain multiple cohorts and each cohort can have multiple persons. There can even be multiple records for the same person in a single cohort as long as the date ranges do not overlap. In the same way that an element is either in a set or not, a single person-day is either in a cohort or not. For a more comprehensive treatment of cohorts in OHDSI check out the Cohorts chapter in [The Book of OHDSI](https://ohdsi.github.io/TheBookOfOhdsi/Cohorts.html).
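To make the non-overlap rule concrete, the sketch below (assuming `cohort` is a local data frame with the four columns above) counts records that start on or before the previous record's end date for the same person and cohort; a valid cohort table returns a count of zero.
```{r, eval=FALSE}
library(dplyr)

cohort %>%
  group_by(cohort_definition_id, subject_id) %>%
  arrange(cohort_start_date, .by_group = TRUE) %>%
  # a record overlaps if it starts on or before the previous record ends
  filter(cohort_start_date <= lag(cohort_end_date)) %>%
  ungroup() %>%
  count()
```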
## Cohort Generation
The $n \times 4$ cohort table is created through the process of cohort *generation*. Generating a cohort on a specific CDM dataset means combining a *cohort definition* with a CDM to produce a cohort table. The standardization provided by the OMOP CDM allows researchers to generate the same cohort definition on any OMOP CDM dataset.
A cohort definition is an expression of the rules governing the inclusion/exclusion of person-days in the cohort. There are three common ways to create cohort definitions for the OMOP CDM.
1. The Atlas cohort builder
2. The Capr R package
3. Custom SQL and/or R code
Atlas is a web application that provides a graphical user interface for creating cohort definitions. To get started with Atlas check out the free course on [EHDEN Academy](https://academy.ehden.eu/course/index.php) and the demo at <https://atlas-demo.ohdsi.org/>.
Capr is an R package that provides a code-based interface for creating cohort definitions. The options available in Capr exactly match the options available in Atlas and the resulting cohort tables should be identical.
There are times when more customization is needed and it is possible to use bespoke SQL or dplyr code to build a cohort. CDMConnector provides the `generate_concept_cohort_set` function for quickly building simple cohorts that can then be a starting point for further subsetting.
Atlas cohorts are represented using JSON text files. To "generate" one or more Atlas cohorts on a cdm object use the `read_cohort_set` function to first read a folder of Atlas cohort JSON files into R. Then create the cohort table with `generate_cohort_set`. There can be an optional CSV file called "CohortsToCreate.csv" in the folder that specifies the cohort IDs and names to use. If this file doesn't exist IDs will be assigned automatically using alphabetical order of the filenames.
```{r}
path_to_cohort_json_files <- system.file("cohorts1", package = "CDMConnector")
list.files(path_to_cohort_json_files)
readr::read_csv(file.path(path_to_cohort_json_files, "CohortsToCreate.csv"),
show_col_types = FALSE)
```
### Atlas cohort definitions
First we need to create our CDM object. Note that we will need to specify a `write_schema` when creating the object. Cohort tables will go into the CDM's `write_schema`.
```{r}
library(CDMConnector)
path_to_cohort_json_files <- system.file("example_cohorts",
package = "CDMConnector")
list.files(path_to_cohort_json_files)
con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir("GiBleed"))
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
cohort_details <- read_cohort_set(path_to_cohort_json_files) |>
mutate(cohort_name = snakecase::to_snake_case(cohort_name))
cohort_details
cdm <- generate_cohort_set(
cdm = cdm,
cohort_set = cohort_details,
name = "study_cohorts"
)
cdm$study_cohorts
```
The generated cohort has associated metadata tables. We can access these with utility functions.
- `cohort_count` contains the person and record counts for each cohort in the cohort set
- `settings` table contains the cohort id and cohort name
- `attrition` table contains the attrition information (persons, and records dropped at each sequential inclusion rule)
```{r}
cohort_count(cdm$study_cohorts)
cohort_set(cdm$study_cohorts)
attrition(cdm$study_cohorts)
```
Note that this cohort table is still in the database, so it can be quite large. We can also join it to other CDM tables or subset the entire cdm to just the persons in the cohort.
```{r, eval=FALSE}
cdm_gibleed <- cdm %>%
cdm_subset_cohort(cohort_table = "study_cohorts")
```
### Capr cohort definitions
Capr allows us to use R code to create the same cohorts that can be created in Atlas. This is helpful when you need to create a large number of similar cohort definitions. Below we create a single cohort definition with one inclusion criterion.
`generate_cohort_set` will accept a named list of Capr cohort definitions.
```{r}
library(Capr)
gibleed_concept_set <- cs(192671, name = "gibleed")
gibleed_definition <- cohort(
entry = conditionOccurrence(gibleed_concept_set)
)
gibleed_male_definition <- cohort(
entry = conditionOccurrence(gibleed_concept_set, male())
)
# create a named list of Capr cohort definitions
cohort_details = list(gibleed = gibleed_definition,
gibleed_male = gibleed_male_definition)
# generate cohorts
cdm <- generate_cohort_set(
cdm,
cohort_set = cohort_details,
name = "gibleed" # name for the cohort table in the cdm
)
cdm$gibleed
```
We should get the exact same result from Capr and Atlas if the definitions are equivalent.
Learn more about Capr at the package website <https://ohdsi.github.io/Capr/>.
```{r}
DBI::dbDisconnect(con, shutdown = TRUE)
```
### Subset a cohort
Suppose you have a generated cohort and you would like to create a new cohort that is a subset of the first. This can be done by filtering the existing cohort table and registering the result with `new_generated_cohort_set`.
First we will generate an example cohort set and then create a new cohort based on filtering the Atlas cohort.
```{r}
library(CDMConnector)
con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
cdm <- cdm_from_con(con, cdm_schema = "main", write_schema = "main")
cohort_set <- read_cohort_set(system.file("cohorts3", package = "CDMConnector"))
cdm <- generate_cohort_set(cdm, cohort_set, name = "cohort")
cdm$cohort
cohort_count(cdm$cohort)
```
As an example we will take only people in the cohort that have a cohort duration of at least 28 days (4 weeks).
Using dplyr we can write this query and save the result in a new table in the cdm.
```{r}
library(dplyr)
cdm$cohort_subset <- cdm$cohort %>%
# only keep persons who are in the cohort at least 28 days
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 28) %>%
# optionally you can modify the cohort_id
mutate(cohort_definition_id = 100 + cohort_definition_id) %>%
compute(name = "cohort_subset", temporary = FALSE, overwrite = TRUE) %>%
new_generated_cohort_set()
cdm$cohort_subset
cohort_count(cdm$cohort_subset)
```
In this case we can see that cohorts 1 and 5 were dropped completely and some patients were dropped from cohorts 2, 3, and 4.
Let's confirm that everyone in cohorts 1 and 5 were in the cohort for less than 28 days.
```{r}
days_in_cohort <- cdm$cohort %>%
filter(cohort_definition_id %in% c(1,5)) %>%
mutate(days_in_cohort = !!datediff("cohort_start_date", "cohort_end_date")) %>%
count(cohort_definition_id, days_in_cohort) %>%
collect()
days_in_cohort
```
We have confirmed that everyone in cohorts 1 and 5 was in the cohort for fewer than 28 days.
Now suppose we would like to create a new cohort table with three different versions of the cohorts in the original cohort table. We will keep persons who are in the cohort at least 2 weeks, 3 weeks, and 4 weeks. We can simply write some custom dplyr to create the table and then call `new_generated_cohort_set` just like in the previous example.
```{r}
cdm$cohort_subset <- cdm$cohort %>%
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 14) %>%
mutate(cohort_definition_id = 10 + cohort_definition_id) %>%
union_all(
cdm$cohort %>%
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 21) %>%
mutate(cohort_definition_id = 100 + cohort_definition_id)
) %>%
union_all(
cdm$cohort %>%
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 28) %>%
mutate(cohort_definition_id = 1000 + cohort_definition_id)
) %>%
compute(name = "cohort_subset", temporary = FALSE, overwrite = TRUE) %>%
new_generated_cohort_set() # this function creates the cohort object and metadata
cdm$cohort_subset %>%
mutate(days_in_cohort = !!datediff("cohort_start_date", "cohort_end_date")) %>%
group_by(cohort_definition_id) %>%
summarize(mean_days_in_cohort = mean(days_in_cohort, na.rm = TRUE)) %>%
collect() %>%
arrange(mean_days_in_cohort)
```
This is an example of creating new cohorts from existing cohorts using CDMConnector. There is a lot of flexibility with this approach. Next we will look at completely custom cohort creation which is quite similar.
### Custom Cohort Creation
Sometimes you may want to create cohorts that cannot be easily expressed using Atlas or Capr. In these situations you can implement cohort creation using SQL or R. See the chapter in [The Book of OHDSI](https://ohdsi.github.io/TheBookOfOhdsi/Cohorts.html#implementing-the-cohort-using-sql) for details on using SQL to create cohorts. CDMConnector provides a helper function to build simple cohorts from a list of OMOP concepts. `generate_concept_cohort_set` accepts a named list of concept sets and will create cohorts based on those concept sets. While this function does not allow for inclusion/exclusion criteria in the initial definition, additional criteria can be applied "manually" after the initial generation.
```{r}
library(dplyr, warn.conflicts = FALSE)
cdm <- generate_concept_cohort_set(
cdm,
concept_set = list(gibleed = 192671),
name = "gibleed2", # name of the cohort table
limit = "all", # use all occurrences of the concept instead of just the first
end = 10 # set explicit cohort end date 10 days after start
)
cdm$gibleed2 <- cdm$gibleed2 %>%
semi_join(
filter(cdm$person, gender_concept_id == 8507),
by = c("subject_id" = "person_id")
) %>%
record_cohort_attrition(reason = "Male")
attrition(cdm$gibleed2)
```
We could visualise attrition using a package like visR.
```{r, fig.width= 7, fig.height=10}
library(visR)
gibleed2_attrition <- CDMConnector::attrition(cdm$gibleed2) %>%
dplyr::select(Criteria = "reason", `Remaining N` = "number_subjects")
class(gibleed2_attrition) <- c("attrition", class(gibleed2_attrition))
visr(gibleed2_attrition)
```
In the above example we built a cohort table from a concept set. The cohort essentially captures patient-time based on the presence or absence of OMOP standard concept IDs. We then manually applied an inclusion criterion and recorded a new attrition record in the cohort. To learn more about this approach to building cohorts check out the [PatientProfiles](https://darwin-eu-dev.github.io/PatientProfiles/) R package.
You can also create a generated cohort set using any method you choose. As long as the table is in the CDM database and has the four required columns it can be added to the CDM object as a generated cohort set.
Suppose for example our cohort table is
```{r}
cohort <- dplyr::tibble(
cohort_definition_id = 1L,
subject_id = 1L,
cohort_start_date = as.Date("1999-01-01"),
cohort_end_date = as.Date("2001-01-01")
)
cohort
```
First make sure the table is in the database and create a dplyr table reference to it and add it to the CDM object.
```{r}
library(omopgenerics)
cdm <- insertTable(cdm = cdm, name = "cohort", table = cohort, overwrite = TRUE)
cdm$cohort
```
To make this a true generated cohort object use the `newCohortTable` function.
```{r}
cdm$cohort <- newCohortTable(cdm$cohort)
```
We can see that this cohort now has the class "cohort_table" as well as the various metadata tables.
```{r}
cohort_count(cdm$cohort)
cohort_set(cdm$cohort)
attrition(cdm$cohort)
```
If you would like to override the attribute tables then pass additional dataframes to `newCohortTable`
```{r}
cdm <- insertTable(cdm = cdm, name = "cohort2", table = cohort, overwrite = TRUE)
cdm$cohort2 <- newCohortTable(cdm$cohort2)
settings(cdm$cohort2)
cohort_set <- data.frame(cohort_definition_id = 1L,
cohort_name = "made_up_cohort")
cdm$cohort2 <- newCohortTable(cdm$cohort2, cohortSetRef = cohort_set)
settings(cdm$cohort2)
```
```{r}
DBI::dbDisconnect(con, shutdown = TRUE)
```
Cohort building is fundamental to observational health analysis and CDMConnector supports different ways of creating cohorts. As long as your cohort table has the required structure and columns you can add it to the cdm with the `new_generated_cohort_set` function and use it in any downstream OHDSI analytic packages.
<div style="margin-bottom:3cm;"></div>
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a02_cohorts.Rmd
|
## ----include = FALSE----------------------------------------------------------
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
# eval = FALSE,
comment = "#>"
)
## -----------------------------------------------------------------------------
library(CDMConnector)
library(dplyr, warn.conflicts = FALSE)
library(ggplot2)
## -----------------------------------------------------------------------------
con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
cdm
## ----message=FALSE------------------------------------------------------------
cdm$person %>%
select(year_of_birth) %>%
collect() %>%
ggplot(aes(x = year_of_birth)) +
geom_histogram(bins = 30)
## -----------------------------------------------------------------------------
cdm$observation_period %>%
select(observation_period_start_date, observation_period_end_date) %>%
  mutate(observation_period = (observation_period_end_date - observation_period_start_date)/365.25) %>%
select(observation_period) %>%
collect() %>%
ggplot(aes(x = observation_period)) +
geom_boxplot()
## -----------------------------------------------------------------------------
cdm$person %>%
tally() %>%
show_query()
## -----------------------------------------------------------------------------
cdm$person %>%
summarise(median(year_of_birth))%>%
show_query()
## ----warning=FALSE------------------------------------------------------------
cdm$person %>%
mutate(gender = case_when(
gender_concept_id == "8507" ~ "Male",
gender_concept_id == "8532" ~ "Female",
TRUE ~ NA_character_))%>%
show_query()
## -----------------------------------------------------------------------------
DBI::dbDisconnect(con, shutdown = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a03_dbplyr.R
|
---
title: "CDMConnector and dbplyr"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{CDMConnector and dbplyr}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
# eval = FALSE,
comment = "#>"
)
```
## Set up
First let's load the required packages for the code in this vignette. If you haven't already installed them, they can be installed using `install.packages()`
```{r}
library(CDMConnector)
library(dplyr, warn.conflicts = FALSE)
library(ggplot2)
```
## Creating the cdm reference
Now let's connect to a duckdb database with the Eunomia data (https://github.com/OHDSI/Eunomia).
```{r}
con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
cdm
```
This cdm object is now what we'll use going forward. It provides a reference to the OMOP CDM tables. We can see that these tables are still in the database, but now we have a reference to each of the ones we might want to use in our analysis. For example, the person table can be referenced like so
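```{r}
cdm$person
```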
## Putting it all together
Say we want to make a histogram of year of birth in the person table. We can select that variable, bring it into memory, and then use ggplot to make the histogram.
```{r, message=FALSE}
cdm$person %>%
select(year_of_birth) %>%
collect() %>%
ggplot(aes(x = year_of_birth)) +
geom_histogram(bins = 30)
```
If we wanted to make a boxplot for length of observation periods we could do the computation on the database side, bring the new variable into memory, and use ggplot to produce the boxplot
```{r}
cdm$observation_period %>%
select(observation_period_start_date, observation_period_end_date) %>%
  mutate(observation_period = (observation_period_end_date - observation_period_start_date)/365.25) %>%
select(observation_period) %>%
collect() %>%
ggplot(aes(x = observation_period)) +
geom_boxplot()
```
## Behind the scenes
We use `show_query` to check the SQL that is being run against duckdb
```{r}
cdm$person %>%
tally() %>%
show_query()
```
```{r}
cdm$person %>%
summarise(median(year_of_birth))%>%
show_query()
```
```{r, warning=FALSE}
cdm$person %>%
mutate(gender = case_when(
gender_concept_id == "8507" ~ "Male",
gender_concept_id == "8532" ~ "Female",
TRUE ~ NA_character_))%>%
show_query()
```
```{r}
DBI::dbDisconnect(con, shutdown = TRUE)
```
<div style="margin-bottom:3cm;"></div>
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a03_dbplyr.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = FALSE
)
## ----eval=FALSE---------------------------------------------------------------
# con <- DBI::dbConnect(RPostgres::Postgres(),
# dbname = Sys.getenv("CDM5_POSTGRESQL_DBNAME"),
# host = Sys.getenv("CDM5_POSTGRESQL_HOST"),
# user = Sys.getenv("CDM5_POSTGRESQL_USER"),
# password = Sys.getenv("CDM5_POSTGRESQL_PASSWORD"))
#
# cdm <- cdm_from_con(con,
# cdm_schema = Sys.getenv("CDM5_POSTGRESQL_CDM_SCHEMA"),
# write_schema = Sys.getenv("CDM5_POSTGRESQL_SCRATCH_SCHEMA"))
# DBI::dbDisconnect(con)
## ----eval=FALSE---------------------------------------------------------------
# con <- DBI::dbConnect(RPostgres::Redshift(),
# dbname = Sys.getenv("CDM5_REDSHIFT_DBNAME"),
# host = Sys.getenv("CDM5_REDSHIFT_HOST"),
# port = Sys.getenv("CDM5_REDSHIFT_PORT"),
# user = Sys.getenv("CDM5_REDSHIFT_USER"),
# password = Sys.getenv("CDM5_REDSHIFT_PASSWORD"))
#
# cdm <- cdm_from_con(con,
# cdm_schema = Sys.getenv("CDM5_REDSHIFT_CDM_SCHEMA"),
# write_schema = Sys.getenv("CDM5_REDSHIFT_SCRATCH_SCHEMA"))
# DBI::dbDisconnect(con)
## ----eval=FALSE---------------------------------------------------------------
# con <- DBI::dbConnect(odbc::odbc(),
# Driver = "ODBC Driver 18 for SQL Server",
# Server = Sys.getenv("CDM5_SQL_SERVER_SERVER"),
# Database = Sys.getenv("CDM5_SQL_SERVER_CDM_DATABASE"),
# UID = Sys.getenv("CDM5_SQL_SERVER_USER"),
# PWD = Sys.getenv("CDM5_SQL_SERVER_PASSWORD"),
# TrustServerCertificate="yes",
# Port = 1433)
#
# cdm <- cdm_from_con(con,
# cdm_schema = c("tempdb", "dbo"),
# write_schema = c("ATLAS", "RESULTS"))
# DBI::dbDisconnect(con)
## ----eval=FALSE---------------------------------------------------------------
# con <- DBI::dbConnect(odbc::odbc(), "SQL")
# cdm <- cdm_from_con(con,
# cdm_schema = c("tempdb", "dbo"),
# write_schema = c("ATLAS", "RESULTS"))
# DBI::dbDisconnect(con)
## ----eval=FALSE---------------------------------------------------------------
# con <- DBI::dbConnect(odbc::odbc(),
# SERVER = Sys.getenv("SNOWFLAKE_SERVER"),
# UID = Sys.getenv("SNOWFLAKE_USER"),
# PWD = Sys.getenv("SNOWFLAKE_PASSWORD"),
# DATABASE = Sys.getenv("SNOWFLAKE_DATABASE"),
# WAREHOUSE = Sys.getenv("SNOWFLAKE_WAREHOUSE"),
# DRIVER = Sys.getenv("SNOWFLAKE_DRIVER"))
# cdm <- cdm_from_con(con,
# cdm_schema = c("OMOP_SYNTHETIC_DATASET", "CDM53"),
# write_schema = c("ATLAS", "RESULTS"))
# DBI::dbDisconnect(con)
## ----eval=FALSE---------------------------------------------------------------
# con <- DBI::dbConnect(duckdb::duckdb(),
# dbdir=Sys.getenv("CDM5_DUCKDB_FILE"))
# cdm <- cdm_from_con(con,
# cdm_schema = "main",
# write_schema = "main")
# DBI::dbDisconnect(con)
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a04_DBI_connection_examples.R
|
---
title: "DBI connection examples"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{DBI connection examples}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = FALSE
)
```
The following connection examples are provided for reference.
### Postgres
```{r, eval=FALSE}
con <- DBI::dbConnect(RPostgres::Postgres(),
dbname = Sys.getenv("CDM5_POSTGRESQL_DBNAME"),
host = Sys.getenv("CDM5_POSTGRESQL_HOST"),
user = Sys.getenv("CDM5_POSTGRESQL_USER"),
password = Sys.getenv("CDM5_POSTGRESQL_PASSWORD"))
cdm <- cdm_from_con(con,
cdm_schema = Sys.getenv("CDM5_POSTGRESQL_CDM_SCHEMA"),
write_schema = Sys.getenv("CDM5_POSTGRESQL_SCRATCH_SCHEMA"))
DBI::dbDisconnect(con)
```
### Redshift
Redshift is almost identical to Postgres.
```{r, eval=FALSE}
con <- DBI::dbConnect(RPostgres::Redshift(),
dbname = Sys.getenv("CDM5_REDSHIFT_DBNAME"),
host = Sys.getenv("CDM5_REDSHIFT_HOST"),
port = Sys.getenv("CDM5_REDSHIFT_PORT"),
user = Sys.getenv("CDM5_REDSHIFT_USER"),
password = Sys.getenv("CDM5_REDSHIFT_PASSWORD"))
cdm <- cdm_from_con(con,
cdm_schema = Sys.getenv("CDM5_REDSHIFT_CDM_SCHEMA"),
write_schema = Sys.getenv("CDM5_REDSHIFT_SCRATCH_SCHEMA"))
DBI::dbDisconnect(con)
```
### SQL Server
Using odbc with SQL Server requires driver setup described [here](https://solutions.posit.co/connections/db/r-packages/odbc/). Note, you'll likely need to [download the ODBC Driver for SQL Server](https://learn.microsoft.com/en-us/sql/connect/odbc/download-odbc-driver-for-sql-server?view=sql-server-ver16).
```{r, eval=FALSE}
con <- DBI::dbConnect(odbc::odbc(),
Driver = "ODBC Driver 18 for SQL Server",
Server = Sys.getenv("CDM5_SQL_SERVER_SERVER"),
Database = Sys.getenv("CDM5_SQL_SERVER_CDM_DATABASE"),
UID = Sys.getenv("CDM5_SQL_SERVER_USER"),
PWD = Sys.getenv("CDM5_SQL_SERVER_PASSWORD"),
TrustServerCertificate="yes",
Port = 1433)
cdm <- cdm_from_con(con,
cdm_schema = c("tempdb", "dbo"),
write_schema = c("ATLAS", "RESULTS"))
DBI::dbDisconnect(con)
```
The connection to SQL Server can be simplified by configuring a DSN. See [here](https://www.r-bloggers.com/2018/05/setting-up-an-odbc-connection-with-ms-sql-server-on-windows/) for instructions on how to set up the DSN. If we named it "SQL", our connection is then simplified to.
```{r, eval=FALSE}
con <- DBI::dbConnect(odbc::odbc(), "SQL")
cdm <- cdm_from_con(con,
cdm_schema = c("tempdb", "dbo"),
write_schema = c("ATLAS", "RESULTS"))
DBI::dbDisconnect(con)
```
### Snowflake
We can use the odbc package to connect to snowflake.
```{r, eval=FALSE}
con <- DBI::dbConnect(odbc::odbc(),
SERVER = Sys.getenv("SNOWFLAKE_SERVER"),
UID = Sys.getenv("SNOWFLAKE_USER"),
PWD = Sys.getenv("SNOWFLAKE_PASSWORD"),
DATABASE = Sys.getenv("SNOWFLAKE_DATABASE"),
WAREHOUSE = Sys.getenv("SNOWFLAKE_WAREHOUSE"),
DRIVER = Sys.getenv("SNOWFLAKE_DRIVER"))
cdm <- cdm_from_con(con,
cdm_schema = c("OMOP_SYNTHETIC_DATASET", "CDM53"),
write_schema = c("ATLAS", "RESULTS"))
DBI::dbDisconnect(con)
```
Note, as with SQL server we could set up a DSN to simplify this connection as described [here](https://docs.snowflake.com/developer-guide/odbc/odbc-windows) for windows and [here](https://docs.snowflake.com/developer-guide/odbc/odbc-mac) for macOS.
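For example, a minimal sketch assuming we had configured a DSN named "Snowflake":
```{r, eval=FALSE}
con <- DBI::dbConnect(odbc::odbc(), "Snowflake")
cdm <- cdm_from_con(con,
                    cdm_schema = c("OMOP_SYNTHETIC_DATASET", "CDM53"),
                    write_schema = c("ATLAS", "RESULTS"))
DBI::dbDisconnect(con)
```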
### Duckdb
Duckdb is an in-process database. We use the duckdb package to connect.
```{r, eval=FALSE}
con <- DBI::dbConnect(duckdb::duckdb(),
dbdir=Sys.getenv("CDM5_DUCKDB_FILE"))
cdm <- cdm_from_con(con,
cdm_schema = "main",
write_schema = "main")
DBI::dbDisconnect(con)
```
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a04_DBI_connection_examples.Rmd
|
## ----include = FALSE----------------------------------------------------------
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = FALSE,
build = eunomia_is_available()
)
## ----pressure, echo=FALSE, out.width = '80%'----------------------------------
# # knitr::include_graphics("locations.png")
## ----message=FALSE, warning=FALSE---------------------------------------------
# library(CDMConnector)
# library(dplyr, warn.conflicts = FALSE)
# library(ggplot2)
## ----message=FALSE, warning=FALSE---------------------------------------------
# con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
# cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
#
# # first filter to only those with condition_concept_id "4035415"
# cdm$condition_occurrence %>% tally()
#
# cdm$condition_occurrence <- cdm$condition_occurrence %>%
# filter(condition_concept_id == "4035415") %>%
# select(person_id, condition_start_date)
#
# cdm$condition_occurrence %>% tally()
#
# # then left_join person table
# cdm$person %>% tally()
# cdm$condition_occurrence %>%
# select(person_id) %>%
# left_join(select(cdm$person, person_id, year_of_birth), by = "person_id") %>%
# tally()
## ----message=FALSE, warning=FALSE---------------------------------------------
# dOut <- tempfile()
# dir.create(dOut)
# CDMConnector::stow(cdm, dOut, format = "parquet")
## ----message=FALSE, warning=FALSE---------------------------------------------
# cdm_arrow <- cdm_from_files(dOut, as_data_frame = FALSE, cdm_name = "GiBleed")
#
# cdm_arrow$person %>%
# nrow()
#
# cdm_arrow$condition_occurrence %>%
# nrow()
#
## ----message=FALSE, warning=FALSE---------------------------------------------
# result <- cdm_arrow$person %>%
# left_join(cdm_arrow$condition_occurrence, by = "person_id") %>%
# mutate(age_diag = year(condition_start_date) - year_of_birth) %>%
# collect()
## ----message=FALSE, warning=FALSE---------------------------------------------
# str(result)
#
# result %>%
# ggplot(aes(age_diag)) +
# geom_histogram()
## -----------------------------------------------------------------------------
# DBI::dbDisconnect(con, shutdown = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a05_cdm_reference_backends.R
|
---
title: "CDM reference backends"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{CDM reference backends}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = FALSE,
build = eunomia_is_available()
)
```
## Overview
The CDMConnector package allows us to work with cdm data in different locations consistently. The `cdm_reference` may point to tables in a database, files on disk, or tables loaded into R. This allows computation to take place wherever is most convenient.
Here we have a schematic of how CDMConnector can be used to create `cdm_references` to different locations.
```{r pressure, echo=FALSE, out.width = '80%'}
# knitr::include_graphics("locations.png")
```
## Example
To show how this can work (and slightly overcomplicate things to show different options), let's say we want to create a histogram with age of patients at diagnosis of tear of meniscus of knee (concept_id of "4035415"). We can start in the database and, after loading the required packages, subset our person table to only include those people in the condition_occurrence table with condition_concept_id "4035415"
```{r, message=FALSE, warning=FALSE}
library(CDMConnector)
library(dplyr, warn.conflicts = FALSE)
library(ggplot2)
```
```{r, message=FALSE, warning=FALSE}
con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
# first filter to only those with condition_concept_id "4035415"
cdm$condition_occurrence %>% tally()
cdm$condition_occurrence <- cdm$condition_occurrence %>%
filter(condition_concept_id == "4035415") %>%
select(person_id, condition_start_date)
cdm$condition_occurrence %>% tally()
# then left_join person table
cdm$person %>% tally()
cdm$condition_occurrence %>%
select(person_id) %>%
left_join(select(cdm$person, person_id, year_of_birth), by = "person_id") %>%
tally()
```
We can save these tables to file
```{r, message=FALSE, warning=FALSE}
dOut <- tempfile()
dir.create(dOut)
CDMConnector::stow(cdm, dOut, format = "parquet")
```
And now we can create a `cdm_reference` to the files
```{r, message=FALSE, warning=FALSE}
cdm_arrow <- cdm_from_files(dOut, as_data_frame = FALSE, cdm_name = "GiBleed")
cdm_arrow$person %>%
nrow()
cdm_arrow$condition_occurrence %>%
nrow()
```
And create an age at diagnosis variable
```{r, message=FALSE, warning=FALSE}
result <- cdm_arrow$person %>%
left_join(cdm_arrow$condition_occurrence, by = "person_id") %>%
mutate(age_diag = year(condition_start_date) - year_of_birth) %>%
collect()
```
We can then bring this result into R and make the histogram
```{r, message=FALSE, warning=FALSE}
str(result)
result %>%
ggplot(aes(age_diag)) +
geom_histogram()
```
```{r}
DBI::dbDisconnect(con, shutdown = TRUE)
```
<div style="margin-bottom:3cm;"></div>
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a05_cdm_reference_backends.Rmd
|
## ----include = FALSE----------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
# eval = FALSE,
comment = "#>"
)
## ----include = FALSE----------------------------------------------------------
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----message=FALSE, warning=FALSE---------------------------------------------
library(CDMConnector)
library(omopgenerics)
library(dplyr)
write_schema <- "main"
cdm_schema <- "main"
con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = cdm_schema, write_schema = write_schema)
## -----------------------------------------------------------------------------
#attr(cdm, "cdm_name")
## -----------------------------------------------------------------------------
cdmName(cdm)
cdm_name(cdm)
## -----------------------------------------------------------------------------
#attr(cdm, "cdm_version")
## -----------------------------------------------------------------------------
#cdmCon(cdm)
## -----------------------------------------------------------------------------
#DBI::dbListTables(cdmCon(cdm))
#DBI::dbListFields(cdmCon(cdm), "person")
#DBI::dbGetQuery(cdmCon(cdm), "SELECT * FROM person LIMIT 5")
## -----------------------------------------------------------------------------
# debugonce(generateConceptCohortSet)
cdm <- generateConceptCohortSet(cdm = cdm,
conceptSet = list("gi_bleed" = 192671,
"celecoxib" = 1118084),
name = "study_cohorts",
overwrite = TRUE)
cdm$study_cohorts %>%
glimpse()
## -----------------------------------------------------------------------------
attr(cdm$study_cohorts, "cohort_set")
## ----eval=FALSE---------------------------------------------------------------
# settings(cdm$study_cohorts)
# cohort_set(cdm$study_cohorts)
## -----------------------------------------------------------------------------
attr(cdm$study_cohorts, "cohort_count")
## ----eval=FALSE---------------------------------------------------------------
# cohortCount(cdm$study_cohorts)
# cohort_count(cdm$study_cohorts)
## ----eval=FALSE---------------------------------------------------------------
# attr(cdm$study_cohorts, "cohort_attrition")
## ----eval=FALSE---------------------------------------------------------------
# cohortAttrition(cdm$study_cohorts)
# cohort_attrition(cdm$study_cohorts)
## -----------------------------------------------------------------------------
attr(cdm$study_cohorts, "cdm_reference")
## -----------------------------------------------------------------------------
cdm$gi_bleed <- cdm$condition_occurrence %>%
filter(condition_concept_id == 192671) %>%
mutate(cohort_definition_id = 1) %>%
select(
cohort_definition_id,
subject_id = person_id,
cohort_start_date = condition_start_date,
cohort_end_date = condition_start_date
) %>%
compute(name = "gi_bleed", temporary = FALSE, overwrite = TRUE)
cdm$gi_bleed %>%
glimpse()
## -----------------------------------------------------------------------------
GI_bleed_cohort_ref <- tibble(cohort_definition_id = 1, cohort_name = "custom_gi_bleed")
cdm$gi_bleed <- omopgenerics::newCohortTable(
table = cdm$gi_bleed, cohortSetRef = GI_bleed_cohort_ref
)
## -----------------------------------------------------------------------------
settings(cdm$gi_bleed)
cohortCount(cdm$gi_bleed)
cohortAttrition(cdm$gi_bleed)
attr(cdm$gi_bleed, "cdm_reference")
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a06_using_cdm_attributes.R
|
---
title: "Using CDM attributes"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using CDM attributes}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
# eval = FALSE,
comment = "#>"
)
```
```{r, include = FALSE}
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Set up
Let's again load required packages and connect to our Eunomia dataset in duckdb.
```{r, message=FALSE, warning=FALSE}
library(CDMConnector)
library(omopgenerics)
library(dplyr)
write_schema <- "main"
cdm_schema <- "main"
con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = cdm_schema, write_schema = write_schema)
```
## CDM reference attributes
Our cdm reference has various attributes associated with it. These can be useful both when programming and when developing analytic packages on top of CDMConnector.
### CDM name
It's a requirement that every cdm reference has a name associated with it. This is particularly useful for network studies so that we can associate results with a particular cdm. We can access this attribute like so
```{r}
#attr(cdm, "cdm_name")
```
Because it is so regularly used, to make getting the cdm name even easier, we can also use `cdmName` (or its snake case equivalent `cdm_name`)
```{r}
cdmName(cdm)
cdm_name(cdm)
```
### CDM version
The OMOP CDM has various versions. We also have an attribute giving the version of the cdm we have connected to.
```{r}
#attr(cdm, "cdm_version")
```
### Database connection
We also have an attribute identifying the database connection underlying the cdm reference.
```{r}
#cdmCon(cdm)
```
This can be useful, for example, if we want to make use of DBI functions to work with the database. For example we could use `dbListTables` to list the names of remote tables accessible through the connection, `dbListFields` to list the field names of a specific remote table, and `dbGetQuery` to return the result of a query
```{r}
#DBI::dbListTables(cdmCon(cdm))
#DBI::dbListFields(cdmCon(cdm), "person")
#DBI::dbGetQuery(cdmCon(cdm), "SELECT * FROM person LIMIT 5")
```
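For example, a sketch using the `con` object created earlier (not evaluated here):
```{r, eval=FALSE}
DBI::dbListTables(con)
DBI::dbListFields(con, "person")
DBI::dbGetQuery(con, "SELECT * FROM person LIMIT 5")
```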
## Cohort attributes
### Generated cohort set
When we generate a cohort, in addition to the cohort table itself, we also have various attributes that can be useful for subsequent analysis.
Here we create a cohort table with two cohorts.
```{r}
# debugonce(generateConceptCohortSet)
cdm <- generateConceptCohortSet(cdm = cdm,
conceptSet = list("gi_bleed" = 192671,
"celecoxib" = 1118084),
name = "study_cohorts",
overwrite = TRUE)
cdm$study_cohorts %>%
glimpse()
```
We have a cohort set attribute that gives details on the settings associated with the cohorts (along with utility functions to make it easier to access this attribute).
```{r}
attr(cdm$study_cohorts, "cohort_set")
```
```{r, eval=FALSE}
settings(cdm$study_cohorts)
cohort_set(cdm$study_cohorts)
```
We have a cohort_count attribute with counts for each of the cohorts.
```{r}
attr(cdm$study_cohorts, "cohort_count")
```
```{r, eval=FALSE}
cohortCount(cdm$study_cohorts)
cohort_count(cdm$study_cohorts)
```
And we also have an attribute, cohort attrition, with a summary of attrition when creating the cohorts.
```{r, eval=FALSE}
attr(cdm$study_cohorts, "cohort_attrition")
```
```{r, eval=FALSE}
cohortAttrition(cdm$study_cohorts)
cohort_attrition(cdm$study_cohorts)
```
In addition, we also have the cdm reference itself as an attribute of the cohorts. This is particularly useful when developing analytic packages on top of CDMConnector.
```{r}
attr(cdm$study_cohorts, "cdm_reference")
```
### Creating a bespoke cohort
Say we create a custom GI bleed cohort with the standard cohort structure
```{r}
cdm$gi_bleed <- cdm$condition_occurrence %>%
filter(condition_concept_id == 192671) %>%
mutate(cohort_definition_id = 1) %>%
select(
cohort_definition_id,
subject_id = person_id,
cohort_start_date = condition_start_date,
cohort_end_date = condition_start_date
) %>%
compute(name = "gi_bleed", temporary = FALSE, overwrite = TRUE)
cdm$gi_bleed %>%
glimpse()
```
We can add the required attributes using the `newCohortTable` function. The minimum requirement for this is that we also define the cohort set to associate with our set of custom cohorts.
```{r}
GI_bleed_cohort_ref <- tibble(cohort_definition_id = 1, cohort_name = "custom_gi_bleed")
cdm$gi_bleed <- omopgenerics::newCohortTable(
table = cdm$gi_bleed, cohortSetRef = GI_bleed_cohort_ref
)
```
Now our custom cohort GI_bleed has the same attributes associated with it as if it had been created by `generateConceptCohortSet`. This will mean that it can be used by analytic packages designed to work with cdm cohorts.
```{r}
settings(cdm$gi_bleed)
cohortCount(cdm$gi_bleed)
cohortAttrition(cdm$gi_bleed)
attr(cdm$gi_bleed, "cdm_reference")
```
<div style="margin-bottom:3cm;"></div>
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/inst/doc/a06_using_cdm_attributes.Rmd
|
---
title: "Getting Started"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Getting Started}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = tempdir())
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
The Observational Medical Outcomes Partnership (OMOP) Common Data Model (CDM) is a commonly used format for storing and analyzing observational health data derived from electronic health records, insurance claims, registries, and other sources. Source data is "mapped" into the OMOP CDM format, providing researchers with a standardized interface for querying and analyzing observational health data. The CDMConnector package provides tools for working with OMOP CDM tables using familiar [dplyr](https://dplyr.tidyverse.org) syntax and the [tidyverse design principles](https://design.tidyverse.org/) popular in the R ecosystem.
This vignette is for new users of CDMConnector who have access to data already mapped into the OMOP CDM format. However, CDMConnector does provide several example synthetic datasets in the OMOP CDM format. To learn more about the OMOP CDM or the mapping process check out these resources.
- <https://academy.ehden.eu/>
- <https://ohdsi.github.io/TheBookOfOhdsi/>
- <https://www.ohdsi.org/join-the-journey/>
- <https://ohdsi.github.io/CommonDataModel/>
## Creating a reference to the OMOP CDM
Typically OMOP CDM datasets are stored in a database and can range in size from hundreds of patients with thousands of records to hundreds of millions of patients with billions of records. The Observational Health Data Science and Informatics (OHDSI) community supports a selection of popular database platforms including Postgres, Microsoft SQL Server, and Oracle, as well as cloud data platforms such as Amazon Redshift, Google Big Query, Databricks, and Snowflake. The first step in using CDMConnector is to create a connection to your database from R. This can take some effort the first time you set up drivers. See the "Database Connection Examples" vignette or check out [Posit's database documentation.](https://solutions.posit.co/connections/db/getting-started/connect-to-database/)
In our examples we will use some synthetic data from the [Synthea project](https://synthetichealth.github.io/synthea/) that has been mapped to the OMOP CDM format. We'll use the [duckdb](https://duckdb.org/) database, which is a file-based database similar to SQLite but with better date type support. To see all the example datasets available run `example_datasets()`.
```{r}
library(CDMConnector)
example_datasets()
con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir("GiBleed"))
DBI::dbListTables(con)
```
If you're using CDMConnector for the first time you may get a message about adding an environment variable `EUNOMIA_DATA_FOLDER`. To do this simply create a new text file in your home directory called .Renviron and add the line `EUNOMIA_DATA_FOLDER="path/to/folder/where/we/can/store/example/data"`. If you run `usethis::edit_r_environ()` this file will be created and opened for you in RStudio.
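If you prefer not to edit .Renviron, the variable can also be set for the current R session only. A minimal sketch (the folder location here is just an example):
```{r, eval=FALSE}
Sys.setenv(EUNOMIA_DATA_FOLDER = file.path(tempdir(), "eunomia"))
```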
After connecting to a database containing data mapped to the OMOP CDM, use `cdm_from_con` to create a CDM reference. This CDM reference is a single object that contains dplyr table references to each CDM table along with metadata about the CDM instance.
The cdm_schema is the schema in the database that contains the OMOP CDM tables and is required. All other arguments are optional.
```{r}
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
cdm
cdm$observation_period
```
Individual CDM table references can be accessed using \`\$\`.
```{r}
cdm$person %>%
dplyr::glimpse()
```
You can then use dplyr to query the cdm tables just as you would an R dataframe. The difference is that the data stays in the database and SQL code is dynamically generated and sent to the database backend. The goal is to allow users to not think too much about the database or SQL and instead use familiar R syntax to work with these large tables. `collect` will bring the data from the database into R. Be careful not to request a gigantic result set! In general it is better to aggregate data in the database, if possible, before bringing data into R.
```{r, warning=FALSE}
library(dplyr)
library(ggplot2)
cdm$person %>%
group_by(year_of_birth, gender_concept_id) %>%
summarize(n = n(), .groups = "drop") %>%
collect() %>%
mutate(sex = case_when(
gender_concept_id == 8532 ~ "Female",
gender_concept_id == 8507 ~ "Male"
)) %>%
ggplot(aes(y = n, x = year_of_birth, fill = sex)) +
  geom_col(position = "dodge") +
labs(x = "Year of birth",
y = "Person count",
title = "Age Distribution",
subtitle = cdm_name(cdm),
fill = NULL) +
theme_bw()
```
## Joining tables
Since the OMOP CDM is a relational data model joins are very common in analytic code. All of the events in the OMOP CDM are recorded using integers representing standard "concepts". To see the text description of a concept researchers need to join clinical tables to the concept vocabulary table. Every OMOP CDM should have a copy of the vocabulary used to map the data to the OMOP CDM format.
Here is an example query looking at the most common conditions in the CDM.
```{r, warning=FALSE}
cdm$condition_occurrence %>%
count(condition_concept_id, sort = T) %>%
left_join(cdm$concept, by = c("condition_concept_id" = "concept_id")) %>%
collect() %>%
select("condition_concept_id", "concept_name", "n")
```
Let's look at the most common drugs used by patients with "Acute viral pharyngitis".
```{r, warning=FALSE}
cdm$condition_occurrence %>%
filter(condition_concept_id == 4112343) %>%
distinct(person_id) %>%
inner_join(cdm$drug_exposure, by = "person_id") %>%
count(drug_concept_id, sort = TRUE) %>%
left_join(cdm$concept, by = c("drug_concept_id" = "concept_id")) %>%
collect() %>%
select("concept_name", "n")
```
To inspect the generated SQL use `show_query` from dplyr.
```{r, warning=FALSE}
cdm$condition_occurrence %>%
filter(condition_concept_id == 4112343) %>%
distinct(person_id) %>%
inner_join(cdm$drug_exposure, by = "person_id") %>%
count(drug_concept_id, sort = TRUE) %>%
left_join(cdm$concept, by = c("drug_concept_id" = "concept_id")) %>%
show_query()
```
These are a few simple queries. More complex queries can be built by combining simple queries like the ones above and other analytic packages provide functions that implement common analytic use cases.
For example a "cohort definition" is a set of criteria that persons must satisfy that can be quite complex. The "Working with Cohorts" vignette describes creating and using cohorts with CDMConnector.
## Saving query results to the database
Sometimes it is helpful to save query results to the database instead of reading the result into R. dplyr provides the `compute` function, but due to differences between database systems CDMConnector has needed to export its own method that handles the slight differences. Internally CDMConnector runs the `compute_query` function, which is tested across the OHDSI supported database platforms.
If we are writing data to the CDM database we need to add one more argument when creating our cdm reference object, the "write_schema". This is a schema in the database where you have write permissions. Typically this should be a separate schema from the "cdm_schema".
```{r}
DBI::dbExecute(con, "create schema scratch;")
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "scratch")
```
```{r, warning=FALSE}
drugs <- cdm$condition_occurrence %>%
filter(condition_concept_id == 4112343) %>%
distinct(person_id) %>%
inner_join(cdm$drug_exposure, by = "person_id") %>%
count(drug_concept_id, sort = TRUE) %>%
left_join(cdm$concept, by = c("drug_concept_id" = "concept_id")) %>%
compute(name = "test", temporary = FALSE, overwrite = TRUE)
drugs %>% show_query()
drugs
```
We can see that the query has been saved to a new table in the scratch schema. `compute` returns a dplyr reference to this table.
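As a quick check we can list the tables visible through the connection and confirm that the new "test" table is among them (the exact output will depend on the database):
```{r}
DBI::dbListTables(con)
```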
## Selecting a subset of CDM tables
If you do not need references to all tables you can easily select only a subset of tables to include in the CDM reference. The `cdm_select_tbl` function supports the [tidyselect selection language](https://tidyselect.r-lib.org/reference/language.html) and provides a new selection helper: `tbl_group`.
```{r}
cdm %>% cdm_select_tbl("person", "observation_period") # quoted names
cdm %>% cdm_select_tbl(person, observation_period) # unquoted names
cdm %>% cdm_select_tbl(starts_with("concept")) # tables that start with 'concept'
cdm %>% cdm_select_tbl(contains("era")) # tables that contain the substring 'era'
cdm %>% cdm_select_tbl(matches("person|period")) # regular expression
```
Predefined sets of tables can also be selected using `tbl_group` which supports several subsets of the CDM: "all", "clinical", "vocab", "derived", and "default".
```{r}
# pre-defined groups
cdm %>% cdm_select_tbl(tbl_group("clinical"))
cdm %>% cdm_select_tbl(tbl_group("vocab"))
```
The default set of CDM tables included in a CDM object is:
```{r}
tbl_group("default")
```
## Subsetting a CDM
Sometimes it is helpful to subset a CDM to a specific set of persons or simply downsample the data to a more reasonable size. Let's subset our cdm to just persons with Pneumonia (concept_id 255848). This works best when the number of persons in the subset is quite small and the database has indexes on the "person_id" columns of each table.
```{r}
person_ids <- cdm$condition_occurrence %>%
filter(condition_concept_id == 255848) %>%
distinct(person_id) %>%
pull(person_id)
length(person_ids)
cdm_pneumonia <- cdm %>%
cdm_subset(person_id = person_ids)
tally(cdm_pneumonia$person) %>%
pull(n)
cdm_pneumonia$condition_occurrence %>%
distinct(person_id) %>%
tally() %>%
pull(n)
```
Alternatively if we simply want a random sample of the entire CDM we can use `cdm_sample`.
```{r}
cdm_100person <- cdm_sample(cdm, n = 100)
tally(cdm_100person$person) %>% pull("n")
```
## Flatten a CDM
An OMOP CDM is a relational data model. Sometimes it is helpful to flatten this relational structure into a "tidy" dataframe with one row per observation. This transformation should only be done with a small number of persons and events.
```{r}
cdm_flatten(cdm_pneumonia,
domain = c("condition", "drug", "measurement")) %>%
collect()
```
## Saving a local copy of a CDM
We can use `collect` to bring the whole cdm object into R as dataframes. If you would like to save a subset of the CDM and then restore it in R as a local CDM object, CDMConnector provides the `stow` and `cdm_from_files` functions to do this.
```{r}
local_cdm <- cdm_100person %>%
collect()
# The cdm tables are now dataframes
local_cdm$person[1:4, 1:4]
```
```{r, eval=FALSE}
save_path <- file.path(tempdir(), "tmp")
dir.create(save_path)
cdm %>%
stow(path = save_path, format = "parquet")
list.files(save_path)
```
Restore a saved cdm object from files with `cdm_from_files`.
```{r, eval=FALSE}
cdm <- cdm_from_files(save_path, cdm_name = "GI Bleed example data")
```
## Closing connections
Close the database connection with `dbDisconnect`. After a connection is closed any cdm objects created with that connection can no longer be used.
```{r}
DBI::dbDisconnect(con, shutdown = TRUE)
```
## Summary
CDMConnector provides an interface to working with observational health data in the OMOP CDM format from R. Check out the other vignettes for more details about the package.
<div style="margin-bottom:3cm;"></div>
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/vignettes/a01_getting-started.Rmd
|
---
title: "Working with cohorts"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Working with cohorts}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
eval = rlang::is_installed("CirceR") && rlang::is_installed("Capr"),
comment = "#>"
)
library(CDMConnector)
library(dplyr, warn.conflicts = FALSE)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
```
Cohorts are a fundamental building block for observational health data analysis. A "cohort" is a set of persons satisfying one or more inclusion criteria for a duration of time. If you are familiar with the idea of sets in math then a cohort can be nicely represented as a set of person-days. In the OMOP Common Data Model we represent cohorts using a table with four columns.
| cohort_definition_id | subject_id | cohort_start_date | cohort_end_date |
|----------------------|------------|-------------------|-----------------|
| 1 | 1000 | 2020-01-01 | 2020-05-01 |
| 1                    | 1000       | 2021-06-01        | 2021-07-01      |
| 1 | 2000 | 2020-03-01 | 2020-09-01 |
| 2 | 1000 | 2020-02-01 | 2020-03-01 |
: An example cohort table
A cohort table can contain multiple cohorts and each cohort can have multiple persons. There can even be multiple records for the same person in a single cohort as long as the date ranges do not overlap. In the same way that an element is either in a set or not, a single person-day is either in a cohort or not. For a more comprehensive treatment of cohorts in OHDSI check out the Cohorts chapter in [The Book of OHDSI](https://ohdsi.github.io/TheBookOfOhdsi/Cohorts.html).
## Cohort Generation
The $n \times 4$ cohort table is created through the process of cohort *generation*. To generate a cohort on a specific CDM dataset means that we combine a *cohort definition* with a CDM to produce a cohort table. The standardization provided by the OMOP CDM allows researchers to generate the same cohort definition on any OMOP CDM dataset.
A cohort definition is an expression of the rules governing the inclusion/exclusion of person-days in the cohort. There are three common ways to create cohort definitions for the OMOP CDM.
1. The Atlas cohort builder
2. The Capr R package
3. Custom SQL and/or R code
Atlas is a web application that provides a graphical user interface for creating cohort definitions. To get started with Atlas check out the free course on [Ehden Academy](https://academy.ehden.eu/course/index.php) and the demo at <https://atlas-demo.ohdsi.org/>.
Capr is an R package that provides a code-based interface for creating cohort definitions. The options available in Capr exactly match the options available in Atlas and the resulting cohort tables should be identical.
There are times when more customization is needed and it is possible to use bespoke SQL or dplyr code to build a cohort. CDMConnector provides the `generate_concept_cohort_set` function for quickly building simple cohorts that can then be a starting point for further subsetting.
Atlas cohorts are represented using json text files. To "generate" one or more Atlas cohorts on a cdm object use the `read_cohort_set` function to first read a folder of Atlas cohort json files into R. Then create the cohort table with `generate_cohort_set`. There can be an optional csv file called "CohortsToCreate.csv" in the folder that specifies the cohort IDs and names to use. If this file doesn't exist, IDs will be assigned automatically using alphabetical order of the filenames.
```{r}
path_to_cohort_json_files <- system.file("cohorts1", package = "CDMConnector")
list.files(path_to_cohort_json_files)
readr::read_csv(file.path(path_to_cohort_json_files, "CohortsToCreate.csv"),
show_col_types = FALSE)
```
### Atlas cohort definitions
First we need to create our CDM object. Note that we will need to specify a `write_schema` when creating the object. Cohort tables will go into the CDM's `write_schema`.
```{r}
library(CDMConnector)
path_to_cohort_json_files <- system.file("example_cohorts",
package = "CDMConnector")
list.files(path_to_cohort_json_files)
con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir("GiBleed"))
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
cohort_details <- read_cohort_set(path_to_cohort_json_files) |>
mutate(cohort_name = snakecase::to_snake_case(cohort_name))
cohort_details
cdm <- generate_cohort_set(
cdm = cdm,
cohort_set = cohort_details,
name = "study_cohorts"
)
cdm$study_cohorts
```
The generated cohort has associated metadata tables. We can access these with utility functions.
- `cohort_count` contains the person and record counts for each cohort in the cohort set
- `settings` table contains the cohort id and cohort name
- `attrition` table contains the attrition information (persons, and records dropped at each sequential inclusion rule)
```{r}
cohort_count(cdm$study_cohorts)
cohort_set(cdm$study_cohorts)
attrition(cdm$study_cohorts)
```
Note that this cohort table is still in the database so it can be quite large. We can also join it to other CDM tables or subset the entire cdm to just the persons in the cohort.
```{r, eval=FALSE}
cdm_gibleed <- cdm %>%
cdm_subset_cohort(cohort_table = "study_cohorts")
```
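As a sketch of the join option mentioned above (assuming the study_cohorts table generated earlier), here we pull each cohort member's year of birth from the person table:
```{r, eval=FALSE}
cdm$study_cohorts %>%
  inner_join(cdm$person, by = c("subject_id" = "person_id")) %>%
  select(cohort_definition_id, subject_id, cohort_start_date, year_of_birth)
```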
### Capr cohort definitions
Capr allows us to use R code to create the same cohorts that can be created in Atlas. This is helpful when you need to create a large number of similar cohort definitions. Below we create a single Cohort definition with one inclusion criteria
`generate_cohort_set` will accept a named list of Capr cohort definitions.
```{r}
library(Capr)
gibleed_concept_set <- cs(192671, name = "gibleed")
gibleed_definition <- cohort(
entry = conditionOccurrence(gibleed_concept_set)
)
gibleed_male_definition <- cohort(
entry = conditionOccurrence(gibleed_concept_set, male())
)
# create a named list of Capr cohort definitions
cohort_details <- list(gibleed = gibleed_definition,
gibleed_male = gibleed_male_definition)
# generate cohorts
cdm <- generate_cohort_set(
cdm,
cohort_set = cohort_details,
name = "gibleed" # name for the cohort table in the cdm
)
cdm$gibleed
```
We should get the exact same result from Capr and Atlas if the definitions are equivalent.
Learn more about Capr at the package website <https://ohdsi.github.io/Capr/>.
```{r}
DBI::dbDisconnect(con, shutdown = TRUE)
```
### Subset a cohort
Suppose you have a generated cohort and you would like to create a new cohort that is a subset of the first. This can be done by filtering the existing cohort table with dplyr and saving the result as a new cohort table.
First we will generate an example cohort set and then create a new cohort based on filtering the Atlas cohort.
```{r}
library(CDMConnector)
con <- DBI::dbConnect(duckdb::duckdb(), eunomia_dir())
cdm <- cdm_from_con(con, cdm_schema = "main", write_schema = "main")
cohort_set <- read_cohort_set(system.file("cohorts3", package = "CDMConnector"))
cdm <- generate_cohort_set(cdm, cohort_set, name = "cohort")
cdm$cohort
cohort_count(cdm$cohort)
```
As an example we will take only people in the cohort that have a cohort duration of at least 4 weeks (28 days).
Using dplyr we can write this query and save the result in a new table in the cdm.
```{r}
library(dplyr)
cdm$cohort_subset <- cdm$cohort %>%
# only keep persons who are in the cohort at least 28 days
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 28) %>%
# optionally you can modify the cohort_id
mutate(cohort_definition_id = 100 + cohort_definition_id) %>%
compute(name = "cohort_subset", temporary = FALSE, overwrite = TRUE) %>%
new_generated_cohort_set()
cdm$cohort_subset
cohort_count(cdm$cohort_subset)
```
In this case we can see that cohorts 1 and 5 were dropped completely and some patients were dropped from cohorts 2, 3, and 4.
Let's confirm that everyone in cohorts 1 and 5 was in the cohort for less than 28 days.
```{r}
days_in_cohort <- cdm$cohort %>%
filter(cohort_definition_id %in% c(1,5)) %>%
mutate(days_in_cohort = !!datediff("cohort_start_date", "cohort_end_date")) %>%
count(cohort_definition_id, days_in_cohort) %>%
collect()
days_in_cohort
```
We have confirmed that everyone in cohorts 1 and 5 was in the cohort for less than 28 days.
Now suppose we would like to create a new cohort table with three different versions of the cohorts in the original cohort table. We will keep persons who are in the cohort for at least 2 weeks, 3 weeks, and 4 weeks, respectively. We can simply write some custom dplyr to create the table and then call `new_generated_cohort_set` just like in the previous example.
```{r}
cdm$cohort_subset <- cdm$cohort %>%
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 14) %>%
mutate(cohort_definition_id = 10 + cohort_definition_id) %>%
union_all(
cdm$cohort %>%
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 21) %>%
mutate(cohort_definition_id = 100 + cohort_definition_id)
) %>%
union_all(
cdm$cohort %>%
filter(!!datediff("cohort_start_date", "cohort_end_date") >= 28) %>%
mutate(cohort_definition_id = 1000 + cohort_definition_id)
) %>%
compute(name = "cohort_subset", temporary = FALSE, overwrite = TRUE) %>%
new_generated_cohort_set() # this function creates the cohort object and metadata
cdm$cohort_subset %>%
mutate(days_in_cohort = !!datediff("cohort_start_date", "cohort_end_date")) %>%
group_by(cohort_definition_id) %>%
summarize(mean_days_in_cohort = mean(days_in_cohort, na.rm = TRUE)) %>%
collect() %>%
arrange(mean_days_in_cohort)
```
This is an example of creating new cohorts from existing cohorts using CDMConnector. There is a lot of flexibility with this approach. Next we will look at completely custom cohort creation which is quite similar.
### Custom Cohort Creation
Sometimes you may want to create cohorts that cannot be easily expressed using Atlas or Capr. In these situations you can implement cohort creation using SQL or R. See the chapter in [The Book of OHDSI](https://ohdsi.github.io/TheBookOfOhdsi/Cohorts.html#implementing-the-cohort-using-sql) for details on using SQL to create cohorts. CDMConnector provides a helper function to build simple cohorts from a list of OMOP concepts. `generate_concept_cohort_set` accepts a named list of concept sets and will create cohorts based on those concept sets. While this function does not allow for inclusion/exclusion criteria in the initial definition, additional criteria can be applied "manually" after the initial generation.
```{r}
library(dplyr, warn.conflicts = FALSE)
cdm <- generate_concept_cohort_set(
cdm,
concept_set = list(gibleed = 192671),
name = "gibleed2", # name of the cohort table
limit = "all", # use all occurrences of the concept instead of just the first
end = 10 # set explicit cohort end date 10 days after start
)
cdm$gibleed2 <- cdm$gibleed2 %>%
semi_join(
filter(cdm$person, gender_concept_id == 8507),
by = c("subject_id" = "person_id")
) %>%
record_cohort_attrition(reason = "Male")
attrition(cdm$gibleed2)
```
We could visualise attrition using a package like visR.
```{r, fig.width= 7, fig.height=10}
library(visR)
gibleed2_attrition <- CDMConnector::attrition(cdm$gibleed2) %>%
dplyr::select(Criteria = "reason", `Remaining N` = "number_subjects")
class(gibleed2_attrition) <- c("attrition", class(gibleed2_attrition))
visr(gibleed2_attrition)
```
In the above example we built a cohort table from a concept set. The cohort essentially captures patient-time based on the presence or absence of OMOP standard concept IDs. We then manually applied an inclusion criterion and recorded a new attrition record in the cohort. To learn more about this approach to building cohorts check out the [PatientProfiles](https://darwin-eu-dev.github.io/PatientProfiles/) R package.
You can also create a generated cohort set using any method you choose. As long as the table is in the CDM database and has the four required columns it can be added to the CDM object as a generated cohort set.
Suppose for example our cohort table is
```{r}
cohort <- dplyr::tibble(
cohort_definition_id = 1L,
subject_id = 1L,
cohort_start_date = as.Date("1999-01-01"),
cohort_end_date = as.Date("2001-01-01")
)
cohort
```
First make sure the table is in the database and create a dplyr table reference to it and add it to the CDM object.
```{r}
library(omopgenerics)
cdm <- insertTable(cdm = cdm, name = "cohort", table = cohort, overwrite = TRUE)
cdm$cohort
```
To make this a true generated cohort object use the `newCohortTable` function.
```{r}
cdm$cohort <- newCohortTable(cdm$cohort)
```
We can see that this cohort now has the class "cohort_table" as well as the various metadata tables.
```{r}
cohort_count(cdm$cohort)
cohort_set(cdm$cohort)
attrition(cdm$cohort)
```
If you would like to override the attribute tables then pass additional dataframes to `newCohortTable`
```{r}
cdm <- insertTable(cdm = cdm, name = "cohort2", table = cohort, overwrite = TRUE)
cdm$cohort2 <- newCohortTable(cdm$cohort2)
settings(cdm$cohort2)
cohort_set <- data.frame(cohort_definition_id = 1L,
cohort_name = "made_up_cohort")
cdm$cohort2 <- newCohortTable(cdm$cohort2, cohortSetRef = cohort_set)
settings(cdm$cohort2)
```
```{r}
DBI::dbDisconnect(con, shutdown = TRUE)
```
Cohort building is fundamental to observational health analysis and CDMConnector supports different ways of creating cohorts. As long as your cohort table has the required structure and columns you can add it to the cdm with the `new_generated_cohort_set` function and use it in any downstream OHDSI analytic packages.
<div style="margin-bottom:3cm;"></div>
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/vignettes/a02_cohorts.Rmd
|
---
title: "CDMConnector and dbplyr"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{CDMConnector and dbplyr}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
# eval = FALSE,
comment = "#>"
)
```
## Set up
First let's load the required packages for the code in this vignette. If you haven't already installed them, they can be installed using `install.packages()`
```{r}
library(CDMConnector)
library(dplyr, warn.conflicts = FALSE)
library(ggplot2)
```
## Creating the cdm reference
Now let's connect to a duckdb database with the Eunomia data (https://github.com/OHDSI/Eunomia).
```{r}
con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
cdm
```
This cdm object is now what we'll use going forward. It provides a reference to the OMOP CDM tables. We can see that these tables are still in the database, but now we have a reference to each of the ones we might want to use in our analysis. For example, the person table can be referenced like so:
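```{r}
cdm$person
```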
## Putting it all together
Say we want to make a histogram of year of birth in the person table. We can select that variable, bring it into memory, and then use ggplot to make the histogram.
```{r, message=FALSE}
cdm$person %>%
select(year_of_birth) %>%
collect() %>%
ggplot(aes(x = year_of_birth)) +
geom_histogram(bins = 30)
```
If we wanted to make a boxplot for the length of observation periods, we could do the computation on the database side, bring the new variable into memory, and use ggplot to produce the boxplot.
```{r}
cdm$observation_period %>%
select(observation_period_start_date, observation_period_end_date) %>%
  mutate(observation_period = (observation_period_end_date - observation_period_start_date) / 365.25) %>%
select(observation_period) %>%
collect() %>%
ggplot(aes(x = observation_period)) +
geom_boxplot()
```
## Behind the scenes
We can use `show_query()` to check the SQL that is being run against duckdb.
```{r}
cdm$person %>%
tally() %>%
show_query()
```
```{r}
cdm$person %>%
  summarise(median(year_of_birth)) %>%
show_query()
```
```{r, warning=FALSE}
cdm$person %>%
mutate(gender = case_when(
gender_concept_id == "8507" ~ "Male",
gender_concept_id == "8532" ~ "Female",
    TRUE ~ NA_character_)) %>%
show_query()
```
```{r}
DBI::dbDisconnect(con, shutdown = TRUE)
```
<div style="margin-bottom:3cm;"></div>
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/vignettes/a03_dbplyr.Rmd
|
---
title: "DBI connection examples"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{DBI connection examples}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = FALSE
)
```
The following connection examples are provided for reference.
### Postgres
```{r, eval=FALSE}
con <- DBI::dbConnect(RPostgres::Postgres(),
dbname = Sys.getenv("CDM5_POSTGRESQL_DBNAME"),
host = Sys.getenv("CDM5_POSTGRESQL_HOST"),
user = Sys.getenv("CDM5_POSTGRESQL_USER"),
password = Sys.getenv("CDM5_POSTGRESQL_PASSWORD"))
cdm <- cdm_from_con(con,
cdm_schema = Sys.getenv("CDM5_POSTGRESQL_CDM_SCHEMA"),
write_schema = Sys.getenv("CDM5_POSTGRESQL_SCRATCH_SCHEMA"))
DBI::dbDisconnect(con)
```
### Redshift
Redshift is almost identical to Postgres.
```{r, eval=FALSE}
con <- DBI::dbConnect(RPostgres::Redshift(),
dbname = Sys.getenv("CDM5_REDSHIFT_DBNAME"),
host = Sys.getenv("CDM5_REDSHIFT_HOST"),
port = Sys.getenv("CDM5_REDSHIFT_PORT"),
user = Sys.getenv("CDM5_REDSHIFT_USER"),
password = Sys.getenv("CDM5_REDSHIFT_PASSWORD"))
cdm <- cdm_from_con(con,
cdm_schema = Sys.getenv("CDM5_REDSHIFT_CDM_SCHEMA"),
write_schema = Sys.getenv("CDM5_REDSHIFT_SCRATCH_SCHEMA"))
DBI::dbDisconnect(con)
```
### SQL Server
Using odbc with SQL Server requires driver setup described [here](https://solutions.posit.co/connections/db/r-packages/odbc/). Note, you'll likely need to [download the ODBC Driver for SQL Server](https://learn.microsoft.com/en-us/sql/connect/odbc/download-odbc-driver-for-sql-server?view=sql-server-ver16).
```{r, eval=FALSE}
con <- DBI::dbConnect(odbc::odbc(),
Driver = "ODBC Driver 18 for SQL Server",
Server = Sys.getenv("CDM5_SQL_SERVER_SERVER"),
Database = Sys.getenv("CDM5_SQL_SERVER_CDM_DATABASE"),
UID = Sys.getenv("CDM5_SQL_SERVER_USER"),
PWD = Sys.getenv("CDM5_SQL_SERVER_PASSWORD"),
TrustServerCertificate="yes",
Port = 1433)
cdm <- cdm_from_con(con,
cdm_schema = c("tempdb", "dbo"),
write_schema = c("ATLAS", "RESULTS"))
DBI::dbDisconnect(con)
```
The connection to SQL Server can be simplified by configuring a DSN. See [here](https://www.r-bloggers.com/2018/05/setting-up-an-odbc-connection-with-ms-sql-server-on-windows/) for instructions on how to set up the DSN. If we named it "SQL", our connection is then simplified to.
```{r, eval=FALSE}
con <- DBI::dbConnect(odbc::odbc(), "SQL")
cdm <- cdm_from_con(con,
cdm_schema = c("tempdb", "dbo"),
write_schema = c("ATLAS", "RESULTS"))
DBI::dbDisconnect(con)
```
### Snowflake
We can use the odbc package to connect to Snowflake.
```{r, eval=FALSE}
con <- DBI::dbConnect(odbc::odbc(),
SERVER = Sys.getenv("SNOWFLAKE_SERVER"),
UID = Sys.getenv("SNOWFLAKE_USER"),
PWD = Sys.getenv("SNOWFLAKE_PASSWORD"),
DATABASE = Sys.getenv("SNOWFLAKE_DATABASE"),
WAREHOUSE = Sys.getenv("SNOWFLAKE_WAREHOUSE"),
DRIVER = Sys.getenv("SNOWFLAKE_DRIVER"))
cdm <- cdm_from_con(con,
cdm_schema = c("OMOP_SYNTHETIC_DATASET", "CDM53"),
write_schema = c("ATLAS", "RESULTS"))
DBI::dbDisconnect(con)
```
Note, as with SQL Server, we could set up a DSN to simplify this connection, as described [here](https://docs.snowflake.com/developer-guide/odbc/odbc-windows) for Windows and [here](https://docs.snowflake.com/developer-guide/odbc/odbc-mac) for macOS.
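For example, assuming we had configured a DSN named "Snowflake" (the name here is illustrative), the connection would simplify to:
```{r, eval=FALSE}
con <- DBI::dbConnect(odbc::odbc(), "Snowflake")
cdm <- cdm_from_con(con,
                    cdm_schema = c("OMOP_SYNTHETIC_DATASET", "CDM53"),
                    write_schema = c("ATLAS", "RESULTS"))
DBI::dbDisconnect(con)
```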
### Duckdb
DuckDB is an in-process database. We use the duckdb package to connect.
```{r, eval=FALSE}
con <- DBI::dbConnect(duckdb::duckdb(),
dbdir=Sys.getenv("CDM5_DUCKDB_FILE"))
cdm <- cdm_from_con(con,
cdm_schema = "main",
write_schema = "main")
DBI::dbDisconnect(con)
```
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/vignettes/a04_DBI_connection_examples.Rmd
|
---
title: "CDM reference backends"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{CDM reference backends}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
eval = FALSE,
build = eunomia_is_available()
)
```
## Overview
The CDMConnector package allows us to work with cdm data in different locations consistently. The `cdm_reference` may point to tables in a database, files on disk, or tables loaded into R. This allows computation to take place wherever is most convenient.
Here we have a schematic of how CDMConnector can be used to create `cdm_references` to different locations.
```{r pressure, echo=FALSE, out.width = '80%'}
# knitr::include_graphics("locations.png")
```
## Example
To show how this can work (and slightly overcomplicate things to show different options), let's say we want to create a histogram of the age of patients at diagnosis of tear of meniscus of knee (concept_id "4035415"). We can start in the database and, after loading the required packages, subset our person table to only include those people in the condition_occurrence table with condition_concept_id "4035415"
```{r, message=FALSE, warning=FALSE}
library(CDMConnector)
library(dplyr, warn.conflicts = FALSE)
library(ggplot2)
```
```{r, message=FALSE, warning=FALSE}
con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = "main", write_schema = "main")
# first filter to only those with condition_concept_id "4035415"
cdm$condition_occurrence %>% tally()
cdm$condition_occurrence <- cdm$condition_occurrence %>%
filter(condition_concept_id == "4035415") %>%
select(person_id, condition_start_date)
cdm$condition_occurrence %>% tally()
# then left_join person table
cdm$person %>% tally()
cdm$condition_occurrence %>%
select(person_id) %>%
left_join(select(cdm$person, person_id, year_of_birth), by = "person_id") %>%
tally()
```
We can save these tables to file
```{r, message=FALSE, warning=FALSE}
dOut <- tempfile()
dir.create(dOut)
CDMConnector::stow(cdm, dOut, format = "parquet")
```
And now we can create a `cdm_reference` to the files
```{r, message=FALSE, warning=FALSE}
cdm_arrow <- cdm_from_files(dOut, as_data_frame = FALSE, cdm_name = "GiBleed")
cdm_arrow$person %>%
nrow()
cdm_arrow$condition_occurrence %>%
nrow()
```
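The overview noted that a cdm reference can also be to tables loaded into R. As a minimal sketch (reusing the same stowed files; `cdm_r` is just an illustrative name), setting `as_data_frame = TRUE` reads the tables into memory as data frames:
```{r, message=FALSE, warning=FALSE}
cdm_r <- cdm_from_files(dOut, as_data_frame = TRUE, cdm_name = "GiBleed")
nrow(cdm_r$person)
```
We will continue with the arrow-backed reference below.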
And create an age at diagnosis variable
```{r, message=FALSE, warning=FALSE}
result <- cdm_arrow$person %>%
left_join(cdm_arrow$condition_occurrence, by = "person_id") %>%
mutate(age_diag = year(condition_start_date) - year_of_birth) %>%
collect()
```
With this result now in R, we can make the histogram
```{r, message=FALSE, warning=FALSE}
str(result)
result %>%
ggplot(aes(age_diag)) +
geom_histogram()
```
```{r}
DBI::dbDisconnect(con, shutdown = TRUE)
```
<div style="margin-bottom:3cm;"></div>
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/vignettes/a05_cdm_reference_backends.Rmd
|
---
title: "Using CDM attributes"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using CDM attributes}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
# eval = FALSE,
comment = "#>"
)
```
```{r, include = FALSE}
library(CDMConnector)
if (Sys.getenv("EUNOMIA_DATA_FOLDER") == "") Sys.setenv("EUNOMIA_DATA_FOLDER" = file.path(tempdir(), "eunomia"))
if (!dir.exists(Sys.getenv("EUNOMIA_DATA_FOLDER"))) dir.create(Sys.getenv("EUNOMIA_DATA_FOLDER"))
if (!eunomia_is_available()) downloadEunomiaData()
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Set up
Let's again load required packages and connect to our Eunomia dataset in duckdb.
```{r, message=FALSE, warning=FALSE}
library(CDMConnector)
library(omopgenerics)
library(dplyr)
write_schema <- "main"
cdm_schema <- "main"
con <- DBI::dbConnect(duckdb::duckdb(), dbdir = eunomia_dir())
cdm <- cdm_from_con(con, cdm_name = "eunomia", cdm_schema = cdm_schema, write_schema = write_schema)
```
## CDM reference attributes
Our cdm reference has various attributes associated with it. These can be useful both when programming and when developing analytic packages on top of CDMConnector.
### CDM name
It's a requirement that every cdm reference has a name associated with it. This is particularly useful for network studies so that we can associate results with a particular cdm. We can access this attribute like so
```{r}
attr(cdm, "cdm_name")
```
Because it is so regularly used, to make getting the cdm name even easier, we can also use `cdmName` (or its snake case equivalent `cdm_name`)
```{r}
cdmName(cdm)
cdm_name(cdm)
```
### CDM version
The OMOP CDM has various versions. We also have an attribute giving the version of the cdm we have connected to.
```{r}
attr(cdm, "cdm_version")
```
### Database connection
We also have an attribute identifying the database connection underlying the cdm reference.
```{r}
cdmCon(cdm)
```
This can be useful, for example, if we want to make use of DBI functions to work with the database. We could use `dbListTables` to list the names of remote tables accessible through the connection, `dbListFields` to list the field names of a specific remote table, and `dbGetQuery` to return the result of a query
```{r}
DBI::dbListTables(cdmCon(cdm))
DBI::dbListFields(cdmCon(cdm), "person")
DBI::dbGetQuery(cdmCon(cdm), "SELECT * FROM person LIMIT 5")
```
## Cohort attributes
### Generated cohort set
When we generate a cohort, in addition to the cohort table itself, we also have various attributes that can be useful for subsequent analysis.
Here we create a cohort table with a single cohort.
```{r}
cdm <- generateConceptCohortSet(cdm = cdm,
conceptSet = list("gi_bleed" = 192671,
"celecoxib" = 1118084),
name = "study_cohorts",
overwrite = TRUE)
cdm$study_cohorts %>%
glimpse()
```
We have a cohort set attribute that gives details on the settings associated with the cohorts (along with utility functions to make it easier to access this attribute).
```{r}
attr(cdm$study_cohorts, "cohort_set")
```
```{r, eval=FALSE}
settings(cdm$study_cohorts)
cohort_set(cdm$study_cohorts)
```
We have a cohort_count attribute with counts for each of the cohorts.
```{r}
attr(cdm$study_cohorts, "cohort_count")
```
```{r, eval=FALSE}
cohortCount(cdm$study_cohorts)
cohort_count(cdm$study_cohorts)
```
And we also have an attribute, cohort attrition, with a summary of attrition when creating the cohorts.
```{r, eval=FALSE}
attr(cdm$study_cohorts, "cohort_attrition")
```
```{r, eval=FALSE}
cohortAttrition(cdm$study_cohorts)
cohort_attrition(cdm$study_cohorts)
```
In addition, we also have the cdm reference itself as an attribute of the cohorts. This is particularly useful when developing analytic packages on top of CDMConnector.
```{r}
attr(cdm$study_cohorts, "cdm_reference")
```
### Creating a bespoke cohort
Say we create a custom GI bleed cohort with the standard cohort structure
```{r}
cdm$gi_bleed <- cdm$condition_occurrence %>%
filter(condition_concept_id == 192671) %>%
mutate(cohort_definition_id = 1) %>%
select(
cohort_definition_id,
subject_id = person_id,
cohort_start_date = condition_start_date,
cohort_end_date = condition_start_date
) %>%
compute(name = "gi_bleed", temporary = FALSE, overwrite = TRUE)
cdm$gi_bleed %>%
glimpse()
```
We can add the required attributes using the `newCohortTable` function. The minimum requirement for this is that we also define the cohort set to associate with our set of custom cohorts.
```{r}
GI_bleed_cohort_ref <- tibble(cohort_definition_id = 1, cohort_name = "custom_gi_bleed")
cdm$gi_bleed <- omopgenerics::newCohortTable(
table = cdm$gi_bleed, cohortSetRef = GI_bleed_cohort_ref
)
```
Now our custom cohort `gi_bleed` has the same attributes associated with it as if it had been created by `generateConceptCohortSet`. This will mean that it can be used by analytic packages designed to work with cdm cohorts.
```{r}
settings(cdm$gi_bleed)
cohortCount(cdm$gi_bleed)
cohortAttrition(cdm$gi_bleed)
attr(cdm$gi_bleed, "cdm_reference")
```
<div style="margin-bottom:3cm;"></div>
|
/scratch/gouwar.j/cran-all/cranData/CDMConnector/vignettes/a06_using_cdm_attributes.Rmd
|
#' @title Get OAuth token
#' @description Gets an OAuth authentication token (long character string)
#' @param id character, user OAuth client id
#' @param secret character, user OAuth client secret
#' @param url character, endpoint for requesting tokens. Default: Copernicus Data Space Ecosystem OAuth endpoint
#' @return Long character string containing the authentication token.
#' @details The token can be used in queries requiring authentication.
#' @examples
#' \dontrun{
#' #EXAMPLE1
#' id <- "..."
#' secret <- "..."
#' token <- GetOAuthToken(id = id, secret = secret)
#' }
#' @seealso
#' \code{\link[CDSE]{GetOAuthClient}}
#' @rdname GetOAuthToken
#' @export
#' @source \url{https://documentation.dataspace.copernicus.eu/APIs/SentinelHub/Overview/Authentication.html}
#' @importFrom httr2 oauth_client oauth_flow_client_credentials
GetOAuthToken <- function(id, secret, url = getOption("CDSE.auth_url")) {
    client <- httr2::oauth_client(id = id, token_url = url, secret = secret, auth = "header")
token <- httr2::oauth_flow_client_credentials(client)
out <- token$access_token
attr(out, "expires") <- as.POSIXct(token$expires_at, origin = "1970-01-01")
return(out)
}
#' @title Get OAuth client
#' @description Gets an OAuth authentication client (\code{httr2} OAuth client object)
#' @param id character, user OAuth client id
#' @param secret character, user OAuth client secret
#' @param url character, endpoint for requesting tokens. Default: Copernicus Data Space Ecosystem OAuth endpoint
#' @return \code{httr2} OAuth client object
#' @details The client can be used in queries requiring authentication.
#' @examples
#' \dontrun{
#' #EXAMPLE1
#' id <- "..."
#' secret <- "..."
#' OAuthClient <- GetOAuthClient(id = id, secret = secret)
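#' # The client can then be passed to functions requiring authentication, e.g.
#' # images <- SearchCatalog(aoi = aoi, from = "2023-07-01", to = "2023-07-31",
#' #                         collection = "sentinel-2-l2a", client = OAuthClient)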
#' }
#' @seealso
#' \code{\link[CDSE]{GetOAuthToken}}
#' @rdname GetOAuthClient
#' @export
#' @source \url{https://documentation.dataspace.copernicus.eu/APIs/SentinelHub/Overview/Authentication.html}
#' @importFrom httr2 oauth_client
GetOAuthClient <- function(id, secret, url = getOption("CDSE.auth_url")) {
client <- httr2::oauth_client(id = id, token_url = url, secret = secret, auth = "header")
return(client)
}
|
/scratch/gouwar.j/cran-all/cranData/CDSE/R/Authentication.R
|
#' @title Get image from the archive
#' @description Retrieves the image for the area of interest using the parameters provided.
#' @param aoi sf or sfc object, typically a (multi)polygon, describing the Area of Interest.
#' @param bbox numeric vector of four elements describing the bounding box of interest.
#' Specify with a coordinate pair on two (opposite) vertices of the bounding box rectangle.
#' Coordinates need to be in longitude, latitude.
#'
#' Only one of either \code{aoi} or \code{bbox} may be specified.
#' @param time_range scalar or vector (Date or character that can be converted to date) defining the time interval.
#' @param collection character indicating which collection to search.
#' Must be one of the collections returned by \code{GetCollections}.
#' @param script a length one character string containing the evaluation script or the name of the file containing the script.
#' @param mosaicking_order character indicating the order in which tiles are overlapped from which the output result is mosaicked.
#' Must be one of "mostRecent", "leastRecent", or "leastCC". Default: "mostRecent"
#' @param file name of the file to save the image. If NULL, a \code{SpatRaster} object is returned. Default: NULL
#' @param format character indicating the output file format.
#' Must be one of "image/tiff", "image/png", or "image/jpeg". Default: "image/tiff"
#' @param pixels integer scalar or length-two vector indicating the requested image width and height.
#' Values must be integers between 1 and 2500.
#' @param resolution numeric scalar or length-two vector indicating the spatial resolution of the request image
#' in horizontal and vertical direction (in meters).
#'
#' Only one of the arguments "pixels" or "resolution" must be set at the same time.
#' If the argument "pixels" or "resolution" is scalar, the same value is used for horizontal and vertical direction (width and height).
#' @param buffer numeric, width of the buffer to retrieve the image of enlarged area. Default: 0
#' @param mask logical indicating if the image should contain only pixels within Area of Interest. Default: FALSE
#' @param client OAuth client object to use for authentication.
#' @param token OAuth token character string to use for authentication.
#'
#' Exactly one of either \code{client} or \code{token} must be specified. It is recommended to use \code{client}.
#' @param url character indicating the process endpoint. Default: Copernicus Data Space Ecosystem process endpoint
#' @return \code{SpatRaster} object (from the package \code{terra}) of the requested image (if \code{file} is \code{NULL}),
#' or the (invisible) name of the file created.
#' @details If the \code{aoi} argument is provided, the result is returned in the same coordinate reference system.
#' @examples
#' \dontrun{
#' #EXAMPLE1
#' dsn <- system.file("extdata", "centralpark.geojson", package = "CDSE")
#' aoi <- sf::read_sf(dsn, as_tibble = FALSE)
#' script_file <- system.file("scripts", "NDVI_uint8.js", package = "CDSE")
#' day <- "2023-07-11"
#' ras <- GetArchiveImage(aoi = aoi, time_range = day, script = script_file,
#' collection = "sentinel-2-l2a",format = "image/tiff",
#' mosaicking_order = "leastCC", resolution = 10, client = OAuthClient)
#' }
#' @seealso
#' \code{\link[CDSE]{GetCollections}}, \code{\link[CDSE]{SearchCatalog}}
#' @rdname GetArchiveImage
#' @export
#' @source \url{https://documentation.dataspace.copernicus.eu/APIs/SentinelHub/Process.html}
#' @importFrom sf st_transform st_geometry st_bbox st_buffer st_coordinates st_centroid
#' @importFrom geojsonsf sfc_geojson
#' @importFrom jsonlite fromJSON
#' @importFrom httr2 request req_headers req_body_json req_auth_bearer_token req_oauth_client_credentials req_perform
#' @importFrom terra rast crs project mask writeRaster
GetArchiveImage <- function(aoi, bbox, time_range, collection, script, file = NULL,
format = c("image/tiff", "image/png", "image/jpeg"),
mosaicking_order = c("mostRecent", "leastRecent", "leastCC"),
pixels, resolution, buffer = 0, mask = FALSE,
client, token, url = getOption("CDSE.process_url")) {
# Only one of either aoi or bbox may be specified.
if (!missing(aoi) & !missing(bbox)) {
stop("Only one of either aoi or bbox may be specified.")
}
if (missing(aoi) & missing(bbox)) {
stop("Either aoi or bbox must be specified.")
}
# Check bbox is valid
if (!missing(bbox)) {
CheckBbox(bbox)
}
# authentication
if (missing(client) & missing(token)) {
stop("Either client or token must be specified.")
}
# Only one of either pixels or resolution may be specified.
if (!missing(pixels) & !missing(resolution)) {
stop("Only one of either pixels or resolution may be specified.")
}
if (missing(pixels) & missing(resolution)) {
stop("Either pixels or resolution must be specified.")
}
if (!missing(pixels)) {
pixels <- CheckLengthIs2(pixels)
}
if (!missing(resolution)) {
resolution <- CheckLengthIs2(resolution)
}
if (missing(aoi)) { # query by bbox
# make bounds from bbox
bounds <- PolyFromBbox(bbox)
} else { # query by aoi / intersects
# convert to WGS84 first to get longitude/latitude coordinates
bounds <- sf::st_transform(sf::st_geometry(aoi), crs = 4326)
# bounding box
bbox <- as.numeric(sf::st_bbox(bounds))
}
# add buffer if required
if (buffer > 0) {
bounds <- sf::st_buffer(bounds, dist = buffer, joinStyle = "MITRE", mitreLimit = 999999)
bbox <- as.numeric(sf::st_bbox(bounds))
}
# get the number of pixels required
# boundsPlanar <- Flatten(bounds)
# if (buffer > 0) {
# boundsPlanar <- sf::st_buffer(boundsPlanar, dist = buffer, joinStyle = "MITRE", mitreLimit = 999999)
# bbox <- as.numeric(sf::st_bbox(sf::st_transform(boundsPlanar, crs = 4326)))
# }
# dims <- apply(matrix(as.numeric(sf::st_bbox(boundsPlanar)), ncol = 2), 1, diff)
# pixels <- ceiling(dims/resolution)
#
# dims.m <- apply(matrix(as.numeric(sf::st_bbox(boundsPlanar)), ncol = 2), 1, diff)
# dims.d <- apply(matrix(as.numeric(bbox), ncol = 2), 1, diff)
# res <- resolution * dims.d / dims.m
# get the time range
period <- MakeTimeRange(time_range)
data <- list(
list(
dataFilter = list(timeRange = period, mosaickingOrder = mosaicking_order[1]),
type = collection)
)
# build input part of the request
if (missing(aoi)) { # query by bbox
input <- list(bounds = list(bbox = bbox,
properties = list(crs = "http://www.opengis.net/def/crs/OGC/1.3/CRS84")),
data = data)
} else { # query by aoi
geom <- geojsonsf::sfc_geojson(sf::st_geometry(bounds))
input <- list(bounds = list(bbox = bbox, geometry = jsonlite::fromJSON(geom),
properties = list(crs = "http://www.opengis.net/def/crs/OGC/1.3/CRS84")),
data = data)
}
# responses
# if (missing(responses)) {
responses <- list(list(identifier = "default", format = list(type = format[1])))
# }
# build output part of the request
if (missing(resolution)) { # use width and height provided
output <- list(width = pixels[1], height = pixels[2],
responses = responses)
} else {
# compute the resolution at the latitude of the centroid
lat <- sf::st_coordinates(sf::st_centroid(bounds))[1, "Y"]
res <- resolution / DegLength(lat)
output <- list(resx = res[1], resy = res[2],
responses = responses)
}
# read the evalscript from file if needed
if (file.exists(script)) {
script <- paste(readLines(script), collapse = "\n")
}
# make the request body
bdy <- list(input = input, output = output, evalscript = script)
# build the request
req <- httr2::request(url)
req <- httr2::req_headers(req, Accept = format)
req <- httr2::req_body_json(req, bdy)
# select the appropriate authentication method
if (missing(client)) {
req <- httr2::req_auth_bearer_token(req, token = as.character(token))
} else {
req <- httr2::req_oauth_client_credentials(req, client = client)
}
# run the request
resp <- try(httr2::req_perform(req), silent = TRUE)
if (inherits(resp, "try-error")) {
if (length(grep("SSL peer certificate", resp[1])) == 1L) {
req <- httr2::req_options(req, ssl_verifyhost = 0L, ssl_verifypeer = 0L)
resp <- httr2::req_perform(req)
} else {
stop(LastError())
}
}
if (unlist(strsplit(format, split = "/", fixed = TRUE))[1] != "image") {
# TBD - process multipart response
} else {
if ((format != "image/tiff")) { # JPEG or PNG image
if (is.null(file)) {
tmpfic <- tempfile()
writeBin(resp$body, tmpfic)
# read the file as raster
ras <- suppressWarnings(terra::rast(tmpfic)) # warning if not tiff file
return(ras)
} else {
writeBin(resp$body, file)
invisible(file)
}
} else {
# write to temporary file to post-process the image
tmpfic <- tempfile()
writeBin(resp$body, tmpfic)
# read the file to transform in original CRS
ras <- terra::rast(tmpfic)
# if only bbox provided or format not tiff this can't be done
if (!missing(aoi)) {
ras <- terra::project(ras, terra::crs(aoi))
if (mask) {
ras <- terra::mask(ras, aoi)
}
}
# save to file or return the raster
if (is.null(file)) {
return(ras)
} else {
terra::writeRaster(ras, filename = file)
invisible(file)
}
}
}
}
|
/scratch/gouwar.j/cran-all/cranData/CDSE/R/GetArchiveImage.R
|
#' @title List available collections
#' @description Retrieves the list of available imagery collections.
#' @param as_data_frame logical indicating if the result should be returned as data frame. Default: TRUE
#' @param url character indicating the STAC catalog search endpoint. Default: Copernicus Data Space Ecosystem STAC endpoint
#' @return A \code{list} or a \code{data.frame} of all available imagery collections and their attributes.
#' @details This function doesn't require authentication.
#' @examples
#' \dontrun{
#' #EXAMPLE1
#' GetCollections(as_data_frame = TRUE)
#' }
#' @seealso
#' \code{\link[CDSE]{GetArchiveImage}}, \code{\link[CDSE]{SearchCatalog}}
#' @rdname GetCollections
#' @export
#' @source \url{https://documentation.dataspace.copernicus.eu/APIs/SentinelHub/Catalog.html}
#' @importFrom httr2 request req_perform resp_body_json
GetCollections <- function(as_data_frame = TRUE, url = getOption("CDSE.catalog_url")) {
req <- httr2::request(paste0(url, "collections"))
resp <- try(httr2::req_perform(req), silent = TRUE)
if (inherits(resp, "try-error")) {
if (length(grep("SSL peer certificate", resp[1])) == 1L) {
req <- httr2::req_options(req, ssl_verifyhost = 0L, ssl_verifypeer = 0L)
resp <- httr2::req_perform(req)
} else {
stop(LastError())
}
}
if (isTRUE(as_data_frame)) {
cnt <- httr2::resp_body_json(resp, simplifyVector = TRUE)
collezioni <- cnt$collections
bbox <- data.frame(matrix(unlist(collezioni$extent$spatial$bbox), ncol = 4, byrow = TRUE))
names(bbox) <- c("long.min", "lat.min", "long.max", "lat.max")
out <- data.frame(
id = collezioni$id,
title = collezioni$title,
description = collezioni$description,
since = sapply(collezioni$extent$temporal$interval, "[", 1),
instrument = unlist(collezioni$summaries$instrument),
gsd = sapply(collezioni$summaries$gsd, FUN = function(x) SafeNull(x)),
bands = sapply(collezioni$summaries$`eo:bands`, FUN = function(x) ifelse(is.null(x), NA, nrow(x))),
constellation = sapply(collezioni$summaries$constellation, FUN = function(x) SafeNull(x)),
stringsAsFactors = FALSE, row.names = NULL)
out <- cbind(out, bbox)
} else {
cnt <- httr2::resp_body_json(resp, simplifyVector = FALSE)
out <- cnt$collections
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/CDSE/R/GetCollections.R
|
#' @title Search collection for available images
#' @description Searches the specified collection for available images in the given time interval and intersecting with the
#' bounding box or the area of interest.
#' @param aoi sf or sfc object, typically a (multi)polygon, describing the Area of Interest.
#' @param bbox numeric vector of four elements describing the bounding box of interest.
#' Specify with a coordinate pair on two (opposite) vertices of the bounding box rectangle.
#' Coordinates need to be in longitude, latitude.
#'
#' Only one of either \code{aoi} or \code{bbox} may be specified.
#' @param from start of the time interval to search.
#' @param to end of the time interval to search.
#'
#' \code{from} and \code{to} can be either Date or character that can be converted to date by \code{as.Date}.
#'
#' Open interval (one side only) can be obtained by providing the \code{NA} or \code{NULL} value for the corresponding argument.
#' @param collection character indicating which collection to search.
#' Must be one of the collections returned by \code{GetCollections}.
#' @param as_data_frame logical indicating if the result should be returned as data frame. Default: TRUE
#' @param with_geometry logical indicating if the granule geometries should be included in the data.frame. Default: TRUE
#' @param client OAuth client object to use for authentication.
#' @param token OAuth token character string to use for authentication.
#'
#' Exactly one of either \code{client} or \code{token} must be specified. It is recommended to use \code{client}.
#' @param url character indicating the STAC catalog search endpoint. Default: Copernicus Data Space Ecosystem STAC endpoint
#' @return A \code{list}, \code{data.frame} or a \code{sf} object.
#' @details If no images are found, a \code{NULL} value is returned.
#' @examples
#' \dontrun{
#' #EXAMPLE1
#' dsn <- system.file("extdata", "luxembourg.geojson", package = "CDSE")
#' aoi <- sf::read_sf(dsn, as_tibble = FALSE)
#' images <- SearchCatalog(aoi = aoi, from = "2023-07-01", to = "2023-07-31",
#' collection = "sentinel-2-l2a", with_geometry = TRUE, client = OAuthClient)
#' }
#' @seealso
#' \code{\link[CDSE]{GetCollections}}, \code{\link[CDSE]{GetArchiveImage}}
#' @rdname SearchCatalog
#' @export
#' @source \url{https://documentation.dataspace.copernicus.eu/APIs/SentinelHub/Catalog.html}
#' @importFrom sf st_transform st_geometry st_bbox st_polygon st_as_sfc st_intersects st_area st_intersection
#' @importFrom geojsonsf sfc_geojson
#' @importFrom jsonlite fromJSON
#' @importFrom httr2 request req_body_json req_auth_bearer_token req_oauth_client_credentials req_perform resp_body_json
#' @importFrom lutz tz_lookup_coords
#' @importFrom lubridate with_tz
SearchCatalog <- function(aoi, bbox, from, to, collection, as_data_frame = TRUE, with_geometry = TRUE, client, token,
url = getOption("CDSE.catalog_url")) {
    # Only one of either aoi or bbox may be specified.
if (!missing(aoi) & !missing(bbox)) {
stop("Only one of either aoi or bbox may be specified.")
}
if (missing(aoi) & missing(bbox)) {
stop("Either aoi or bbox must be specified.")
}
# Check bbox is valid
if (!missing(bbox)) {
CheckBbox(bbox)
}
# authentication
if (missing(client) & missing(token)) {
stop("Either client or token must be specified.")
}
# determine the requested period
if (is.na(from) || is.null(from)) {
p1 <- ".."
} else {
p1 <- sprintf("%sT00:00:00Z", as.Date(from))
}
if (is.na(to) || is.null(to)) {
p2 <- ".."
} else {
p2 <- sprintf("%sT23:59:59Z", as.Date(to))
}
period <- paste(p1, p2, sep = "/")
# check period
if (period == "../..") {
stop("only one side of the time interval can be open")
}
limes <- 100 # 100 is the maximal allowed value
# request body
if (missing(aoi)) { # query by bbox
# trick to deal with the fact that 'collections' should be boxed but 'limit' unboxed
# -> wrap collections in a list and use auto_unbox = TRUE
bdy <- list("bbox" = bbox, "datetime" = period, "collections" = list(collection), "limit" = limes)
} else { # query by aoi / intersects
# convert to WGS84 first to get longitude/latitude coordinates
bounds <- sf::st_transform(sf::st_geometry(aoi), crs = 4326)
# bounding box
bbox <- as.numeric(sf::st_bbox(bounds))
# intersects
geom <- geojsonsf::sfc_geojson(sf::st_geometry(bounds))
# trick to deal with the fact that 'collections' should be boxed but 'limit' unboxed
# -> wrap collections in a list and use auto_unbox = TRUE
bdy <- list("intersects" = jsonlite::fromJSON(geom), "datetime" = period, "collections" = list(collection), "limit" = limes)
}
# build the request
req <- httr2::request(paste0(url, "search"))
req <- httr2::req_body_json(req, data = bdy, auto_unbox = TRUE)
# select the appropriate authentication method
if (missing(client)) {
req <- httr2::req_auth_bearer_token(req, token = as.character(token))
} else {
req <- httr2::req_oauth_client_credentials(req, client = client)
}
# run the request
resp <- try(httr2::req_perform(req), silent = TRUE)
if (inherits(resp, "try-error")) {
if (length(grep("SSL peer certificate", resp[1])) == 1L) {
req <- httr2::req_options(req, ssl_verifyhost = 0L, ssl_verifypeer = 0L)
resp <- httr2::req_perform(req)
} else {
stop(LastError())
}
}
cnt <- httr2::resp_body_json(resp, simplifyVector = FALSE)
features <- cnt$features
# if no features found return NULL
if (length(features) == 0L) return(NULL)
# pagination
while (!is.null(cnt$context$`next`)) {
bdy$`next` <- cnt$context$`next`
req <- httr2::req_body_json(req, data = bdy, auto_unbox = TRUE)
resp <- httr2::req_perform(req)
cnt <- httr2::resp_body_json(resp, simplifyVector = FALSE)
features <- c(features, cnt$features)
}
# prepare output
if (as_data_frame) {
datetime_txt <- sapply(features, FUN = function(x) x$properties$datetime)
datetime_utc <- as.POSIXct(substr(datetime_txt, 1, 19), tz = "UTC", format = "%Y-%m-%dT%H:%M:%S")
centre <- apply(matrix(bbox, ncol = 2), 1, mean)
tz <- lutz::tz_lookup_coords(lon = centre[1], lat = centre[2], method = "fast", warn = FALSE)
datetime_tz <- lubridate::with_tz(datetime_utc, tzone = tz)
bboxmat <- data.frame(t(sapply(features, FUN = function(x) as.numeric(x$bbox))))
names(bboxmat) <- c("long.min", "lat.min", "long.max", "lat.max")
out <- data.frame(
acquisitionDate = as.Date(datetime_utc),
tileCloudCover = sapply(features, FUN = function(x) SafeNull(x$properties$`eo:cloud_cover`)),
satellite = sapply(features, FUN = function(x) x$properties$platform),
acquisitionTimestampUTC = datetime_utc,
acquisitionTimestampLocal = datetime_tz,
sourceId = sapply(features, FUN = function(x) x$id),
stringsAsFactors = FALSE)
out <- cbind(out, bboxmat)
if (with_geometry) {
lst1 <- lapply(features, FUN = function(x) matrix(unlist(x$geometry$coordinates), ncol = 2, byrow = TRUE))
lst2 <- lapply(1:length(lst1), FUN = function(i) sf::st_polygon(lst1[i]))
geom_col <- sf::st_as_sfc(lst2, crs = 4326)
if (missing(aoi)) {
bounds <- PolyFromBbox(bbox)
}
doIntersect <- unlist(sf::st_intersects(sf::st_geometry(bounds), geom_col))
out[doIntersect, "areaCoverage"] <- 100.0 * sf::st_area(sf::st_intersection(sf::st_geometry(bounds), geom_col)) /
sf::st_area(sf::st_geometry(bounds))
sf::st_geometry(out) <- geom_col
col_ord <- c("acquisitionDate", "tileCloudCover", "areaCoverage", "satellite",
"acquisitionTimestampUTC", "acquisitionTimestampLocal", "sourceId",
"long.min", "lat.min", "long.max", "lat.max", "geometry")
out <- out[, col_ord]
}
} else {
out <- features
}
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/CDSE/R/SearchCatalog.R
|
#' @title Produce image catalog without multiple entries per date
#' @description Sometimes several images can be available for a given day. It can be useful to have a list
#' where for any given day there is just one row. This unique row can be selected to represent either
#' the least cloud coverage or the biggest coverage of the area of interest.
#' @param imageCatalog \code{data.frame} as returned by the \code{SearchCatalog} function.
#' @param by character indicating which attribute is used to select the best image per date.
#' Can be either "areaCoverage" or "tileCloudCover".
#' @param keep list of columns to keep in output. Default: all columns in input.
#' @return \code{data.frame} with one row per date.
#' @details By default, the returned \code{data.frame} has the same columns as the input catalog.
#' User can specify a subset of columns to include in the output through the \code{keep} parameter.
#' @examples
#' \dontrun{
#' #EXAMPLE1
#' dsn <- system.file("extdata", "luxembourg.geojson", package = "CDSE")
#' aoi <- sf::read_sf(dsn, as_tibble = FALSE)
#' images <- SearchCatalog(aoi = aoi, from = "2023-07-01", to = "2023-07-31",
#' collection = "sentinel-2-l2a", with_geometry = TRUE, client = OAuthClient)
#' best_daily <- UniqueCatalog(images, by = "areaCoverage")
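#' # a sketch using the documented 'by' and 'keep' arguments: one image per
#' # day with the least cloud cover, keeping only a few columns
#' least_cloudy <- UniqueCatalog(images, by = "tileCloudCover",
#'     keep = c("acquisitionDate", "tileCloudCover", "sourceId"))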
#' }
#' @seealso
#' \code{\link[CDSE]{SearchCatalog}}
#' @rdname UniqueCatalog
#' @export
#' @importFrom stats aggregate
UniqueCatalog <- function(imageCatalog, by = c("areaCoverage", "tileCloudCover"), keep = names(imageCatalog)) {
by <- match.arg(by, choices = c("areaCoverage", "tileCloudCover"))
if (by == "areaCoverage") {
# get image with maximal area coverage for the day
agg1 <- stats::aggregate(areaCoverage ~ acquisitionDate, data = imageCatalog, FUN = max)
tmp <- merge.data.frame(agg1, imageCatalog, by = c("acquisitionDate", "areaCoverage"), sort = FALSE)
} else {
# get image with minimal cloud cover for the day
agg1 <- stats::aggregate(tileCloudCover ~ acquisitionDate, data = imageCatalog, FUN = min)
tmp <- merge.data.frame(agg1, imageCatalog, by = c("acquisitionDate", "tileCloudCover"), sort = FALSE)
}
# in case of ties, get an arbitrary image (here the smallest sourceId, could also be the biggest)
agg2 <- stats::aggregate(sourceId ~ acquisitionDate, data = tmp, FUN = min)
tmp <- merge.data.frame(agg2, tmp, by = c("acquisitionDate", "sourceId"), sort = FALSE)
out <- tmp[, keep]
out <- out[rev(order(tmp$acquisitionDate)), ]
# clear the row names
row.names(out) <- NULL
return(out)
}
|
/scratch/gouwar.j/cran-all/cranData/CDSE/R/UniqueCatalog.R
|